prompt | completion
---|---
large_string (lengths 70–991k) | large_string (lengths 0–1.02k)
<|file_name|>test_user.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# * Authors:
# * TJEBBES Gaston <[email protected]>
# * Arezki Feth <[email protected]>;
# * Miotte Julien <[email protected]>;
import pytest
from autonomie.tests.tools import Dummy
def test_default_disable():
from autonomie.forms.user.user import deferred_company_disable_default
companies = [Dummy(employees=range(2))]
user = Dummy(companies=companies)
req = Dummy(context=user)
assert not deferred_company_disable_default("", {'request': req})
companies = [Dummy(employees=[1])]
user = Dummy(companies=companies)<|fim▁hole|>def test_user_add_schema(pyramid_request):
import colander
from autonomie.forms.user.user import get_add_edit_schema
appstruct = {
'civilite': u'Monsieur',
'lastname': u'Test lastname',
'firstname': u"Firstname",
'email': "[email protected]",
'add_login': "0",
}
schema = get_add_edit_schema()
schema = schema.bind(request=pyramid_request)
result = schema.deserialize(appstruct)
assert 'email' in result
# civilite
with pytest.raises(colander.Invalid):
appstruct = {
'civilite': u"Not a valid one",
'lastname': u'Test lastname',
'firstname': u"Firstname",
'email': "[email protected]",
'add_login': "0",
}
schema.deserialize(appstruct)
# lastname
with pytest.raises(colander.Invalid):
appstruct = {
'civilite': u'Monsieur',
'firstname': u"Firstname",
'email': "[email protected]",
'add_login': "0",
}
schema.deserialize(appstruct)
# firstname
with pytest.raises(colander.Invalid):
appstruct = {
'civilite': u'Monsieur',
'lastname': u'Test lastname',
'email': "[email protected]",
'add_login': "0",
}
schema.deserialize(appstruct)
# email
with pytest.raises(colander.Invalid):
appstruct = {
'civilite': u'Monsieur',
'lastname': u'Test lastname',
'firstname': u"Firstname",
'add_login': "0",
}
schema.deserialize(appstruct)
with pytest.raises(colander.Invalid):
appstruct = {
'civilite': u'Monsieur',
'lastname': u'Test lastname',
'firstname': u"Firstname",
'email': "notanemail",
'add_login': "0",
}
schema.deserialize(appstruct)<|fim▁end|> | req = Dummy(context=user)
assert(deferred_company_disable_default("", {'request': req}))
|
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>extern crate vulkano_shaders;
<|fim▁hole|>fn main() {
// Compiling shaders to SPIR-V
vulkano_shaders::build_glsl_shaders([("src/shaders/vert.glsl",
vulkano_shaders::ShaderType::Vertex),
("src/shaders/frag.glsl",
vulkano_shaders::ShaderType::Fragment)]
.iter()
.cloned());
}<|fim▁end|> | |
<|file_name|>temp_arg_type.cpp<|end_file_name|><|fim▁begin|>// RUN: %clang_cc1 -fsyntax-only -verify %s
template<typename T> class A; // expected-note 2 {{template parameter is declared here}} expected-note{{template is declared here}}
// [temp.arg.type]p1
A<0> *a1; // expected-error{{template argument for template type parameter must be a type}}
A<A> *a2; // expected-error{{use of class template 'A' requires template arguments}}<|fim▁hole|>A<int(float)> *a5;
A<A<int> > *a6;
// Pass an overloaded function template:
template<typename T> void function_tpl(T);
A<function_tpl> a7; // expected-error{{template argument for template type parameter must be a type}}
// Pass a qualified name:
namespace ns {
template<typename T> class B {}; // expected-note{{template is declared here}}
}
A<ns::B> a8; // expected-error{{use of class template 'ns::B' requires template arguments}}
// [temp.arg.type]p2
void f() {
class X { };
A<X> * a = 0; // expected-warning{{template argument uses local type 'X'}}
}
struct { int x; } Unnamed; // expected-note{{unnamed type used in template argument was declared here}}
A<__typeof__(Unnamed)> *a9; // expected-warning{{template argument uses unnamed type}}
template<typename T, unsigned N>
struct Array {
typedef struct { T x[N]; } type;
};
template<typename T> struct A1 { };
A1<Array<int, 17>::type> ax;
// FIXME: [temp.arg.type]p3. The check doesn't really belong here (it
// belongs somewhere in the template instantiation section).<|fim▁end|> |
A<int> *a3;
A<int()> *a4; |
<|file_name|>NotificationDismissBroadcastReceiver.java<|end_file_name|><|fim▁begin|>package org.wordpress.android.ui.notifications;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.support.v4.app.NotificationManagerCompat;
import org.wordpress.android.push.GCMMessageService;
/*
* Clears the notification map when a user dismisses a notification
*/
public class NotificationDismissBroadcastReceiver extends BroadcastReceiver {
@Override
public void onReceive(Context context, Intent intent) {
int notificationId = intent.getIntExtra("notificationId", 0);
if (notificationId == GCMMessageService.GROUP_NOTIFICATION_ID) {
GCMMessageService.clearNotifications();
} else {
GCMMessageService.removeNotification(notificationId);
// Dismiss the grouped notification if a user dismisses all notifications from a wear device
if (!GCMMessageService.hasNotifications()) {
NotificationManagerCompat notificationManager = NotificationManagerCompat.from(context);
notificationManager.cancel(GCMMessageService.GROUP_NOTIFICATION_ID);<|fim▁hole|> }
}
}
}<|fim▁end|> | |
<|file_name|>UdacityFrameWork.py<|end_file_name|><|fim▁begin|>__author__ = 'canderson'
import os
import webapp2
import jinja2
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
<|fim▁hole|>class MainPage(Handler):
def get(self):
#self.write("asciichan!")
self.render('form.html')
app = webapp2.WSGIApplication([('/', MainPage)], debug=True)<|fim▁end|> | |
<|file_name|>chat.spec.ts<|end_file_name|><|fim▁begin|>describe('app.chat', function() {
beforeEach(module('app.chat'));
describe('ChatCtrl', function() {
var chatCtrl, $scope;
beforeEach(function() {
inject(function($controller) {
$scope = {};
chatCtrl = $controller('ChatCtrl', {$scope: $scope});
});
});
it('creates messages array in scope', function() {
expect(Object.prototype.toString.call($scope.messages)).toBe('[object Array]');
});<|fim▁hole|>});<|fim▁end|> | }); |
<|file_name|>JobManifest.py<|end_file_name|><|fim▁begin|>"""
"""<|fim▁hole|>from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.JDL import loadJDLAsCFG, dumpCFGAsJDL
from DIRAC.WorkloadManagementSystem.Agent.SiteDirector import getSubmitPools
class JobManifest(object):
def __init__(self, manifest=""):
self.__manifest = CFG()
self.__dirty = False
self.__ops = False
if manifest:
result = self.load(manifest)
if not result['OK']:
raise Exception(result['Message'])
def isDirty(self):
return self.__dirty
def setDirty(self):
self.__dirty = True
def clearDirty(self):
self.__dirty = False
def load(self, dataString):
"""
Auto-discover the format type based on the enclosing [ .. ] of JDL
"""
dataString = dataString.strip()
if dataString[0] == "[" and dataString[-1] == "]":
return self.loadJDL(dataString)
else:
return self.loadCFG(dataString)
def loadJDL(self, jdlString):
"""
Load job manifest from JDL format
"""
result = loadJDLAsCFG(jdlString.strip())
if not result['OK']:
self.__manifest = CFG()
return result
self.__manifest = result['Value'][0]
return S_OK()
def loadCFG(self, cfgString):
"""
Load job manifest from CFG format
"""
try:
self.__manifest.loadFromBuffer(cfgString)
except Exception as e:
return S_ERROR("Can't load manifest from cfg: %s" % str(e))
return S_OK()
def dumpAsCFG(self):
return str(self.__manifest)
def getAsCFG(self):
return self.__manifest.clone()
def dumpAsJDL(self):
return dumpCFGAsJDL(self.__manifest)
def __getCSValue(self, varName, defaultVal=None):
if not self.__ops:
self.__ops = Operations(group=self.__manifest['OwnerGroup'], setup=self.__manifest['DIRACSetup'])
if varName[0] != "/":
varName = "JobDescription/%s" % varName
return self.__ops.getValue(varName, defaultVal)
def __checkNumericalVar(self, varName, defaultVal, minVal, maxVal):
"""
Check a numerical var
"""
initialVal = False
if varName not in self.__manifest:
varValue = self.__getCSValue("Default%s" % varName, defaultVal)
else:
varValue = self.__manifest[varName]
initialVal = varValue
try:
varValue = long(varValue)
except BaseException:
return S_ERROR("%s must be a number" % varName)
minVal = self.__getCSValue("Min%s" % varName, minVal)
maxVal = self.__getCSValue("Max%s" % varName, maxVal)
varValue = max(minVal, min(varValue, maxVal))
if initialVal != varValue:
self.__manifest.setOption(varName, varValue)
return S_OK(varValue)
def __checkChoiceVar(self, varName, defaultVal, choices):
"""
Check a choice var
"""
initialVal = False
if varName not in self.__manifest:
varValue = self.__getCSValue("Default%s" % varName, defaultVal)
else:
varValue = self.__manifest[varName]
initialVal = varValue
if varValue not in self.__getCSValue("Choices%s" % varName, choices):
return S_ERROR("%s is not a valid value for %s" % (varValue, varName))
if initialVal != varValue:
self.__manifest.setOption(varName, varValue)
return S_OK(varValue)
def __checkMultiChoice(self, varName, choices):
"""
Check a multi choice var
"""
initialVal = False
if varName not in self.__manifest:
return S_OK()
else:
varValue = self.__manifest[varName]
initialVal = varValue
choices = self.__getCSValue("Choices%s" % varName, choices)
for v in List.fromChar(varValue):
if v not in choices:
return S_ERROR("%s is not a valid value for %s" % (v, varName))
if initialVal != varValue:
self.__manifest.setOption(varName, varValue)
return S_OK(varValue)
def __checkMaxInputData(self, maxNumber):
"""
Check Maximum Number of Input Data files allowed
"""
varName = "InputData"
if varName not in self.__manifest:
return S_OK()
varValue = self.__manifest[varName]
if len(List.fromChar(varValue)) > maxNumber:
return S_ERROR('Number of Input Data Files (%s) greater than current limit: %s' %
(len(List.fromChar(varValue)), maxNumber))
return S_OK()
def __contains__(self, key):
""" Check if the manifest has the required key
"""
return key in self.__manifest
def setOptionsFromDict(self, varDict):
for k in sorted(varDict):
self.setOption(k, varDict[k])
def check(self):
"""
Check that the manifest is OK
"""
for k in ['OwnerName', 'OwnerDN', 'OwnerGroup', 'DIRACSetup']:
if k not in self.__manifest:
return S_ERROR("Missing var %s in manifest" % k)
# Check CPUTime
result = self.__checkNumericalVar("CPUTime", 86400, 100, 500000)
if not result['OK']:
return result
result = self.__checkNumericalVar("Priority", 1, 0, 10)
if not result['OK']:
return result
allowedSubmitPools = getSubmitPools(self.__manifest['OwnerGroup'])
result = self.__checkMultiChoice("SubmitPools", list(set(allowedSubmitPools)))
if not result['OK']:
return result
result = self.__checkMultiChoice("PilotTypes", ['private'])
if not result['OK']:
return result
maxInputData = Operations().getValue("JobDescription/MaxInputData", 500)
result = self.__checkMaxInputData(maxInputData)
if not result['OK']:
return result
operation = Operations(group=self.__manifest['OwnerGroup'])
allowedJobTypes = operation.getValue("JobDescription/AllowedJobTypes", ['User', 'Test', 'Hospital'])
transformationTypes = operation.getValue("Transformations/DataProcessing", [])
result = self.__checkMultiChoice("JobType", allowedJobTypes + transformationTypes)
if not result['OK']:
return result
return S_OK()
def createSection(self, secName, contents=False):
if secName not in self.__manifest:
if contents and not isinstance(contents, CFG):
return S_ERROR("Contents for section %s is not a cfg object" % secName)
self.__dirty = True
return S_OK(self.__manifest.createNewSection(secName, contents=contents))
return S_ERROR("Section %s already exists" % secName)
def getSection(self, secName):
self.__dirty = True
if secName not in self.__manifest:
return S_ERROR("%s does not exist" % secName)
sec = self.__manifest[secName]
if not sec:
return S_ERROR("%s section empty" % secName)
return S_OK(sec)
def setSectionContents(self, secName, contents):
if contents and not isinstance(contents, CFG):
return S_ERROR("Contents for section %s is not a cfg object" % secName)
self.__dirty = True
if secName in self.__manifest:
self.__manifest[secName].reset()
self.__manifest[secName].mergeWith(contents)
else:
self.__manifest.createNewSection(secName, contents=contents)
def setOption(self, varName, varValue):
"""
Set a var in job manifest
"""
self.__dirty = True
levels = List.fromChar(varName, "/")
cfg = self.__manifest
for l in levels[:-1]:
if l not in cfg:
cfg.createNewSection(l)
cfg = cfg[l]
cfg.setOption(levels[-1], varValue)
def remove(self, opName):
levels = List.fromChar(opName, "/")
cfg = self.__manifest
for l in levels[:-1]:
if l not in cfg:
return S_ERROR("%s does not exist" % opName)
cfg = cfg[l]
if cfg.deleteKey(levels[-1]):
self.__dirty = True
return S_OK()
return S_ERROR("%s does not exist" % opName)
def getOption(self, varName, defaultValue=None):
"""
Get a variable from the job manifest
"""
cfg = self.__manifest
return cfg.getOption(varName, defaultValue)
def getOptionList(self, section=""):
"""
Get a list of variables in a section of the job manifest
"""
cfg = self.__manifest.getRecursive(section)
if not cfg or 'value' not in cfg:
return []
cfg = cfg['value']
return cfg.listOptions()
def isOption(self, opName):
"""
Check if it is a valid option
"""
return self.__manifest.isOption(opName)
def getSectionList(self, section=""):
"""
Get a list of sections in the job manifest
"""
cfg = self.__manifest.getRecursive(section)
if not cfg or 'value' not in cfg:
return []
cfg = cfg['value']
return cfg.listSections()<|fim▁end|> |
from DIRAC import S_OK, S_ERROR |
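A minimal usage sketch for the JobManifest class above (illustrative only; the JDL string, option name, and value are hypothetical and not taken from DIRAC): load() auto-detects the format, so a string wrapped in [ .. ] goes through loadJDL() while anything else goes through loadCFG().
# Hypothetical example, not part of the original module
manifest = JobManifest('[ CPUTime = 3600; Priority = 1; ]')  # bracketed string -> loadJDL()
manifest.setOption("JobType", "User")                        # option names may use "/" paths
print(manifest.dumpAsCFG())                                  # or dumpAsJDL() to round-trip to JDL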
<|file_name|>service.go<|end_file_name|><|fim▁begin|>package sghelm
import (
"context"
"encoding/json"
"sort"
"strings"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/repo"
"github.com/supergiant/control/pkg/model"
"github.com/supergiant/control/pkg/sgerrors"
"github.com/supergiant/control/pkg/sghelm/repositories"
"github.com/supergiant/control/pkg/storage"
)
const (
readmeFileName = "readme.md"
repoPrefix = "/helm/repositories/"
)
var _ Servicer = &Service{}
// Servicer is an interface for the helm service.
type Servicer interface {
CreateRepo(ctx context.Context, e *repo.Entry) (*model.RepositoryInfo, error)
UpdateRepo(ctx context.Context, name string, opts *repo.Entry) (*model.RepositoryInfo, error)
GetRepo(ctx context.Context, repoName string) (*model.RepositoryInfo, error)
ListRepos(ctx context.Context) ([]model.RepositoryInfo, error)
DeleteRepo(ctx context.Context, repoName string) (*model.RepositoryInfo, error)
GetChartData(ctx context.Context, repoName, chartName, chartVersion string) (*model.ChartData, error)
ListCharts(ctx context.Context, repoName string) ([]model.ChartInfo, error)
GetChart(ctx context.Context, repoName, chartName, chartVersion string) (*chart.Chart, error)
}
// Service manages helm repositories.
type Service struct {
storage storage.Interface
repos repositories.Interface
}
// NewService constructs a Service for helm repositories.
func NewService(s storage.Interface) (*Service, error) {
repos, err := repositories.New(repositories.DefaultHome)
if err != nil {
return nil, errors.Wrap(err, "setup repositories manager")
}
return &Service{
storage: s,
repos: repos,
}, nil
}
// CreateRepo stores a helm repository in the provided storage.
func (s Service) CreateRepo(ctx context.Context, e *repo.Entry) (*model.RepositoryInfo, error) {
if e == nil {
return nil, sgerrors.ErrNilEntity
}
r, err := s.GetRepo(ctx, e.Name)
if err != nil && !sgerrors.IsNotFound(err) {
return nil, err
}
if r != nil && r.Config.Name == e.Name {
return nil, sgerrors.ErrAlreadyExists
}
ind, err := s.repos.GetIndexFile(e)
if err != nil {
return nil, errors.Wrap(err, "get repository index")
}
// store the index file
r = toRepoInfo(e, ind)
rawJSON, err := json.Marshal(r)
if err != nil {
return nil, errors.Wrap(err, "marshal index file")
}
if err = s.storage.Put(ctx, repoPrefix, e.Name, rawJSON); err != nil {
return nil, errors.Wrap(err, "storage")<|fim▁hole|> }
return r, nil
}
// UpdateRepo downloads the latest index file and updates a helm repository in the provided storage.
func (s Service) UpdateRepo(ctx context.Context, name string, opts *repo.Entry) (*model.RepositoryInfo, error) {
r, err := s.GetRepo(ctx, name)
if err != nil {
return nil, err
}
// mergo panics on nil entry
if opts == nil {
opts = &repo.Entry{}
}
opts.Name = "" // prevent updating the repo name
// merge configs
if err = mergo.Merge(&r.Config, opts, mergo.WithOverride); err != nil {
return nil, err
}
ind, err := s.repos.GetIndexFile(&r.Config)
if err != nil {
return nil, errors.Wrap(err, "get repository index")
}
// store the index file
r = toRepoInfo(&r.Config, ind)
rawJSON, err := json.Marshal(r)
if err != nil {
return nil, errors.Wrap(err, "marshal index file")
}
if err = s.storage.Put(ctx, repoPrefix, name, rawJSON); err != nil {
return nil, errors.Wrap(err, "storage")
}
return r, nil
}
// GetRepo retrieves the repository index file for the provided name.
func (s Service) GetRepo(ctx context.Context, repoName string) (*model.RepositoryInfo, error) {
res, err := s.storage.Get(ctx, repoPrefix, repoName)
if err != nil {
return nil, errors.Wrap(err, "storage")
}
// not found
if res == nil {
return nil, errors.Wrap(sgerrors.ErrNotFound, "repo not found")
}
r := &model.RepositoryInfo{}
if err = json.Unmarshal(res, r); err != nil {
return nil, errors.Wrap(err, "unmarshal")
}
return r, nil
}
// ListRepos retrieves all helm repositories from the storage.
func (s Service) ListRepos(ctx context.Context) ([]model.RepositoryInfo, error) {
rawRepos, err := s.storage.GetAll(ctx, repoPrefix)
if err != nil {
return nil, errors.Wrap(err, "storage")
}
repos := make([]model.RepositoryInfo, len(rawRepos))
for i, raw := range rawRepos {
r := &model.RepositoryInfo{}
err = json.Unmarshal(raw, r)
if err != nil {
return nil, errors.Wrap(err, "unmarshal")
}
repos[i] = *r
}
return repos, nil
}
// DeleteRepo removes a helm repository from the storage by its name.
func (s Service) DeleteRepo(ctx context.Context, repoName string) (*model.RepositoryInfo, error) {
hrepo, err := s.GetRepo(ctx, repoName)
if err != nil {
return nil, errors.Wrap(err, "get repository")
}
return hrepo, s.storage.Delete(ctx, repoPrefix, repoName)
}
func (s Service) GetChartData(ctx context.Context, repoName, chartName, chartVersion string) (*model.ChartData, error) {
chrt, err := s.GetChart(ctx, repoName, chartName, chartVersion)
if err != nil {
return nil, err
}
return toChartData(chrt), nil
}
func (s Service) ListCharts(ctx context.Context, repoName string) ([]model.ChartInfo, error) {
hrepo, err := s.GetRepo(ctx, repoName)
if err != nil {
return nil, errors.Wrapf(err, "get %s repository info", repoName)
}
return hrepo.Charts, nil
}
func (s Service) GetChart(ctx context.Context, repoName, chartName, chartVersion string) (*chart.Chart, error) {
hrepo, err := s.GetRepo(ctx, repoName)
if err != nil {
return nil, errors.Wrapf(err, "get %s repository info", repoName)
}
ref, err := findChartURL(hrepo.Charts, chartName, chartVersion)
if err != nil {
return nil, errors.Wrapf(err, "get %s(%s) chart", chartName, chartVersion)
}
chrt, err := s.repos.GetChart(hrepo.Config, ref)
if err != nil {
return nil, errors.Wrapf(err, "get %s chart", ref)
}
return chrt, nil
}
func (s Service) GetChartRef(ctx context.Context, repoName, chartName, chartVersion string) (string, error) {
hrepo, err := s.GetRepo(ctx, repoName)
if err != nil {
return "", errors.Wrapf(err, "get %s repository info", repoName)
}
ref, err := findChartURL(hrepo.Charts, chartName, chartVersion)
if err != nil {
return "", errors.Wrapf(err, "get %s(%s) chart", chartName, chartVersion)
}
return ref, nil
}
func toChartData(chrt *chart.Chart) *model.ChartData {
if chrt == nil {
return nil
}
out := &model.ChartData{
Metadata: chrt.Metadata,
}
if chrt.Values != nil {
out.Values = chrt.Values.Raw
}
if chrt.Files != nil {
for _, f := range chrt.Files {
if f != nil && strings.ToLower(f.TypeUrl) == readmeFileName {
out.Readme = string(f.Value)
}
}
}
return out
}
func findChartURL(charts []model.ChartInfo, chartName, chartVersion string) (string, error) {
for _, chrt := range charts {
if chrt.Name != chartName {
continue
}
if len(chrt.Versions) == 0 {
break
}
chrtVer := findChartVersion(chrt.Versions, chartVersion)
if len(chrtVer.URLs) != 0 {
// charts are sorted in descending order
return chrtVer.URLs[0], nil
}
}
return "", sgerrors.ErrNotFound
}
func findChartVersion(chrtVers []model.ChartVersion, version string) model.ChartVersion {
version = strings.TrimSpace(version)
if len(chrtVers) > 0 && version == "" {
return chrtVers[len(chrtVers)-1]
}
for _, v := range chrtVers {
if v.Version == version {
return v
}
}
return model.ChartVersion{}
}
func toRepoInfo(e *repo.Entry, index *repo.IndexFile) *model.RepositoryInfo {
r := &model.RepositoryInfo{
Config: *e,
}
if index == nil || len(index.Entries) == 0 {
return r
}
r.Charts = make([]model.ChartInfo, 0, len(index.Entries))
for name, entry := range index.Entries {
if len(entry) == 0 {
continue
}
// ensure chart versions are sorted in descending order
sort.SliceStable(entry, func(i, j int) bool {
return entry[i].Version > entry[j].Version
})
if entry[0].Deprecated {
continue
}
r.Charts = append(r.Charts, model.ChartInfo{
Name: name,
Repo: e.Name,
Icon: iconFrom(entry),
Description: descriptionFrom(entry),
Versions: toChartVersions(entry),
})
}
// chartVersions received from helm come from a map, so iteration order is not deterministic;
// sort the results by name to ensure ordering
sort.SliceStable(r.Charts, func(i, j int) bool {
return r.Charts[i].Name < r.Charts[j].Name
})
return r
}
func iconFrom(cvs repo.ChartVersions) string {
// chartVersions are sorted, use the latest one
if len(cvs) > 0 {
return cvs[0].Icon
}
return ""
}
func descriptionFrom(cvs repo.ChartVersions) string {
// chartVersions are sorted, use the latest one
if len(cvs) > 0 {
return cvs[0].Description
}
return ""
}
func toChartVersions(cvs repo.ChartVersions) []model.ChartVersion {
if len(cvs) == 0 {
return nil
}
chartVersions := make([]model.ChartVersion, 0, len(cvs))
for _, cv := range cvs {
chartVersions = append(chartVersions, model.ChartVersion{
Version: cv.Version,
AppVersion: cv.AppVersion,
Created: cv.Created,
Digest: cv.Digest,
URLs: cv.URLs,
})
}
return chartVersions
}<|fim▁end|> | |
<|file_name|>plot_mlp_training_curves.py<|end_file_name|><|fim▁begin|>"""
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
Note that those results can be highly dependent on the value of
``learning_rate_init``.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'adam', 'learning_rate_init': 0.01}]<|fim▁hole|> "inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()<|fim▁end|> |
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum", |
<|file_name|>configmanager.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#-*- coding: UTF-8 -*-
#Ticloud web version 2.0
#author:WangRui
import ConfigParser
import logging
class ConfigManager(object):
_config_dict = None
@staticmethod
def create(filename):
parse_file = ParseIniFile(filename)
parse_file.init()
parse_file.getvalue()
parse_file.close()
ConfigManager._config_dict = parse_file.ini_dict
@staticmethod
def getvalue(arr, args):
try:
return ConfigManager._config_dict[arr][args]
except (KeyError, TypeError):
logging.error("could not get config value from ConfigManager._config_dict")
return None
class ParseIniFile(object):
"""
Parse an INI configuration file<|fim▁hole|> self.cfg = None
self.read_handle = None
self.ini_dict = {}
def init(self):
self.cfg = ConfigParser.ConfigParser()
try:
with open(self.filename, "r") as self.read_handle:
self.cfg.readfp(self.read_handle)
except IOError:
logging.error("parse ini file error")
def close(self):
if self.read_handle is not None:
self.read_handle.close()
def getvalue(self):
if self.read_handle:
for sect in self.cfg.sections():
temp_dict = dict()
temp_dict["info"] = ''
for opt in self.cfg.options(sect):
temp_dict[opt] = self.cfg.get(sect, opt)
info = "\n" + opt + "=" + self.cfg.get(sect, opt)
temp_dict["info"] += info
self.ini_dict[sect] = temp_dict
def all_options(self, sect):
List = []
for opt in self.cfg.options(sect):
Dict = {}
Dict["opt"] = opt
Dict["value"] = self.cfg.get(sect, opt)
List.append(Dict)
return List
def get_value_now(self, sect, opt):
return self.cfg.get(sect, opt)
def write(self, data):
for k in self.ini_dict[data]:
if not cmp(k, "info"):
continue
self.cfg.set(data, k, self.ini_dict[data][k])
self.cfg.write(open(self.filename, "w"))
def delsection(self, name):
e = ''
self.cfg = ConfigParser.ConfigParser()
try:
self.cfg.read(self.filename)
self.cfg.remove_section(name)
self.cfg.write(open(self.filename, "w"))
except ConfigParser.ParsingError, e:
print e
return e
class ParseConfigFile(object):
def __init__(self, filename):
self.filename = filename
self.cfg = None
self.read_handle = None
self.ini_dict = {}
def init(self):
self.cfg = ConfigParser.ConfigParser()
try:
with open(self.filename, "r") as self.read_handle:
self.cfg.readfp(self.read_handle)
except IOError:
logging.error("parse ini file error")
def close(self):
if self.read_handle is not None:
self.read_handle.close()
def getvalue(self):
if self.read_handle:
for sect in self.cfg.sections():
temp_dict = dict()
temp_dict["info"] = ''
for opt in self.cfg.options(sect):
temp_dict[opt] = self.cfg.get(sect, opt)
info = "\n" + opt + "=" + self.cfg.get(sect, opt)
temp_dict["info"] += info
self.ini_dict[sect] = temp_dict
def write(self, data):
for k in self.ini_dict[data]:
if not cmp(k, "info"):
continue
self.cfg.set(data, k, self.ini_dict[data][k])
self.cfg.write(open(self.filename, "w"))
def delsection(self, name):
e = ''
self.cfg = ConfigParser.ConfigParser()
try:
self.cfg.read(self.filename)
self.cfg.remove_section(name)
self.cfg.write(open(self.filename, "w"))
except ConfigParser.ParsingError, e:
print e
return e<|fim▁end|> | """
def __init__(self, filename):
self.filename = filename |
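A brief usage sketch for the ConfigManager/ParseIniFile pair above (the file name, section, and option are hypothetical): create() parses the INI file once into the class-level dict, and getvalue() then reads individual options, logging an error and returning None on a bad lookup.
# Hypothetical example, not part of the original module
ConfigManager.create("settings.ini")              # e.g. a file containing a [server] section with host = 127.0.0.1
host = ConfigManager.getvalue("server", "host")   # -> "127.0.0.1", or None if the key is missing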
<|file_name|>_ticksuffix.py<|end_file_name|><|fim▁begin|>import _plotly_utils.basevalidators
<|fim▁hole|>
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="ticksuffix", parent_name="densitymapbox.colorbar", **kwargs
):
super(TicksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)<|fim▁end|> | |
<|file_name|>test_agent_finder.py<|end_file_name|><|fim▁begin|>from unittest import TestCase
from agent_finder import find_subclasses
import opencog.cogserver
import test_agent
class HelperTest(TestCase):
def setUp(self):
pass<|fim▁hole|> def test_find_agents(self):
x=find_subclasses(test_agent,opencog.cogserver.MindAgent)
self.assertEqual(len(x),1)
self.assertEqual(x[0][0], 'TestAgent')<|fim▁end|> |
def tearDown(self):
pass
|
<|file_name|>dev.js<|end_file_name|><|fim▁begin|>var path = require('path');
var webpack = require('webpack');
var _ = require('lodash');
var baseConfig = require('./base');
// Add needed plugins here
var BowerWebpackPlugin = require('bower-webpack-plugin');
var config = _.merge({
entry: [
'webpack-dev-server/client?http://127.0.0.1:8000',
'webpack/hot/only-dev-server',
'./src/components/run'
],
cache: true,
devtool: 'eval',
plugins: [
new webpack.HotModuleReplacementPlugin(),
new webpack.NoErrorsPlugin(),
new BowerWebpackPlugin({
searchResolveModulesDirectories: false
})
]
}, baseConfig);
// Add needed loaders
config.module.loaders.push({
test: /\.(js|jsx)$/,
loader: 'react-hot!babel-loader',
include: path.join(__dirname, '/../src')<|fim▁hole|>
module.exports = config;<|fim▁end|> | }); |
<|file_name|>lcls.py<|end_file_name|><|fim▁begin|># --------------------------------------------------------------------------------------
# Copyright 2016, Benedikt J. Daurer, Filipe R.N.C. Maia, Max F. Hantke, Carl Nettelblad
# Hummingbird is distributed under the terms of the Simplified BSD License.
# -------------------------------------------------------------------------
"""Translates between LCLS events and Hummingbird ones"""
from __future__ import print_function # Compatibility with python 2 and 3
import os
import logging
from backend.event_translator import EventTranslator
from backend.record import Record, add_record
import psana
import numpy
import datetime
from pytz import timezone
from . import ureg
from backend import Worker
import ipc
from hummingbird import parse_cmdline_args
_argparser = None
def add_cmdline_args():
global _argparser
from utils.cmdline_args import argparser
_argparser = argparser
group = _argparser.add_argument_group('LCLS', 'Options for the LCLS event translator')
group.add_argument('--lcls-run-number', metavar='lcls_run_number', nargs='?',
help="run number",
type=int)
group.add_argument('--lcls-number-of-frames', metavar='lcls_number_of_frames', nargs='?',
help="number of frames to be processed",
type=int)
# ADUthreshold for offline analysis
#group.add_argument('--ADUthreshold', metavar='ADUthreshold', nargs='?',
# help="ADU threshold",
# type=int)
# Hitscore threshold for offline analysis
#group.add_argument('--hitscore-thr', metavar='hitscore_thr', nargs='?',
# help="Hitscore threshold",
# type=int)
# Output directory for offline analysis
#group.add_argument('--out-dir', metavar='out_dir', nargs='?',
# help="Output directory",
# type=str)
# Reduce output from offline analysis
#group.add_argument('--reduced-output',
# help="Write only very few data to output file",
# action='store_true')
PNCCD_IDS = ['pnccdFront', 'pnccdBack']
ACQ_IDS = [('ACQ%i' % i) for i in range(1,4+1)]
class LCLSTranslator(object):
"""Translate between LCLS events and Hummingbird ones"""
def __init__(self, state):
self.timestamps = None
self.library = 'psana'
config_file = None
if('LCLS/PsanaConf' in state):
config_file = os.path.abspath(state['LCLS/PsanaConf'])
elif('LCLS' in state and 'PsanaConf' in state['LCLS']):
config_file = os.path.abspath(state['LCLS']['PsanaConf'])
if(config_file is not None):
if(not os.path.isfile(config_file)):
raise RuntimeError("Could not find [LCLS][PsanaConf]: %s" %
(config_file))
logging.info("Info: Found configuration file %s.", config_file)
psana.setConfigFile(config_file)
if 'LCLS/CalibDir' in state:
calibdir = state['LCLS/CalibDir']
logging.info("Setting calib-dir to %s" % calibdir)
psana.setOption('psana.calib-dir', calibdir)
elif('LCLS' in state and 'CalibDir' in state['LCLS']):
calibdir = state['LCLS']['CalibDir']
logging.info("Setting calib-dir to %s" % calibdir)
psana.setOption('psana.calib-dir', calibdir)
if('LCLS/DataSource' in state):
dsrc = state['LCLS/DataSource']
elif('LCLS' in state and 'DataSource' in state['LCLS']):
dsrc = state['LCLS']['DataSource']
else:
raise ValueError("You need to set the '[LCLS][DataSource]'"
" in the configuration")
cmdline_args = _argparser.parse_args()
self.N = cmdline_args.lcls_number_of_frames
if cmdline_args.lcls_run_number is not None:
dsrc += ":run=%i" % cmdline_args.lcls_run_number
# Cache times of events that shall be extracted from XTC (does not work for stream)
self.event_slice = slice(0,None,1)
if 'times' in state or 'fiducials' in state:
if not ('times' in state and 'fiducials' in state):
raise ValueError("Times or fiducials missing in state."
" Extraction of selected events expects both event identifiers")
if dsrc[:len('exp=')] != 'exp=':
raise ValueError("Extraction of events with given times and fiducials"
" only works when reading from XTC with index files")
if dsrc[-len(':idx'):] != ':idx':
dsrc += ':idx'
self.times = state['times']
self.fiducials = state['fiducials']
self.i = 0
self.data_source = psana.DataSource(dsrc)
self.run = self.data_source.runs().next()
elif 'indexing' in state:
if dsrc[-len(':idx'):] != ':idx':
dsrc += ':idx'
if 'index_offset' in state:
self.i = state['index_offset'] / ipc.mpi.nr_event_readers()
else:
self.i = 0
self.data_source = psana.DataSource(dsrc)
self.run = self.data_source.runs().next()
self.timestamps = self.run.times()
if self.N is not None:
self.timestamps = self.timestamps[:self.N]
self.timestamps = self.timestamps[ipc.mpi.event_reader_rank()::ipc.mpi.nr_event_readers()]
else:
self.times = None
self.fiducials = None
self.i = 0
if not dsrc.startswith('shmem='):
self.event_slice = slice(ipc.mpi.event_reader_rank(), None, ipc.mpi.nr_event_readers())
self.data_source = psana.DataSource(dsrc)
self.run = None
# Define how to translate between LCLS types and Hummingbird ones
self._n2c = {}
self._n2c[psana.Bld.BldDataFEEGasDetEnergy] = 'pulseEnergies'
self._n2c[psana.Bld.BldDataFEEGasDetEnergyV1] = 'pulseEnergies'
self._n2c[psana.Lusi.IpmFexV1] = 'pulseEnergies'
self._n2c[psana.Camera.FrameV1] = 'camera'
# Guard against old(er) psana versions
try:
self._n2c[psana.Bld.BldDataEBeamV1] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV2] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV3] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV4] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV5] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV6] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV7] = 'photonEnergies'
except AttributeError:
pass
# CXI (CsPad)
self._n2c[psana.CsPad.DataV2] = 'photonPixelDetectors'
self._n2c[psana.CsPad2x2.ElementV1] = 'photonPixelDetectors'
# CXI (OffAxis Cam)
#self._n2c[psana.Camera.FrameV1] = 'photonPixelDetectors'
# AMO (pnCCD)
self._n2c[psana.PNCCD.FullFrameV1] = 'photonPixelDetectors'
self._n2c[psana.PNCCD.FramesV1] = 'photonPixelDetectors'
# --
self._n2c[psana.Acqiris.DataDescV1] = 'ionTOFs'
self._n2c[psana.EventId] = 'eventID'
# Guard against old(er) psana versions
try:
self._n2c[psana.EvrData.DataV3] = 'eventCodes'
self._n2c[psana.EvrData.DataV4] = 'eventCodes'
except AttributeError:
pass
# Calculate the inverse mapping
self._c2n = {}
for k, v in self._n2c.iteritems():
self._c2n[v] = self._c2n.get(v, [])
self._c2n[v].append(k)
# Define how to translate between LCLS sources and Hummingbird ones
self._s2c = {}
# CXI (OnAxis Cam)
self._s2c['DetInfo(CxiEndstation.0:Opal4000.1)'] = 'Sc2Questar'
# CXI (OffAxis Cam)
self._s2c['DetInfo(CxiEndstation.0.Opal11000.0)'] = 'Sc2Offaxis'
# CXI (CsPad)
self._s2c['DetInfo(CxiDs1.0:Cspad.0)'] = 'CsPad Ds1'
self._s2c['DetInfo(CxiDsd.0:Cspad.0)'] = 'CsPad Dsd'
self._s2c['DetInfo(CxiDs2.0:Cspad.0)'] = 'CsPad Ds2'
self._s2c['DetInfo(CxiDg3.0:Cspad2x2.0)'] = 'CsPad Dg3'
self._s2c['DetInfo(CxiDg2.0:Cspad2x2.0)'] = 'CsPad Dg2'
# AMO (pnCCD)
self._s2c['DetInfo(Camp.0:pnCCD.1)'] = 'pnccdBack'
self._s2c['DetInfo(Camp.0:pnCCD.0)'] = 'pnccdFront'
# ToF detector
self._s2c['DetInfo(AmoEndstation.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(AmoEndstation.0:Acqiris.1)'] = 'Acqiris 1'
self._s2c['DetInfo(AmoEndstation.0:Acqiris.2)'] = 'Acqiris 2'
# AMO (Acqiris)
self._s2c['DetInfo(AmoETOF.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(AmoETOF.0:Acqiris.1)'] = 'Acqiris 1'
self._s2c['DetInfo(AmoITOF.0:Acqiris.0)'] = 'Acqiris 2'
self._s2c['DetInfo(AmoITOF.0:Acqiris.1)'] = 'Acqiris 3'
# MCP Camera
self._s2c['DetInfo(AmoEndstation.0:Opal1000.1)'] = 'OPAL1'
# CXI (Acqiris)
self._s2c['DetInfo(CxiEndstation.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(CxiEndstation.0:Acqiris.1)'] = 'Acqiris 1'
self.init_detectors(state)
#print("Detectors:" , psana.DetNames())
def init_detectors(self, state):
# New psana call pattern
self._detectors = {}
self._c2id_detectors = {}
if 'detectors' in state:
for detid, det_dict in state['detectors'].items():
if detid in PNCCD_IDS:
self._detectors[detid] = {}
self._detectors[detid]['id'] = det_dict['id']
self._detectors[detid]['type'] = det_dict['type']
self._detectors[detid]['key'] = det_dict['key']
obj = psana.Detector(det_dict['id'])
self._detectors[detid]['obj'] = obj
meth = det_dict['data_method']
if meth == "image":
f = lambda obj, evt: obj.image(evt)
elif meth == "calib":
f = lambda obj, evt: obj.calib(evt)
elif meth == "raw":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
raw = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True)
return raw
elif meth == "calib_pc":
def f(obj, evt):
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
return cdata
elif meth == "calib_cmc":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
rnum = obj.runnum(evt)
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
obj.common_mode_apply(rnum, cdata, cmpars=None)
return cdata
elif meth == "calib_gc":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
rnum = obj.runnum(evt)
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
obj.common_mode_apply(rnum, cdata, cmpars=None)
gain = obj.gain(evt)
cdata *= gain
return cdata
else:
raise RuntimeError('data_method = %s not supported' % meth)
self._detectors[detid]['data_method'] = f
self._c2id_detectors[det_dict['type']] = detid
print("Set data method for detector id %s to %s." % (det_dict['id'], meth))
elif detid in ACQ_IDS:
self._detectors[detid] = {}
self._detectors[detid]['id'] = det_dict['id']
self._detectors[detid]['type'] = det_dict['type']
self._detectors[detid]['keys'] = det_dict['keys']
obj = psana.Detector(det_dict['id'])
self._detectors[detid]['obj'] = obj
self._c2id_detectors[det_dict['type']] = detid
else:
raise RuntimeError('Detector type = %s not implememented for ID %s' % (det_dict['type'], detid))
def next_event(self):
"""Grabs the next event and returns the translated version"""
if self.timestamps:
try:
evt = self.run.event(self.timestamps[self.i])
except (IndexError, StopIteration) as e:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
self.i += 1
elif self.times is not None:
evt = None
while self.i < len(self.times) and evt is None:
time = psana.EventTime(int(self.times[self.i]), self.fiducials[self.i])
self.i += 1
evt = self.run.event(time)<|fim▁hole|> # We got to the end without a valid event, time to call it a day
if evt is None:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
else:
try:
while (self.i % self.event_slice.step) != self.event_slice.start:
evt = self.data_source.events().next()
self.i += 1
if self.N is not None and self.i >= self.N:
raise StopIteration
evt = self.data_source.events().next()
self.i += 1
except StopIteration:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
return EventTranslator(evt, self)
def event_keys(self, evt):
"""Returns the translated keys available"""
native_keys = evt.keys()
common_keys = set()
for k in native_keys:
for c in self._native_to_common(k):
common_keys.add(c)
# parameters corresponds to the EPICS values, analysis is for values added later on
return list(common_keys)+['parameters']+['analysis']
def _native_to_common(self, key):
"""Translates a native key to a hummingbird one"""
if(key.type() in self._n2c):
return [self._n2c[key.type()]]
else:
return []
def event_native_keys(self, evt):
"""Returns the native keys available"""
return evt.keys()
def translate(self, evt, key):
"""Returns a dict of Records that match a given humminbird key"""
values = {}
if(key in self._c2id_detectors):
return self.translate_object(evt, key)
elif(key in self._c2n):
return self.translate_core(evt, key)
elif(key == 'parameters'):
return self._tr_epics()
elif(key == 'analysis'):
return {}
elif(key == 'stream'):
return {}
else:
# check if the key matches any of the existing keys in the event
event_keys = evt.keys()
values = {}
found = False
for event_key in event_keys:
if(event_key.key() == key):
obj = evt.get(event_key.type(), event_key.src(), event_key.key())
found = True
add_record(values, 'native', '%s[%s]' % (self._s2c[str(event_key.src())], key),
obj, ureg.ADU)
if(found):
return values
else:
print('%s not found in event' % (key))
def translate_object(self, evt, key):
values = {}
detid = self._c2id_detectors[key]
if detid in PNCCD_IDS:
det = self._detectors[detid]
obj = self._detectors[detid]['obj']
data_nda = det['data_method'](obj, evt)
if data_nda is None:
image = None
elif len(data_nda.shape) <= 2:
image = data_nda
elif len(data_nda.shape) == 3:
image = numpy.hstack([numpy.vstack([data_nda[0],data_nda[1][::-1,::-1]]),
numpy.vstack([data_nda[3],data_nda[2][::-1,::-1]])])
add_record(values, det['type'], det['key'], image, ureg.ADU)
elif detid in ACQ_IDS:
det = self._detectors[detid]
# waveforms are in Volts, times are in Seconds
obj = det['obj']
waveforms = obj.waveform(evt)
#print("waveforms", waveforms)
#times = obj.wftime(evt)
for i, wf in enumerate(waveforms):
add_record(values, det['type'], det['keys'][i], wf, ureg.V)
else:
raise RuntimeError('%s not yet supported' % key)
return values
def translate_core(self, evt, key):
"""Returns a dict of Records that matchs a core Hummingbird key.
Core keys include all except: parameters, any psana create key,
any native key."""
values = {}
native_keys = self._c2n[key]
event_keys = evt.keys()
for k in event_keys:
if(k.type() in native_keys):
obj = evt.get(k.type(), k.src(), k.key())
if(isinstance(obj, psana.Bld.BldDataFEEGasDetEnergy) or
isinstance(obj, psana.Bld.BldDataFEEGasDetEnergyV1)):
self._tr_bld_data_fee_gas_det_energy(values, obj)
elif(isinstance(obj, psana.Lusi.IpmFexV1)):
self._tr_lusi_ipm_fex(values, obj, k)
elif(key == 'photonEnergies'):
self._tr_bld_data_ebeam(values, obj)
elif(isinstance(obj, psana.CsPad2x2.ElementV1)):
self._tr_cspad2x2(values, obj)
elif(isinstance(obj, psana.CsPad.DataV2)):
self._tr_cspad(values, obj, k)
# AMO
elif(isinstance(obj, psana.PNCCD.FullFrameV1)):
self._tr_pnccdFullFrame(values, obj, k)
elif(isinstance(obj, psana.PNCCD.FramesV1)):
self._tr_pnccdFrames(values, obj, k)
# --
elif(isinstance(obj, psana.Acqiris.DataDescV1)):
self._tr_acqiris(values, obj, k)
elif(isinstance(obj, psana.Camera.FrameV1)):
self._tr_camera(values, obj)
elif(isinstance(obj, psana.EventId)):
self._tr_event_id(values, obj)
elif(isinstance(obj, psana.EvrData.DataV3) or
isinstance(obj, psana.EvrData.DataV4)):
self._tr_event_codes(values, obj)
else:
print(type(obj))
print(k)
raise RuntimeError('%s not yet supported' % (type(obj)))
return values
def event_id(self, evt):
"""Returns an id which should be unique for each
shot and increase monotonically"""
return self.translate(evt, 'eventID')['Timestamp'].timestamp
def event_id2(self, evt):
"""Returns the LCLS time, a 64-bit integer as an alterative ID"""
return self.translate(evt, 'eventID')['Timestamp'].timestamp2
def _tr_bld_data_ebeam(self, values, obj):
"""Translates BldDataEBeam to hummingbird photon energy and other beam properties"""
try:
photon_energy_ev = obj.ebeamPhotonEnergy()
except AttributeError:
peak_current = obj.ebeamPkCurrBC2()
dl2_energy_gev = 0.001*obj.ebeamL3Energy()
ltu_wake_loss = 0.0016293*peak_current
# Spontaneous radiation loss per segment
sr_loss_per_segment = 0.63*dl2_energy_gev
# wakeloss in an undulator segment
wake_loss_per_segment = 0.0003*peak_current
# energy loss per segment
energy_loss_per_segment = (sr_loss_per_segment +
wake_loss_per_segment)
# energy in first active undulator segment [GeV]
energy_profile = (dl2_energy_gev - 0.001*ltu_wake_loss -
0.0005*energy_loss_per_segment)
# Calculate the resonant photon energy of the first active segment
photon_energy_ev = 44.42*energy_profile*energy_profile
add_record(values, 'photonEnergies', 'photonEnergy', photon_energy_ev, ureg.eV)
try:
ebeam_ang_x = obj.ebeamLTUAngX()
ebeam_ang_y = obj.ebeamLTUAngY()
ebeam_pos_x = obj.ebeamLTUPosX()
ebeam_pos_y = obj.ebeamLTUPosY()
ebeam_charge = obj.ebeamCharge()
add_record(values, 'photonEnergies', 'angX', ebeam_ang_x)
add_record(values, 'photonEnergies', 'angY', ebeam_ang_y)
add_record(values, 'photonEnergies', 'posX', ebeam_pos_x)
add_record(values, 'photonEnergies', 'posY', ebeam_pos_y)
add_record(values, 'photonEnergies', 'charge', ebeam_charge)
except AttributeError:
print("Couldn't translate electron beam properties from BldDataEBeam")
def _tr_bld_data_fee_gas_det_energy(self, values, obj):
"""Translates gas monitor detector to hummingbird pulse energy"""
# convert from mJ to J
add_record(values, 'pulseEnergies', 'f_11_ENRC', obj.f_11_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_12_ENRC', obj.f_12_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_21_ENRC', obj.f_21_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_22_ENRC', obj.f_22_ENRC(), ureg.mJ)
def _tr_lusi_ipm_fex(self, values, obj, evt_key):
"""Translates Ipm relative pulse energy monitor
to hummingbird pulse energy"""
add_record(values, 'pulseEnergies', 'IpmFex - '+str(evt_key.src()), obj.sum(), ureg.ADU)
def _tr_cspad2x2(self, values, obj):
"""Translates CsPad2x2 to hummingbird numpy array"""
try:
add_record(values, 'photonPixelDetectors', 'CsPad2x2S', obj.data(), ureg.ADU)
except AttributeError:
add_record(values, 'photonPixelDetectors', 'CsPad2x2', obj.data16(), ureg.ADU)
def _tr_camera(self, values, obj):
"""Translates Camera frame to hummingbird numpy array"""
#if obj.depth == 16 or obj.depth() == 12:
# data = obj.data16()
# print(data.shape)
#else:
# data = obj.data8()
# print(data.shape)
data = obj.data16()
# off Axis cam at CXI
#if data.shape == (1024,1024):
# add_record(values, 'camera', 'offAxis', data, ureg.ADU)
# MCP (PNCCD replacement) at AMO (June 2016)
if data.shape == (1024,1024):
add_record(values, 'camera', 'mcp', data, ureg.ADU)
if data.shape == (1752,2336):
add_record(values, 'camera', 'onAxis', data, ureg.ADU)
def _tr_cspad(self, values, obj, evt_key):
"""Translates CsPad to hummingbird numpy array, quad by quad"""
n_quads = obj.quads_shape()[0]
for i in range(0, n_quads):
add_record(values, 'photonPixelDetectors', '%sQuad%d' % (self._s2c[str(evt_key.src())], i),
obj.quads(i).data(), ureg.ADU)
def _tr_pnccdFullFrame(self, values, obj, evt_key):
"""Translates full pnCCD frame to hummingbird numpy array"""
add_record(values, 'photonPixelDetectors', '%sfullFrame' % self._s2c[str(evt_key.src())], obj.data(), ureg.ADU)
def _tr_pnccdFrames(self, values, obj, evt_key):
"""Translates pnCCD frames to hummingbird numpy array, frame by frame"""
n_frames = obj.frame_shape()[0]
for i in range(0, n_frames):
add_record(values, 'photonPixelDetectors', '%sFrame%d' % (self._s2c[str(evt_key.src())], i),
obj.frame(i).data(), ureg.ADU)
def _tr_acqiris(self, values, obj, evt_key):
"""Translates Acqiris TOF data to hummingbird numpy array"""
config_store = self.data_source.env().configStore()
acq_config = config_store.get(psana.Acqiris.ConfigV1, evt_key.src())
samp_interval = acq_config.horiz().sampInterval()
n_channels = obj.data_shape()[0]
for i in range(0, n_channels):
vert = acq_config.vert()[i]
elem = obj.data(i)
timestamp = elem.timestamp()[0].value()
raw = elem.waveforms()[0]
if(elem.nbrSamplesInSeg() == 0):
logging.warning("Warning: TOF data for "
"detector %s is missing.", evt_key)
data = raw*vert.slope() - vert.offset()
rec = Record('%s Channel %d' %(self._s2c[str(evt_key.src())], i),
data, ureg.V)
rec.time = (timestamp +
samp_interval * numpy.arange(0, elem.nbrSamplesInSeg()))
values[rec.name] = rec
def _tr_event_id(self, values, obj):
"""Translates LCLS eventID into a hummingbird one"""
timestamp = obj.time()[0]+obj.time()[1]*1e-9
time = datetime.datetime.fromtimestamp(timestamp, tz=timezone('utc'))
time = time.astimezone(tz=timezone('US/Pacific'))
rec = Record('Timestamp', time, ureg.s)
time = datetime.datetime.fromtimestamp(obj.time()[0])
rec.datetime64 = numpy.datetime64(time, 'ns')+obj.time()[1]
rec.fiducials = obj.fiducials()
rec.run = obj.run()
rec.ticks = obj.ticks()
rec.vector = obj.vector()
rec.timestamp = timestamp
rec.timestamp2 = obj.time()[0] << 32 | obj.time()[1]
values[rec.name] = rec
def _tr_event_codes(self, values, obj):
"""Translates LCLS event codes into a hummingbird ones"""
codes = []
for fifo_event in obj.fifoEvents():
codes.append(fifo_event.eventCode())
add_record(values, 'eventCodes', 'EvrEventCodes', codes)
def _tr_epics(self):
"""Returns an EPICSdict that provides access to EPICS parameters.
Check the EPICSdict class for more details.
"""
return EPICSdict(self.data_source.env().epicsStore())
class EPICSdict(object):
"""Provides a dict-like interface to EPICS parameters.
Translating all the parameters up front is too slow.
Instead, parameters are only translated as they are needed,
i.e. when they are accessed through this class.
"""
def __init__(self, epics):
self.epics = epics
self._cache = {}
self._keys = None
def keys(self):
"""Returns available EPICS names"""
if self._keys is None:
self._keys = self.epics.pvNames() + self.epics.aliases()
return self._keys
def len(self):
"""Returns the length of the dictionary"""
return len(self.keys())
def __getitem__(self, key):
"""Calls psana to retrieve and translate the EPICS item"""
if(key not in self._cache):
pv = self.epics.getPV(key)
if(pv is None):
raise KeyError('%s is not a valid EPICS key' %(key))
rec = Record(key, pv.value(0))
rec.pv = pv
self._cache[key] = rec
return self._cache[key]<|fim▁end|> | if evt is None:
print("Unable to find event listed in index file") |
<|file_name|>engine_batched_task.hpp<|end_file_name|><|fim▁begin|>// Copyright 2015 Patrick Putnam
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ENGINE_BATCHED_TASK_HPP_
#define ENGINE_BATCHED_TASK_HPP_
#ifdef DEBUG_MODE
#define DEBUGGING 0
#endif // DEBUG_MODE
#include "qtlsim_logger.hpp"
#include <boost/property_tree/ptree.hpp>
#include "clotho/genetics/population_growth_toolkit.hpp"
#include "clotho/data_spaces/allele_space/allele_space_vector.hpp"
#include "clotho/data_spaces/allele_space/allele_generator_vector.hpp"
#include "clotho/data_spaces/phenotype_evaluator/trait_space_vector.hpp"
//#include "clotho/data_spaces/phenotype_evaluator/trait_space_generator.hpp"
#include "clotho/data_spaces/phenotype_evaluator/trait_accumulator.hpp"
#include "clotho/data_spaces/free_space/free_space_mts.hpp"
#ifdef USE_BATCH_JOBS
#include "clotho/data_spaces/crossover/batch_crossover_mts.hpp"
#ifdef USE_CROSSOVER_EVENT_POOLING
#define CROSSOVER_TYPE clotho::genetics::BatchCrossoverMTWE
#else
#define CROSSOVER_TYPE clotho::genetics::BatchCrossoverMT
#endif // USE_CROSSOVER_EVENT_POOLING
#include "clotho/data_spaces/phenotype_evaluator/batch_phenotype_mts.hpp"
#define PHENOTYPE_TYPE clotho::genetics::BatchPhenotypeMT
#else
#include "clotho/data_spaces/crossover/crossover_mt.hpp"
#define CROSSOVER_TYPE clotho::genetics::CrossoverMT
#include "clotho/data_spaces/phenotype_evaluator/phenotype_mt.hpp"
#define PHENOTYPE_TYPE clotho::genetics::PhenotypeMT
#endif // USE_BATCH_JOBS
#include "clotho/data_spaces/population_space/population_spaces.hpp"
#include "clotho/data_spaces/selection/selection.hpp"
#include "clotho/data_spaces/mutation/batch_mutation_mt.hpp"
#include "clotho/data_spaces/fitness/general_fitness.hpp"
#include "clotho/utility/state_object.hpp"
#include "clotho/data_spaces/task/thread_pool.hpp"
struct batched_task {} ;
template < class RNG, class RealType, class BlockType, class SizeType >
class Engine< RNG, RealType, BlockType, SizeType, batched_task > {
public:
typedef Engine< RNG, RealType, BlockType, SizeType, batched_task > self_type;
typedef RealType position_type;
typedef RealType weight_type;
typedef weight_type * phenotype_type;
typedef BlockType block_type;
typedef RNG random_engine_type;
typedef SizeType size_type;
typedef clotho::genetics::thread_pool< RNG > thread_pool_type;
typedef clotho::genetics::AlleleSpace< position_type, size_type > allele_type;
#ifdef USE_ROW_MODIFICATION
typedef clotho::genetics::population_space_row_modified< block_type, weight_type > sequence_space_type;
#else
typedef clotho::genetics::population_space_row< block_type, weight_type > sequence_space_type;
#endif // USE_ROW_MODIFICATION
typedef clotho::genetics::trait_space_vector< weight_type > trait_space_type;
typedef clotho::genetics::FreeSpaceAnalyzerMT< sequence_space_type, size_type > free_space_type;
typedef clotho::genetics::mutation_allocator< random_engine_type, size_type > mutation_alloc_type;
typedef clotho::genetics::BatchMutationMT< random_engine_type, sequence_space_type, allele_type, free_space_type, trait_space_type > mutation_type;
typedef clotho::genetics::GeneralFitness fitness_type;
typedef clotho::genetics::SelectionGenerator< random_engine_type, clotho::genetics::fitness_selection< fitness_type > > selection_type;
typedef CROSSOVER_TYPE< random_engine_type, sequence_space_type, allele_type > crossover_type;
typedef PHENOTYPE_TYPE< sequence_space_type, trait_space_type > phenotype_eval_type;
typedef std::shared_ptr< ipopulation_growth_generator > population_growth_generator_type;
typedef std::shared_ptr< ipopulation_growth > population_growth_type;
friend struct clotho::utility::state_getter< self_type >;
Engine( random_engine_type * rng, boost::property_tree::ptree & config ) :
m_rand( rng )
, m_parent( &m_pop0 )
, m_child( &m_pop1 )
, m_trait_space( config )
, m_fixed_traits( config )
, m_thread_pool( rng, config )
, m_free_space( )
, select_gen( rng, config )
, cross_gen( rng, config )
, mutate_gen( rng, config )
, m_fit( config )
, m_generation( 0 )
, m_pop_growth()
{
population_growth_generator_type tmp = population_growth_toolkit::getInstance()->get_tool( config );
if( tmp ) {
m_pop_growth = tmp->generate();
if( m_pop_growth ) {
m_pop_growth->log( std::cerr );
std::cerr << std::endl;
}
} else {
population_growth_toolkit::getInstance()->tool_configurations( config );
}
init(0);
}
size_t getGeneration() const {
return m_generation;
}
void init( size_t aN ) {
size_t pN = 0;
if( m_pop_growth ) {
pN = m_pop_growth->operator()( pN, m_generation );
}
m_pop0.grow( pN, aN, m_trait_space.trait_count() );
m_pop1.grow( pN, aN, m_trait_space.trait_count() );
m_pop1.clear();
m_pop0.clear();
m_fit.resize( pN );
#ifdef USE_ROW_VECTOR
m_pop1.getSequenceSpace().fill_empty();
m_pop1.getSequenceSpace().finalize();
#endif // USE_ROW_VECTOR
++m_generation;
}
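    // simulate() advances the system by one generation (summary of the steps below):
    //   1. swap the child and parent populations
    //   2. move alleles fixed in the (new) parent population into the fixed set and measure free space
    //   3. rescale and grow the child population for the expected number of new mutations
    //   4. run selection and crossover, then add the new mutations
    //   5. evaluate phenotypes and recompute fitness for the next round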
void simulate( ) {
std::swap( m_child, m_parent ); // use the current child population as the parent population for the next round
        // At the start of each simulate round, m_fit has already been updated from the previous
        // round with the fitness of the "then child/now parent" population.
//
size_t pN = select_gen.individual_count();
if( m_pop_growth ) {
pN = m_pop_growth->operator()( pN, m_generation );
}
// size_type pM = m_mut_alloc.allocate( 2 * pN ); // generate the number of new mutations
size_type pM = mutate_gen.generateNewMutation( 2 * pN );
timer_type fix_time;
size_type free_count = updateFixedAlleles( m_parent ); // update the fixed alleles with those of parent population
fix_time.stop();
size_t all_size = child_max_alleles( m_allele_space.size(), free_count, pM ); // rescale allele space for child population given free space from parent population and new allele count (pM)
#ifdef DEBUGGING
BOOST_LOG_TRIVIAL( debug ) << "Generation " << m_generation << ": " << pN << " individuals; " << pM << " new alleles";
BOOST_LOG_TRIVIAL( debug ) << "Free space: " << free_count << "; alleles: " << m_allele_space.size();
BOOST_LOG_TRIVIAL( debug ) << "Rescaling child population to be: " << pN << " individuals x " << all_size << " alleles";
std::cerr << "Generation " << m_generation << ": " << pN << " individuals; " << pM << " new alleles" << std::endl;
std::cerr << "Rescaling child population to be: " << pN << " individuals x " << all_size << " alleles" << std::endl;
#endif // DEBUGGING
m_child->grow( pN, all_size, m_trait_space.trait_count() ); // grow the child population accordingly
// m_allele_space.alignNeutralToPopulation( m_child->getMaxBlocks() );
select_gen.update( m_fit, pN );
timer_type xover_time;
cross_gen( select_gen, m_parent, m_child, &m_allele_space, m_thread_pool );
xover_time.stop();
timer_type mutate_time;
mutate_gen( m_child, &m_allele_space, &m_trait_space, &m_free_space, pM, m_generation, m_thread_pool, 1 );
mutate_time.stop();
timer_type pheno_time;
//if( !m_allele_space.isAllNeutral() ) {
if( !m_trait_space.isAllNeutral() ) {
m_pheno( m_parent, m_child, &m_trait_space, m_thread_pool );
} else {
m_pheno.constant_phenotype( m_child, &m_trait_space );
}
pheno_time.stop();
m_fit( m_pheno );
clotho::utility::add_value_array( fix_times, fix_time );
clotho::utility::add_value_array( xover_times, xover_time );
clotho::utility::add_value_array( mutate_times, mutate_time );
clotho::utility::add_value_array( pheno_times, pheno_time );
clotho::utility::add_value_array( free_sizes, free_count );
clotho::utility::add_value_array( var_sizes, m_free_space.variable_count() );
clotho::utility::add_value_array( fixed_sizes, m_free_space.fixed_size() );
++m_generation;
}
sequence_space_type * getChildPopulation() const {
return m_child;
}
sequence_space_type * getParentPopulation() const {
return m_parent;
}
void getPerformanceResults( boost::property_tree::ptree & log ) {
log.put_child( "performance.mutate", mutate_times );
log.put_child( "performance.crossover", xover_times );
log.put_child( "performance.fixed", fix_times );
log.put_child( "performance.phenotypes", pheno_times );
log.put_child( "memory.free_count", free_sizes );
log.put_child( "memory.variable_count", var_sizes );
log.put_child( "memory.fixed_count", fixed_sizes );
}
allele_type * getAlleleSpace() {
return &m_allele_space;
}
virtual ~Engine() { }
protected:
// void generate_child_mutations( unsigned int N ) {
//// std::cerr << "Child population size: " << m_child->haploid_genome_count() << std::endl;
// typename mutation_type::sequence_distribution_type seq_gen( 0, m_child->haploid_genome_count() - 1);
//
// typename free_space_type::base_type::iterator it = m_free_space.free_begin(), end = m_free_space.free_end();
// while( N && it != end ) {
// typename free_space_type::size_type all_idx = *it++;
// unsigned int seq_idx = seq_gen( *m_rand );
//
// mutate_gen( m_child, seq_idx, all_idx );
// allele_gen( m_allele_space, all_idx, m_generation );
// trait_gen( m_trait_space, all_idx );
// --N;
// }
//
// while( N ) {
// typename free_space_type::size_type all_idx = m_allele_space.size();
// unsigned int seq_idx = seq_gen( *m_rand );
//
// assert( all_idx < m_child->getMaxAlleles() );
//
// mutate_gen( m_child, seq_idx, all_idx );
// allele_gen( m_allele_space, all_idx, m_generation );
// trait_gen( m_trait_space, all_idx );
// --N;
// }
// }
/**
     * Estimate the maximum number of alleles in the child population.
     *
     * N_parent - number of alleles in the parent population
     * F_parent - number of free alleles in the parent population
     * M_child  - number of new alleles to be added to the child population
*/
size_t child_max_alleles( size_t N_parent, size_t F_parent, size_t M_child ) const {
#ifdef DEBUGGING
BOOST_LOG_TRIVIAL(info) << "Parent alleles: " << N_parent << "; Free: " << F_parent << "; New Alleles: " << M_child;
std::cerr << "Parent alleles: " << N_parent << "; Free: " << F_parent << "; New Alleles: " << M_child << std::endl;
#endif // DEBUGGING
if( F_parent >= M_child ) {
// if there are more free alleles in the parent generation
// than there are new alleles to be added to the child generation
// then do not adjust scale of the allele space
return N_parent;
} else {
return N_parent + (M_child - F_parent);
}
}
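    // Worked example (hypothetical numbers): with N_parent = 100 alleles, F_parent = 10 free
    // columns and M_child = 25 new mutations, the child space must grow to
    // 100 + (25 - 10) = 115 alleles; with M_child <= 10 it would stay at 100.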
size_type updateFixedAlleles( sequence_space_type * ss ) {
m_free_space( ss, m_thread_pool ); // analyze the parent population sequence space
typedef typename free_space_type::iterator fixed_iterator;
typedef typename trait_space_type::iterator trait_iterator;
// std::cerr << "Fixed count: " << m_free_space.fixed_size() << std::endl;
fixed_iterator fix_it = m_free_space.fixed_begin();
fixed_iterator fix_end = m_free_space.fixed_end();
while( fix_it != fix_end ) {
size_type fixed_index = *fix_it++;
ss->remove_fixed_allele( fixed_index );
m_fixed.append( m_allele_space, fixed_index );
trait_iterator tstart = m_trait_space.begin( fixed_index ), tend = m_trait_space.end( fixed_index );
m_fixed_traits.append( tstart, tend );
}
#ifdef DEBUGGING
typedef typename free_space_type::iterator free_iterator;
free_iterator fr_it = m_free_space.free_begin();
free_iterator fr_end = m_free_space.free_end();
unsigned int j = 0;
while( fr_it != fr_end ) {
size_type i = *fr_it++;
if( !ss->freeColumn( i ) ) {
assert(false);
}
++j;
}
assert( j == m_free_space.free_size() );
#endif // DEBUGGING
return m_free_space.free_size();
}
random_engine_type * m_rand;
allele_type m_allele_space, m_fixed;
sequence_space_type m_pop0, m_pop1;
sequence_space_type * m_parent, * m_child;
trait_space_type m_trait_space, m_fixed_traits;
thread_pool_type m_thread_pool;<|fim▁hole|>
selection_type select_gen;
crossover_type cross_gen;
mutation_type mutate_gen;
fitness_type m_fit;
size_t m_generation;
population_growth_type m_pop_growth;
// mutation_alloc_type m_mut_alloc;
// trait_generator_type trait_gen;
// allele_generator_type allele_gen;
//
boost::property_tree::ptree fix_times, mutate_times, xover_times, pheno_times;
boost::property_tree::ptree free_sizes, var_sizes, fixed_sizes;
};
namespace clotho {
namespace utility {
template < class RNG, class RealType, class BlockType, class SizeType >
struct state_getter< Engine< RNG, RealType, BlockType, SizeType, batched_task > > {
typedef Engine< RNG, RealType, BlockType, SizeType, batched_task > object_type;
void operator()( boost::property_tree::ptree & s, object_type & obj ) {
boost::property_tree::ptree tr;
state_getter< typename object_type::trait_space_type > tr_logger;
tr_logger( tr, obj.m_trait_space );
boost::property_tree::ptree ph;
state_getter< typename object_type::phenotype_eval_type > pheno_logger;
pheno_logger( ph, obj.m_pheno );
boost::property_tree::ptree fr;
state_getter< typename object_type::free_space_type > free_logger;
free_logger( fr, obj.m_free_space );
boost::property_tree::ptree fx, alls;
state_getter< typename object_type::allele_type > all_logger;
all_logger( fx, obj.m_fixed );
all_logger( alls, obj.m_allele_space );
// boost::property_tree::ptree c_pop;
// state_getter< typename object_type::sequence_space_type > pop_logger;
// pop_logger( c_pop, *(obj.m_child) );
s.put_child( "phenotypes", ph );
s.put_child( "free_space", fr );
s.put_child( "allele_space", alls );
s.put_child( "trait_space", tr );
s.put_child( "fixed_alleles", fx );
// s.put_child( "child", c_pop );
}
};
}
}
#endif // ENGINE_BATCHED_TASK_HPP_<|fim▁end|> | phenotype_eval_type m_pheno;
free_space_type m_free_space; |
<|file_name|>jquery.numeric.input.js<|end_file_name|><|fim▁begin|>!function($) {
$(document).on("keydown", 'input[data-type="numeric"]', function (e)
{
var key = e.charCode || e.keyCode || 0;
// allow backspace, tab, delete, arrows, numbers and keypad numbers ONLY
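        // key code reference: 8 = Backspace, 9 = Tab, 46 = Delete,
        // 37-40 = arrow keys, 48-57 = top-row digits, 96-105 = keypad digits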
return (
key == 8 ||
key == 9 ||
key == 46 ||
(key >= 37 && key <= 40) ||
(key >= 48 && key <= 57) ||
(key >= 96 && key <= 105));
});<|fim▁hole|><|fim▁end|> | }(window.jQuery || window.ender); |
<|file_name|>type-params-in-for-each.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
// pretty-expanded FIXME #23616
struct S<T> {
a: T,
b: usize,
}
fn range_<F>(lo: usize, hi: usize, mut it: F) where F: FnMut(usize) {
let mut lo_ = lo;<|fim▁hole|> while lo_ < hi { it(lo_); lo_ += 1; }
}
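// For reference: `range_(0, 3, |i| println!("{}", i))` calls the closure with 0, 1 and 2.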
fn create_index<T>(_index: Vec<S<T>> , _hash_fn: extern fn(T) -> usize) {
range_(0, 256, |_i| {
let _bucket: Vec<T> = Vec::new();
})
}
pub fn main() { }<|fim▁end|> | |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Nahuel Riva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__revision__ = "$Id$"
__all__ = ['metadata', 'setup']
from distutils.core import setup
from distutils import version
from warnings import warn
import re
import os
import sys
import glob
# Distutils hack: in order to be able to build MSI installers with loose
# version numbers, we subclass StrictVersion to accept loose version numbers
# and convert them to the strict format. This works because Distutils will
# happily reinstall a package even if the version number matches exactly the
# one already installed on the system - so we can simply strip all extraneous
# characters and beta/postrelease version numbers will be treated just like
# the base version number.
if __name__ == '__main__':
StrictVersion = version.StrictVersion
class NotSoStrictVersion (StrictVersion):
def parse (self, vstring):
components = []
for token in vstring.split('.'):
token = token.strip()
match = re.search('^[0-9]+', token)
if match:
number = token[ match.start() : match.end() ]
components.append(number)
vstring = '.'.join(components)
return StrictVersion.parse(self, vstring)
version.StrictVersion = NotSoStrictVersion
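    # Example of the effect (hypothetical version string): NotSoStrictVersion('0.1-alpha4')
    # is parsed as if it were '0.1', so loose release tags are accepted by Distutils.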
# Get the base directory
here = os.path.dirname(__file__)
if not here:
here = os.path.curdir
# Text describing the module (reStructured text)<|fim▁hole|>except Exception:
warn("README file not found or unreadable!")
long_description = """pype32 is python library to read and write PE/PE+ binary files."""
# Get the list of scripts in the "tools" folder
scripts = glob.glob(os.path.join(here, 'tools', '*.py'))
# Set the parameters for the setup script
metadata = {
# Setup instructions
'provides' : ['pype32'],
'packages' : ['pype32'],
'scripts' : scripts,
# Metadata
'name' : 'pype32',
'version' : '0.1-alpha4',
'description' : 'Yet another Python library to read and write PE/PE+ files.',
'long_description' : long_description,
'author' : 'Nahuel Riva',
'author_email' : 'crackinglandia'+chr(64)+'gmail'+chr(0x2e)+'com',
'url' : 'https://github.com/crackinglandia/pype32',
'keywords' : ['pecoff', 'x86', 'x64', '.net', 'parser'],
'download_url' : 'https://github.com/crackinglandia/pype32/tarball/v0.1-alpha4',
}
# Execute the setup script
if __name__ == '__main__':
setup(**metadata)<|fim▁end|> | try:
readme = os.path.join(here, 'README')
long_description = open(readme, 'r').read() |
<|file_name|>update_subscription.py<|end_file_name|><|fim▁begin|>"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db import models
from storageadmin.models import Appliance
class UpdateSubscription(models.Model):
"""name of the channel. eg: stable"""
name = models.CharField(max_length=64, unique=True)
"""detailed description or a longer name"""
description = models.CharField(max_length=128)<|fim▁hole|> """url of the repo"""
url = models.CharField(max_length=512)
appliance = models.ForeignKey(Appliance)
password = models.CharField(max_length=64, null=True)
"""status of subscription: active, inactive, expired etc.."""
status = models.CharField(max_length=64)
class Meta:
app_label = 'storageadmin'<|fim▁end|> | |
<|file_name|>dualhypercubes.cc<|end_file_name|><|fim▁begin|>/* Exploit smf when computing the intersection of NNC dual hypercubes.
Copyright (C) 2001-2009 Roberto Bagnara <[email protected]>
This file is part of the Parma Polyhedra Library (PPL).
The PPL is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
The PPL is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307, USA.
For the most up-to-date information see the Parma Polyhedra Library
site: http://www.cs.unipr.it/ppl/ . */
#include "ppl_test.hh"
#include "timings.hh"
#include <vector>
#include <map>
// Define EXP_EVAL to 1 if you want to reproduce the results
// of the experimental evaluation reported in Table 2 of the paper:
// R. Bagnara, P.M. Hill, E. Zaffanella
// Not Necessarily Closed Convex Polyhedra and the Double Description Method.
// Formal Aspects of Computing, 17, 2 (2005), pp. 222-257.
#ifndef EXP_EVAL
#define EXP_EVAL 0
#endif
namespace {
void
closure_points_dual_hypercube(const dimension_type dims,
const Linear_Expression& weight_center,
const Coefficient& half_diagonal,
Generator_System& gs) {
// An ill-formed (it has no points at all) generator system
// for a dual hypercube.
for (dimension_type axis = dims; axis-- > 0; ) {
gs.insert(closure_point(weight_center + half_diagonal * Variable(axis)));
gs.insert(closure_point(weight_center - half_diagonal * Variable(axis)));
}
}
void
add_facets(dimension_type& to_be_added,
Generator_System& gs,
const Linear_Expression& expr,
const dimension_type axis,
const dimension_type dims,
const Linear_Expression& weight_center,
const Coefficient& half_diagonal) {
// Return if we have already added all facets.
if (to_be_added == 0)
return;
Linear_Expression expr1 = expr;
expr1 += half_diagonal * Variable(axis);
Linear_Expression expr2 = expr;
expr2 -= half_diagonal * Variable(axis);
if (axis == 0) {
gs.insert(point(dims * weight_center + expr1, dims));
--to_be_added;
if (to_be_added == 0)
return;
gs.insert(point(dims * weight_center + expr2, dims));
--to_be_added;
return;
}
// Here axis > 0.
// First recursive call with variable with index `axis'
// having coordinate 1/dims.
add_facets(to_be_added, gs, expr1,
axis-1, dims, weight_center, half_diagonal);
if (to_be_added == 0)
return;
// Second recursive call with variable with index `axis'
// having coordinate -1/dims.
add_facets(to_be_added, gs, expr2,
axis-1, dims, weight_center, half_diagonal);
}
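// Note: each recursive call fixes one axis coordinate to +half_diagonal/dims or
// -half_diagonal/dims around the weight center, so at most 2^dims facet points are generated.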
NNC_Polyhedron
NNC_dual_hypercube(const dimension_type dims,
const Linear_Expression& weight_center,
const Coefficient& half_diagonal,
const int facet_percentage) {
Generator_System gs;
closure_points_dual_hypercube(dims, weight_center, half_diagonal, gs);
// Number of facets in the closed dual hypercube.
dimension_type num_facets = 1;
for (dimension_type axis = dims; axis-- > 0; )
num_facets *= 2;
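  // num_facets is now 2^dims, the facet count of the closed dual hypercube.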
dimension_type facets_to_be_added = (num_facets * facet_percentage) / 100;
if (facets_to_be_added == 0)
// There has to be a point, at least.
gs.insert(point(weight_center));
else
add_facets(facets_to_be_added, gs, Linear_Expression(0),
dims-1, dims, weight_center, half_diagonal);
// Actually build the polyhedron.
return NNC_Polyhedron(gs);
}
void
build_polyhedra(const dimension_type dims,
const int percentage,
std::vector<NNC_Polyhedron>& ph) {
Linear_Expression weight_center;
// 1st-polyhedron.
weight_center = Linear_Expression(0);
for (dimension_type axis = dims; axis-- > 0; )
weight_center += Variable(axis);
ph.push_back(NNC_dual_hypercube(dims, weight_center, 5, percentage));
// 2nd-polyhedron.
weight_center = Linear_Expression(0);
for (dimension_type axis = dims; axis-- > 0; )
weight_center += 2*Variable(axis);
ph.push_back(NNC_dual_hypercube(dims, weight_center, 4, percentage));
// 3rd-polyhedron.
weight_center = Linear_Expression(0);
for (dimension_type axis = dims; axis-- > 0; )
if (axis % 2 == 0)
weight_center += 10*Variable(axis);
else
weight_center += 2*Variable(axis);
ph.push_back(NNC_dual_hypercube(dims, weight_center, 5, percentage));
// 4th-polyhedron.
weight_center = Linear_Expression(0);
for (dimension_type axis = dims; axis-- > 0; )
if (axis % 2 == 0)
weight_center += 10*Variable(axis);
else
weight_center += Variable(axis);
ph.push_back(NNC_dual_hypercube(dims, weight_center, 4, percentage));
}
long
computation(std::vector<NNC_Polyhedron>& ph, bool enhanced) {
nout << endl;
if (enhanced)
nout << "Enhanced computation: ";
else
nout << "Standard computation: ";
nout << "working with 4 NNC dual hypercubes of dimension "
<< ph[0].space_dimension() << endl;
start_clock();
/**** Compute the intersection of ph[0] and ph[1]. ****/
// Print cardinalities of arguments.
nout << " - Computing intersection of ph[0] and ph[1]:" << endl;
const Generator_System& gs_0 = ph[0].generators();
nout << " # ph[0].generators() = "
<< std::distance(gs_0.begin(), gs_0.end()) << endl;
const Generator_System& gs_1 = ph[1].generators();
nout << " # ph[1].generators() = "
<< std::distance(gs_1.begin(), gs_1.end()) << endl;
// Very noisy dump of arguments.
vnout << "*** ph[0] generators ***" << endl;
gs_0.ascii_dump(vnout);
vnout << "*** ph[1] generators ***" << endl;
gs_1.ascii_dump(vnout);
vnout << endl;
const Constraint_System& cs_0 = enhanced
? ph[0].minimized_constraints()
: ph[0].constraints();
const Constraint_System& cs_1 = enhanced
? ph[1].minimized_constraints()
: ph[1].constraints();
// Print cardinalities of constraint systems.
nout << " # ph[0].constraints() = "
<< std::distance(cs_0.begin(), cs_0.end()) << endl;
nout << " # ph[1].constraints() = "
<< std::distance(cs_1.begin(), cs_1.end()) << endl;
// Very noisy dump of arguments.
vnout << "*** ph[0] constraints ***" << endl;
cs_0.ascii_dump(vnout);
vnout << "*** ph[1] constraints ***" << endl;
cs_1.ascii_dump(vnout);
vnout << endl;
ph[0].intersection_assign(ph[1]);
/**** Compute the intersection of ph[2] and ph[3]. ****/
// Print cardinalities of arguments.
nout << " - Computing intersection of ph[2] and ph[3]:" << endl;
const Generator_System& gs_2 = ph[2].generators();
nout << " # ph[2].generators() = "
<< std::distance(gs_2.begin(), gs_2.end()) << endl;
const Generator_System& gs_3 = ph[3].generators();
nout << " # ph[3].generators() = "
<< std::distance(gs_3.begin(), gs_3.end()) << endl;
// Very noisy dump of arguments.
vnout << "*** ph[2] generators ***" << endl;
gs_2.ascii_dump(vnout);
vnout << "*** ph[3] generators ***" << endl;
gs_3.ascii_dump(vnout);
vnout << endl;
const Constraint_System& cs_2 = enhanced
? ph[2].minimized_constraints()
: ph[2].constraints();
const Constraint_System& cs_3 = enhanced
? ph[3].minimized_constraints()
: ph[3].constraints();
// Print cardinalities of constraint systems.
nout << " # ph[2].constraints() = "
<< std::distance(cs_2.begin(), cs_2.end()) << endl;
nout << " # ph[3].constraints() = "
<< std::distance(cs_3.begin(), cs_3.end()) << endl;
// Very noisy dump of arguments.
vnout << "*** ph[2] constraints ***" << endl;
cs_2.ascii_dump(vnout);
vnout << "*** ph[3] constraints ***" << endl;
cs_3.ascii_dump(vnout);
vnout << endl;
ph[2].intersection_assign(ph[3]);
/**** Compute the poly-hull of ph[0] and ph[2]. ****/
const Generator_System& gs_01 = enhanced<|fim▁hole|> const Generator_System& gs_23 = enhanced
? ph[2].minimized_generators()
: ph[2].generators();
// Print cardinalities of arguments.
nout << " - Computing poly-hull of ph[0] and ph[2]:" << endl;
nout << " # ph[0].generators() = "
<< std::distance(gs_01.begin(), gs_01.end()) << endl;
nout << " # ph[2].generators() = "
<< std::distance(gs_23.begin(), gs_23.end()) << endl;
// Very noisy dump of arguments.
vnout << "*** ph[0] generators ***" << endl;
gs_01.ascii_dump(vnout);
vnout << "*** ph[2] generators ***" << endl;
gs_23.ascii_dump(vnout);
vnout << endl;
ph[0].upper_bound_assign(ph[2]);
/**** Final conversion ****/
const Constraint_System& cs = ph[0].constraints();
nout << "Wmf final result timing: ";
print_clock(nout);
nout << endl;
// How many constraints obtained?
const long cs_cardinality = std::distance(cs.begin(), cs.end());
// Print cardinality of weakly-minimized final result.
nout << " - Final (wmf) result is ph[0]:" << endl;
nout << " # ph[0].constraints() = " << cs_cardinality << endl;
// Very noisy dump of weakly-minimized final result.
vnout << "*** ph[0] constraints ***" << endl;
cs.ascii_dump(vnout);
vnout << endl;
/**** Final strong minimization ****/
nout << "Smf (cons) final result timing: ";
start_clock();
const Constraint_System& min_cs = ph[0].minimized_constraints();
print_clock(nout);
nout << endl;
// How many constraints obtained?
const long min_cs_cardinality = std::distance(min_cs.begin(), min_cs.end());
// Print cardinality of strongly-minimized final result.
nout << " - Final (smf) result is ph[0]:" << endl;
nout << " # ph[0].minimized_constraints() = "
<< min_cs_cardinality << endl;
// Very noisy dump of strongly-minimized final result.
vnout << "*** ph[0] minimized constraints ***" << endl;
min_cs.ascii_dump(vnout);
vnout << endl;
return enhanced ? min_cs_cardinality : cs_cardinality;
}
bool
test01() {
std::vector<NNC_Polyhedron> ph;
#if EXP_EVAL
dimension_type first_dim = 4;
dimension_type last_dim = 5;
#else
dimension_type first_dim = 2;
dimension_type last_dim = 4;
#endif
// Storing cardinalities of known results.
// NOTE: the numbers reported here differ a little bit from those
// in the FAC paper in that here we do not count low-level constraints
// related to the epsilon dimension. The difference is at most 2
// (the eps_geq_zero and eps_leq_one constraints).
typedef std::map<std::pair<dimension_type, int>, long> My_Map;
My_Map::const_iterator known_result;
My_Map standard_cardinalities;
My_Map enhanced_cardinalities;
using std::make_pair;
standard_cardinalities[make_pair(4, 25)] = 331; // FAC 332
enhanced_cardinalities[make_pair(4, 25)] = 31; // FAC 33
standard_cardinalities[make_pair(4, 50)] = 519; // FAC 520
enhanced_cardinalities[make_pair(4, 50)] = 41; // FAC 43
standard_cardinalities[make_pair(5, 25)] = 2692; // FAC 2693
enhanced_cardinalities[make_pair(5, 25)] = 125; // FAC 127
standard_cardinalities[make_pair(5, 50)] = 4993; // FAC 4994
enhanced_cardinalities[make_pair(5, 50)] = 150; // FAC 152
int num_errors = 0;
for (dimension_type dims = first_dim; dims <= last_dim; dims++)
for (int percentage = 25; percentage <= 50; percentage += 25) {
nout << endl
<< "++++++++ DIMENSIONS = " << dims << " ++++++++"
<< endl
<< "++++++++ PERCENTAGE = " << percentage << " ++++++++"
<< endl;
// Standard evaluation strategy.
ph.clear();
build_polyhedra(dims, percentage, ph);
const long standard_eval_card = computation(ph, false);
// Check if there is a known result.
known_result = standard_cardinalities.find(make_pair(dims, percentage));
if (known_result != standard_cardinalities.end()
&& known_result->second != standard_eval_card) {
++num_errors;
nout << "Cardinality mismatch: "
<< "expected " << known_result->second << ", "
<< "obtained " << standard_eval_card << ".\n";
}
// Enhanced evaluation strategy.
ph.clear();
build_polyhedra(dims, percentage, ph);
const long enhanced_eval_card = computation(ph, true);
// Check if there is a known result.
known_result = enhanced_cardinalities.find(make_pair(dims, percentage));
if (known_result != enhanced_cardinalities.end()
&& known_result->second != enhanced_eval_card) {
++num_errors;
nout << "Cardinality mismatch: "
<< "expected " << known_result->second << ", "
<< "obtained " << enhanced_eval_card <<".\n";
}
}
return (num_errors == 0);
}
} // namespace
BEGIN_MAIN
DO_TEST_F64A(test01);
END_MAIN<|fim▁end|> | ? ph[0].minimized_generators()
: ph[0].generators(); |
<|file_name|>VariablesScreen.js<|end_file_name|><|fim▁begin|>/* @flow strict-local */
import React, { PureComponent } from 'react';
import { FlatList } from 'react-native';<|fim▁hole|>import config from '../config';
import { Screen } from '../common';
import InfoItem from './InfoItem';
export default class VariablesScreen extends PureComponent<{||}> {
render() {
const variables = {
enableReduxLogging: config.enableReduxLogging,
enableReduxSlowReducerWarnings: config.enableReduxSlowReducerWarnings,
'process.env.NODE_ENV': process.env.NODE_ENV ?? '(not defined)',
'global.btoa': !!global.btoa,
};
return (
<Screen title="Variables" scrollEnabled={false}>
<FlatList
data={Object.keys(variables)}
keyExtractor={item => item}
renderItem={({ item }) => <InfoItem label={item} value={variables[item]} />}
/>
</Screen>
);
}
}<|fim▁end|> | |
<|file_name|>memory.py<|end_file_name|><|fim▁begin|># (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: memory
short_description: RAM backed, non persistent
description:
- RAM backed cache that is not persistent.
- This is the default used if no other plugin is specified.
- There are no options to configure.
version_added: historical
author: core team (@ansible-core)
'''
from ansible.plugins.cache import BaseCacheModule
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
self._cache = {}
def get(self, key):
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
def keys(self):
return self._cache.keys()
<|fim▁hole|> def delete(self, key):
del self._cache[key]
def flush(self):
self._cache = {}
def copy(self):
return self._cache.copy()
def __getstate__(self):
return self.copy()
def __setstate__(self, data):
self._cache = data<|fim▁end|> | def contains(self, key):
return key in self._cache
|
<|file_name|>AddNewRolesController.js<|end_file_name|><|fim▁begin|>Ext.define('Healthsurvey.view.usermanagement.roles.AddNewRolesController', {
extend : 'Ext.app.ViewController',
alias : 'controller.newRolesController',
defaultMenuStore:null,
init:function()
{
this.initializeDragZone();
},
afterAllFeatureTreeRender:function()
{
debugger;
var currentObject = this;
var loadMask = new Ext.LoadMask({
msg : 'Loading data...',
target : currentObject.getView()
}).show();
Ext.Ajax.request({<|fim▁hole|> loadMask:loadMask,
controller:currentObject,
jsonData:{
},
success : function(response,currentObject)
{
debugger;
var responseJson = Ext.JSON.decode(response.responseText);
var rawData=Ext.JSON.decode(responseJson.response.data);
currentObject.controller.defaultMenuStore=rawData;
currentObject.controller.createTree(rawData)
currentObject.loadMask.hide();
},
failure : function(response,currentObject){
currentObject.loadMask.hide();
Ext.Msg.alert({title: 'Error',msg: "Cannot connect to server.",icon: Ext.MessageBox.ERROR});
}
},currentObject);
},
createTree:function(rawData)
{
var allTreePanel=this.getView().down("#allFeatureTree");
var rootNode=allTreePanel.getRootNode();
/*for(i=0;i<rawData.length;i++){
data=rawData[i];
var fChild=data.children;*/
var fChild=rawData;
for(var x1=0;x1<fChild.length;x1++)
{
this.addChild(rootNode,fChild[x1]);
}
//} /**Use this for loop if menu list order is changed **/
},
addChild:function(parentNode,node)
{
if(node.hasOwnProperty("children")&& node.children!=null)
{
var child={
text:node.text,
menuId:node.menuId,
icon : 'images/folder-database-icon.png',
read :true,
}
child["expanded"]=true;
child["isRead"]=true;
child["isWrite"]=true;
child["isExecute"]=false;
				/** The service response does not indicate whether a menu is a head menu, so the isHeadMenu property is added here to identify it. **/
child["isHeadMenu"] = true;
var newNode=parentNode.appendChild(child);
for(var x=0;x<node.children.length;x++)
{
this.addChild(newNode,node.children[x]);
}
}else{
debugger;
node["isRead"]=true;
node["isWrite"]=true;
node["isExecute"]=false;
node['visible'] = (node.text=="")?false:true;
node["isHeadMenu"] = false;
parentNode.appendChild(node);
}
},
// Propagate change downwards (for all children of current node).
setChildrenCheckedStatus : function (node, property, checked) {
if ((node.data.isHeadMenu) && (node.hasChildNodes())) {
for(var i=0; i<node.childNodes.length; i++) {
var child = node.childNodes[i];
if ((child.data.isHeadMenu) && (child.hasChildNodes())) {
this.setChildrenCheckedStatus(child, property, checked);
}
if(child.data.visible)
child.set(property,checked);
}
}
},
// Propagate change upwards (if all siblings are the same, update parent).
updateParentCheckedStatus : function (current, property, checked) {
debugger;
if (current.parentNode.data.isHeadMenu) {
debugger;
var parent = current.parentNode;
var checkedCount = 0;
var visibleChildLength = 0;
parent.eachChild(function(n) {
if(n.data.visible) {
visibleChildLength++;
checkedCount += (n.get(property) ? 1 : 0);
}
});
// a single child is checked, so check the parent.
if (checkedCount == 1){
debugger;
parent.set(property, true);
if(parent.parentNode.data.isHeadMenu && checked == true) {
this.updateParentCheckedStatus(parent, property, checked);
}
}
// Children have same value if all of them are checked or none is checked.
var sameValue = (checkedCount == visibleChildLength) || (checkedCount == 0); //parent.childNodes.length) || (checkedCount == 0);
if (sameValue) {
parent.set(property, checked);
if(parent.parentNode.data.isHeadMenu) {
this.updateParentCheckedStatus(parent, property, checked);
}
}
}
},
onIsReadCheckChange : function(checkcolumn, rowIndex, checked, eOpts) {
this.applyCheckBehaviour(rowIndex,'isRead',checked);
},
onIsWriteCheckChange : function(checkcolumn, rowIndex, checked, eOpts) {
this.applyCheckBehaviour(rowIndex,'isWrite',checked);
},
onIsExecuteCheckChange : function(checkcolumn, rowIndex, checked, eOpts) {
this.applyCheckBehaviour(rowIndex,'isExecute',checked);
},
applyCheckBehaviour : function(rowIndex, property, checked){
debugger;
var mappingTree = this.getView().down('#mappedFeatureTree');
var node = mappingTree.store.getAt(rowIndex);
// Propagate change downwards (for all children of current node).
this.setChildrenCheckedStatus(node, property, checked);
// Propagate change upwards (if all siblings are the same, update parent).
this.updateParentCheckedStatus(node, property, checked);
},
initializeDragZone:function()
{
var dragData = null;
var treeComponentPanel = this.getView().down("#allFeatureTree");
treeComponentPanel.on('itemmousedown', function( treeComponentPanel, record, item, index, e, eOpts ){
dragData = record //record.data will give only the selected node
});
treeComponentPanel.on('render', function(v) {
treeComponentPanel.dragZone = new Ext.dd.DragZone(v.getEl(), {
onBeforeDrag : function(data, e)
{
				//Parent nodes are not allowed to be dragged
/*if(data.draggedRecord.data.leaf==false){
return false;
}*/
if (data.draggedRecord.cannotDrag) {
return false;
}
},
getDragData: function(e)
{
var sourceEl = e.getTarget(v.itemSelector, 10);
if (sourceEl) {
d = sourceEl.cloneNode(true);
var dragDataToSend = d.id;
if (dragData.component == d.textContent){
dragDataToSend = dragData;
}
d.id = Ext.id();
return {
ddel: d,
sourceEl: sourceEl,
repairXY: Ext.fly(sourceEl).getXY(),
sourceStore: v.store,
draggedRecord: dragData
};
}
},
getRepairXY: function() {
//console.log('Drag Zone: getRepairXY() called...');
return this.dragData.repairXY;
},
ddGroup : 'myDDGroup'
});
});
},//initializeDragZone
initializeDropZone:function(panel)
{
debugger;
var me =this; //dBBuilderController
/**Click Event of Panel's items*/
panel.getEl().on('click', function(e,el,panel){
var targetNode =this.getNodeForEl(e.getTarget());
},me);
/**Initialize DropZone*/
var drop = new Ext.dd.DropZone(panel.el, {
ddGroup:'myDDGroup',
scope:this,
getTargetFromEvent: function(e)
{
//return e.getTarget(this.layoutPanel);
},
notifyOver : function(src,e,data)
{
return Ext.dd.DropZone.prototype.dropAllowed;
},
notifyDrop : function(src,e,data)
{
debugger;
var rootNode=this.scope.getView().down('#mappedFeatureTree').getRootNode();
var draggedRecord=data.draggedRecord;
//If leaf node is dragged then drag its parent nodes in hierarchy also
if(draggedRecord.data.leaf==true)
{
var tempArr=[];
while(draggedRecord.data.text!="Root")
{
tempArr.push(draggedRecord.data);
draggedRecord=draggedRecord.parentNode;
}
var parentNode=rootNode;
for(i=tempArr.length-1;i>=0;i--)
{
if(parentNode.findChild("text",tempArr[i].text,true)==null){
parentNode=parentNode.appendChild(tempArr[i]);
}
else{
parentNode=parentNode.findChild("text",tempArr[i].text,true);
}
}
}
//If folder node is dragged then drag its parent nodes in hierarchy as well as all its children
else{
var tempArr1=[];
tempArr1.push(draggedRecord);
while(draggedRecord.parentNode.data.text!="Root")
{
draggedRecord=draggedRecord.parentNode;
tempArr1.push(draggedRecord.data);
}
var parentNode=rootNode;
for(i=tempArr1.length-1;i>=0;i--)
{
if(parentNode.findChild("text",tempArr1[i].data.text,true)==null){
parentNode=parentNode.appendChild(tempArr1[i]);
}
else{
parentNode=parentNode.findChild("text",tempArr1[i].text,true);
}
}
this.scope.refreshDefaultTree()
}
},
notifyOut : function(src,e,data)
{
//this.removehighlightElement();
//Ext.fly(src).removeCls('my-row-highlight-class')
}
});
panel.drop=drop;
},//initializeDropZone ends
getNodeForEl : function(el)
{
var search = 0;
var target = null;
while (search < 10) {
target = Ext.ComponentMgr.get(el.id);
if (target) {
return target;
}
el = el.parentNode;
if (!el) { break; }
search++;
}
return null;
},
refreshDefaultTree : function() {
if (this.defaultMenuStore != null){
debugger;
this.getView().down('#allFeatureTree').getRootNode().removeAll();
this.createTree(this.defaultMenuStore);
}
},
itemContextMenuClick:function(obj, record, item, index, e, eOpts )
{
e.stopEvent();
Ext.create('Ext.menu.Menu', {
items : [ Ext.create('Ext.Action', {
text : 'Remove',
iconCls:'menu-delete',
handler : function()
{
/*if(record.data.leaf == true){*/
Ext.Msg.confirm('Confirm', 'Are you sure you want to delete',function(btn, text){
if (btn == 'yes')
{
this.record.remove();
}},{
me:this,
record:record
});//MessageBox ends
//}//if closes
}//handler
})
]//menu items closes
}).showAt(e.getXY());
},//itemContextMenu ends
onSaveRolesClick:function()
{
debugger;
var me =this;
var roleName=this.getView().down('#roleName').getValue();
var roleDesc=this.getView().down('#roleDesc').getValue();
if(roleName==""){ Ext.Msg.alert({title:'Info',msg:"Enter Role Name",icon:Ext.MessageBox.INFO});return;}
var mappedTree=this.getView().down('#mappedFeatureTree');
roleMenuBridge=this.prepareRoleMenuBridge(mappedTree);
var jsonData ={
roleName : roleName,
roleDescription : roleDesc,
roleMenuBridge : roleMenuBridge
};
Ext.Ajax.request({
url:'secure/Roles',
method : 'POST',
me:me,
jsonData : jsonData,
success : function(response,currentObject)
{
debugger;
var responseJson = Ext.JSON.decode(response.responseText);
if(responseJson.response.success==true)
{
Ext.Msg.alert('Success',"Data Saved Successfully");
rolesTree=currentObject.me.getView().up().up().down('#rolesTree');
rolesTree.store.load();
currentObject.me.onResetRolesClick();
}
else{
Ext.Msg.alert({title : 'Error',msg : 'Data Transaction Failed',icon : Ext.MessageBox.ERROR});
}
},
failure : function() {
Ext.Msg.alert({title : 'Error',msg : 'Cannot connect to server',icon : Ext.MessageBox.ERROR});
}
});
},//onSaveRolesClick ends
prepareRoleMenuBridge : function(mappedTree)
{
debugger;
var criteria = [];
var store = mappedTree.getStore();
Ext.Array.each(store.data.items,function(item, idx, items) {
var object = {
menuId : item.data.menuId,
isRead: item.data.isRead,
isWrite: item.data.isWrite,
isExecute: item.data.isExecute
};
this.criteria.push(object);
}, {
criteria : criteria,
mappedTree : mappedTree,
scope : this
});
return criteria;
},//prepareRoleMenuBridge ends
onResetRolesClick:function()
{
this.getView().down('#roleFormPanel').reset();
this.getView().down('#mappedFeatureTree').getRootNode().removeAll()
}
});<|fim▁end|> | url : "secure/MenuService/fetchmenus",
method:'POST', |
<|file_name|>REDBEARDUO.py<|end_file_name|><|fim▁begin|>#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
# placeholder
info = {
'name' : "RedBear Duo",
'link' : [ "http://www.RedBear.cc/Duo" ],
'default_console' : "EV_SERIAL1",
'variables' : 2250,
'bootloader' : 0,
'binary_name' : 'espruino_%v_redbearduo.bin',
'build' : {
'defines' : [
]
}
};
chip = {<|fim▁hole|> 'flash' : 1024,
'speed' : 120,
'usart' : 2,
'spi' : 2,
'i2c' : 1,
'adc' : 8,
'dac' : 2,
'saved_code' : {
'address' : 0x08010000, # internal EEPROM flash
'page_size' : 131072, # size of pages
'pages' : 0.5, # number of pages we're using
'flash_available' : 256 # 256KB internal flash used for user part
},
};
devices = {
'LED1' : { 'pin' : 'B11' }, # R
'LED2' : { 'pin' : 'B1' }, # G
'LED3' : { 'pin' : 'B0' }, # B
'LED4' : { 'pin' : 'A13' }, # user led
'BTN1' : { 'pin' : 'B2' },
};
def get_pins():
pins = pinutils.scan_pin_file([], 'stm32f20x.csv', 6, 9, 10)
pins = pinutils.scan_pin_af_file(pins, 'stm32f20x_af.csv', 0, 1)
return pinutils.only_from_package(pinutils.fill_gaps_in_pin_list(pins), chip["package"])<|fim▁end|> | 'part' : "STM32F205RGT6",
'family' : "STM32F2",
'package' : "WLCSP64",
'ram' : 60, # just a guess left for user part |
<|file_name|>boot.js<|end_file_name|><|fim▁begin|>define(["require"], function (require) {
function boot(ev) {
ev.target.removeEventListener("click", boot);
require(["demos/water/water"]);
}
const start = document.querySelector(".code-demo.water [data-trigger='water.start']");<|fim▁hole|><|fim▁end|> | start.addEventListener("click", boot);
start.disabled = false;
}); |
<|file_name|>out.js<|end_file_name|><|fim▁begin|>var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var _this = this;
// Add a lambda to ensure global 'this' capture is triggered
(function () { return _this.window; });
// class inheritance to ensure __extends is emitted
var m;
(function (m) {
var base = /** @class */ (function () {
function base() {
}
return base;
}());
m.base = base;
var child = /** @class */ (function (_super) {
__extends(child, _super);
function child() {
return _super !== null && _super.apply(this, arguments) || this;
}
<|fim▁hole|><|fim▁end|> | return child;
}(base));
m.child = child;
})(m || (m = {})); |
<|file_name|>stats.py<|end_file_name|><|fim▁begin|># Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import elapsed_time_to_string, html_escape, normalize
from .tags import TagPatterns
class Stat(object):
"""Generic statistic object used for storing all the statistic values."""
def __init__(self, name):
#: Human readable identifier of the object these statistics
#: belong to. Either `All Tests` or `Critical Tests` for
#: :class:`~robot.model.totalstatistics.TotalStatistics`,
#: long name of the suite for
#: :class:`~robot.model.suitestatistics.SuiteStatistics`
#: or name of the tag for
#: :class:`~robot.model.tagstatistics.TagStatistics`
self.name = name
#: Number of passed tests.
self.passed = 0
#: Number of failed tests.
self.failed = 0
#: Number of milliseconds it took to execute.
self.elapsed = 0
self._norm_name = normalize(name, ignore='_')
def get_attributes(self, include_label=False, include_elapsed=False,
exclude_empty=False, values_as_strings=False,
html_escape=False):
attrs = {'pass': self.passed, 'fail': self.failed}
attrs.update(self._get_custom_attrs())
if include_label:
attrs['label'] = self.name
if include_elapsed:
attrs['elapsed'] = elapsed_time_to_string(self.elapsed,
include_millis=False)
if exclude_empty:
attrs = dict((k, v) for k, v in attrs.items() if v != '')
if values_as_strings:
attrs = dict((k, unicode(v)) for k, v in attrs.items())
if html_escape:
attrs = dict((k, self._html_escape(v)) for k, v in attrs.items())
return attrs
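    # For example (hypothetical values): a stat with 3 passed and 1 failed tests
    # returns {'pass': 3, 'fail': 1}, plus 'label'/'elapsed' entries when requested.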
def _get_custom_attrs(self):
return {}
def _html_escape(self, item):
return html_escape(item) if isinstance(item, basestring) else item
@property
def total(self):
return self.passed + self.failed
def add_test(self, test):
self._update_stats(test)
self._update_elapsed(test)
def _update_stats(self, test):
if test.passed:
self.passed += 1
else:
self.failed += 1
def _update_elapsed(self, test):
self.elapsed += test.elapsedtime
def __cmp__(self, other):
return cmp(self._norm_name, other._norm_name)
def __nonzero__(self):
return not self.failed
def visit(self, visitor):
visitor.visit_stat(self)
class TotalStat(Stat):
"""Stores statistic values for a test run."""
#: Always string `total`
type = 'total'
class SuiteStat(Stat):
"""Stores statistics values for a single suite."""
#: Always string `suite`
type = 'suite'
def __init__(self, suite):<|fim▁hole|> #: Number of milliseconds it took to execute this suite,
#: including sub-suites.
self.elapsed = suite.elapsedtime
self._name = suite.name
def _get_custom_attrs(self):
return {'id': self.id, 'name': self._name}
def _update_elapsed(self, test):
pass
def add_stat(self, other):
self.passed += other.passed
self.failed += other.failed
class TagStat(Stat):
"""Stores statistic values for a single tag."""
#: Always string `tag`.
type = 'tag'
def __init__(self, name, doc='', links=None, critical=False,
non_critical=False, combined=''):
Stat.__init__(self, name)
#: Documentation of tag as a string.
self.doc = doc
#: List of tuples in which the first value is the link URL and
#: the second is the link title. An empty list by default.
self.links = links or []
#: ``True`` if tag is considered critical, ``False`` otherwise.
self.critical = critical
#: ``True`` if tag is considered non-critical, ``False`` otherwise.
self.non_critical = non_critical
#: Pattern as a string if the tag is combined,
#: an empty string otherwise.
self.combined = combined
@property
def info(self):
"""Returns additional information of the tag statistics
are about. Either `critical`, `non-critical`, `combined` or an
empty string.
"""
if self.critical:
return 'critical'
if self.non_critical:
return 'non-critical'
if self.combined:
return 'combined'
return ''
def _get_custom_attrs(self):
return {'doc': self.doc, 'links': self._get_links_as_string(),
'info': self.info, 'combined': self.combined}
def _get_links_as_string(self):
return ':::'.join('%s:%s' % (title, url) for url, title in self.links)
def __cmp__(self, other):
return cmp(other.critical, self.critical) \
or cmp(other.non_critical, self.non_critical) \
or cmp(bool(other.combined), bool(self.combined)) \
or Stat.__cmp__(self, other)
class CombinedTagStat(TagStat):
def __init__(self, pattern, name=None, doc='', links=None):
TagStat.__init__(self, name or pattern, doc, links, combined=pattern)
self._matcher = TagPatterns(pattern)
def match(self, tags):
return self._matcher.match(tags)<|fim▁end|> | Stat.__init__(self, suite.longname)
#: Identifier of the suite, e.g. `s1-s2`.
self.id = suite.id |
<|file_name|>subtypesOfTypeParameterWithConstraints2.ts<|end_file_name|><|fim▁begin|>// checking whether other types are subtypes of type parameters with constraints
function f1<T extends U, U>(x: T, y: U) {
var r = true ? x : y;
var r = true ? y : x;
}
// V > U > T
function f2<T extends U, U extends V, V>(x: T, y: U, z: V) {
var r = true ? x : y;
var r = true ? y : x;
// ok
var r2 = true ? z : y;
var r2 = true ? y : z;
// ok
var r2a = true ? z : x;
var r2b = true ? x : z;
}
// Date > U > T
function f3<T extends U, U extends Date>(x: T, y: U) {
var r = true ? x : y;
var r = true ? y : x;
// ok
var r2 = true ? x : new Date();
var r2 = true ? new Date() : x;
// ok
var r3 = true ? y : new Date();
var r3 = true ? new Date() : y;
}
<|fim▁hole|>class C2<T> { foo: T; }
enum E { A }
function f() { }
module f {
export var bar = 1;
}
class c { baz: string }
module c {
export var bar = 1;
}
function f4<T extends Number>(x: T) {
var r0 = true ? x : null; // ok
var r0 = true ? null : x; // ok
var u: typeof undefined;
var r0b = true ? u : x; // ok
var r0b = true ? x : u; // ok
}
function f5<T extends Number>(x: T) {
var r1 = true ? 1 : x; // ok
var r1 = true ? x : 1; // ok
}
function f6<T extends String>(x: T) {
var r2 = true ? '' : x; // ok
var r2 = true ? x : ''; // ok
}
function f7<T extends Boolean>(x: T) {
var r3 = true ? true : x; // ok
var r3 = true ? x : true; // ok
}
function f8<T extends Date>(x: T) {
var r4 = true ? new Date() : x; // ok
var r4 = true ? x : new Date(); // ok
}
function f9<T extends RegExp>(x: T) {
var r5 = true ? /1/ : x; // ok
var r5 = true ? x : /1/; // ok
}
function f10<T extends { foo: number }>(x: T) {
var r6 = true ? { foo: 1 } : x; // ok
var r6 = true ? x : { foo: 1 }; // ok
}
function f11<T extends () => void>(x: T) {
var r7 = true ? () => { } : x; // ok
var r7 = true ? x : () => { }; // ok
}
function f12<T extends <U>(x: U) => U>(x: T) {
var r8 = true ? <T>(x: T) => { return x } : x; // ok
var r8b = true ? x : <T>(x: T) => { return x }; // ok, type parameters not identical across declarations
}
function f13<T extends I1>(x: T) {
var i1: I1;
var r9 = true ? i1 : x; // ok
var r9 = true ? x : i1; // ok
}
function f14<T extends C1>(x: T) {
var c1: C1;
var r10 = true ? c1 : x; // ok
var r10 = true ? x : c1; // ok
}
function f15<T extends C2<number>>(x: T) {
var c2: C2<number>;
var r12 = true ? c2 : x; // ok
var r12 = true ? x : c2; // ok
}
function f16<T extends E>(x: T) {
var r13 = true ? E : x; // ok
var r13 = true ? x : E; // ok
var r14 = true ? E.A : x; // ok
var r14 = true ? x : E.A; // ok
}
function f17<T extends typeof f>(x: T) {
var af: typeof f;
var r15 = true ? af : x; // ok
var r15 = true ? x : af; // ok
}
function f18<T extends typeof c>(x: T) {
var ac: typeof c;
var r16 = true ? ac : x; // ok
var r16 = true ? x : ac; // ok
}
function f19<T>(x: T) {
function f17<U extends T>(a: U) {
var r17 = true ? x : a; // ok
var r17 = true ? a : x; // ok
}
function f18<V extends U, U extends T>(a: V) {
var r18 = true ? x : a; // ok
var r18 = true ? a : x; // ok
}
}
function f20<T extends Number>(x: T) {
var r19 = true ? new Object() : x; // ok
var r19 = true ? x : new Object(); // ok
}
function f21<T extends Number>(x: T) {
var r20 = true ? {} : x; // ok
var r20 = true ? x : {}; // ok
}<|fim▁end|> |
interface I1 { foo: number; }
class C1 { foo: number; }
|
<|file_name|>docopt_macro_use_case.rs<|end_file_name|><|fim▁begin|>// stolen from here: https://github.com/docopt/docopt.rs
#![feature(plugin)]
#![plugin(docopt_macros)]
extern crate rustc_serialize;
extern crate docopt;
use docopt::Docopt;
docopt!(Args derive Debug, "<|fim▁hole|>Usage:
naval_fate.py ship new <name>...
naval_fate.py ship <name> move <x> <y> [--speed=<kn>]
naval_fate.py ship shoot <x> <y>
naval_fate.py mine (set|remove) <x> <y> [--moored | --drifting]
naval_fate.py (-h | --help)
naval_fate.py --version
Options:
-h --help Show this screen.
--version Show version.
--speed=<kn> Speed in knots [default: 10].
--moored Moored (anchored) mine.
--drifting Drifting mine.
");
fn main() {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
println!("{:?}", args);
}<|fim▁end|> | Naval Fate.
|
<|file_name|>test_neutron_loadbalancer.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from testtools import skipIf
from heat.common import exception
from heat.common import template_format
from heat.engine import clients
from heat.engine import scheduler
from heat.engine.resources.neutron import loadbalancer
from heat.openstack.common.importutils import try_import
from heat.tests import fakes
from heat.tests import utils
from heat.tests.common import HeatTestCase
from heat.tests.v1_1 import fakes as nova_fakes
neutronclient = try_import('neutronclient.v2_0.client')
health_monitor_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer resources",
"Parameters" : {},
"Resources" : {
"monitor": {
"Type": "OS::Neutron::HealthMonitor",
"Properties": {
"type": "HTTP",
"delay": 3,
"max_retries": 5,
"timeout": 10
}
}
}
}
'''
pool_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer resources",
"Parameters" : {},
"Resources" : {
"pool": {
"Type": "OS::Neutron::Pool",
"Properties": {
"protocol": "HTTP",
"subnet_id": "sub123",
"lb_method": "ROUND_ROBIN",
"vip": {
"protocol_port": 80
}
}
}
}
}
'''
member_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer member",
"Resources" : {
"member": {
"Type": "OS::Neutron::PoolMember",
"Properties": {
"protocol_port": 8080,
"pool_id": "pool123",
"address": "1.2.3.4"
}
}
}
}
'''
lb_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer resources",
"Parameters" : {},
"Resources" : {
"lb": {
"Type": "OS::Neutron::LoadBalancer",
"Properties": {
"protocol_port": 8080,
"pool_id": "pool123",
"members": ["1234"]
}
}
}
}
'''
pool_with_session_persistence_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test load balancer resources wit",
"Parameters" : {},
"Resources" : {
"pool": {
"Type": "OS::Neutron::Pool",
"Properties": {
"protocol": "HTTP",
"subnet_id": "sub123",
"lb_method": "ROUND_ROBIN",
"vip": {
"protocol_port": 80,
"session_persistence": {
"type": "APP_COOKIE",
"cookie_name": "cookie"
}
}
}
}
}
}
'''
@skipIf(neutronclient is None, 'neutronclient unavailable')
class HealthMonitorTest(HeatTestCase):
def setUp(self):
super(HealthMonitorTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'delete_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'show_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'update_health_monitor')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
utils.setup_dummy_db()
def create_health_monitor(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_health_monitor({
'health_monitor': {
'delay': 3, 'max_retries': 5, 'type': u'HTTP',
'timeout': 10, 'admin_state_up': True}}
).AndReturn({'health_monitor': {'id': '5678'}})
snippet = template_format.parse(health_monitor_template)
stack = utils.parse_stack(snippet)
return loadbalancer.HealthMonitor(
'monitor', snippet['Resources']['monitor'], stack)
def test_create(self):
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_health_monitor({
'health_monitor': {
'delay': 3, 'max_retries': 5, 'type': u'HTTP',
'timeout': 10, 'admin_state_up': True}}
).AndRaise(loadbalancer.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(health_monitor_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.HealthMonitor(
'monitor', snippet['Resources']['monitor'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
neutronclient.Client.delete_health_monitor('5678')
neutronclient.Client.show_health_monitor('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_health_monitor('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_health_monitor('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=400))
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_health_monitor()
neutronclient.Client.show_health_monitor('5678').MultipleTimes(
).AndReturn(
{'health_monitor': {'admin_state_up': True, 'delay': 3}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertIs(True, rsrc.FnGetAtt('admin_state_up'))
self.assertEqual(3, rsrc.FnGetAtt('delay'))
self.m.VerifyAll()
def test_attribute_failed(self):
rsrc = self.create_health_monitor()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (monitor subnet_id) is incorrect.',
str(error))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_health_monitor()
neutronclient.Client.update_health_monitor(
'5678', {'health_monitor': {'delay': 10}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['delay'] = 10
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
@skipIf(neutronclient is None, 'neutronclient unavailable')
class PoolTest(HeatTestCase):
def setUp(self):
super(PoolTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_pool')
self.m.StubOutWithMock(neutronclient.Client, 'delete_pool')
self.m.StubOutWithMock(neutronclient.Client, 'show_pool')
self.m.StubOutWithMock(neutronclient.Client, 'update_pool')
self.m.StubOutWithMock(neutronclient.Client,
'associate_health_monitor')
self.m.StubOutWithMock(neutronclient.Client,
'disassociate_health_monitor')
self.m.StubOutWithMock(neutronclient.Client, 'create_vip')
self.m.StubOutWithMock(neutronclient.Client, 'delete_vip')
self.m.StubOutWithMock(neutronclient.Client, 'show_vip')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
utils.setup_dummy_db()
def create_pool(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
return loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
def test_create(self):
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_pending(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'PENDING_CREATE'}})
neutronclient.Client.show_pool('5678').MultipleTimes().AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'PENDING_CREATE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed_unexpected_status(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ERROR', 'name': '5678'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'Error: neutron report unexpected pool '
'resource[5678] status[ERROR]',
str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_create_failed_unexpected_vip_status(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {<|fim▁hole|> ).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').MultipleTimes().AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ERROR', 'name': 'xyz'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'Error: neutron reported unexpected vip '
'resource[xyz] status[ERROR]',
str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndRaise(loadbalancer.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_create_with_session_persistence(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80,
'session_persistence': {
'type': 'APP_COOKIE',
'cookie_name': 'cookie'}}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
snippet = template_format.parse(pool_with_session_persistence_template)
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_failing_validation_with_session_persistence(self):
msg = _('Property cookie_name is required, when '
'session_persistence type is set to APP_COOKIE.')
snippet = template_format.parse(pool_with_session_persistence_template)
pool = snippet['Resources']['pool']
persistence = pool['Properties']['vip']['session_persistence']
#When persistence type is set to APP_COOKIE, cookie_name is required
persistence['type'] = 'APP_COOKIE'
persistence['cookie_name'] = None
resource = loadbalancer.Pool('pool', pool, utils.parse_stack(snippet))
error = self.assertRaises(exception.StackValidationFailed,
resource.validate)
self.assertEqual(msg, str(error))
def test_validation_not_failing_without_session_persistence(self):
snippet = template_format.parse(pool_template)
pool = snippet['Resources']['pool']
resource = loadbalancer.Pool('pool', pool, utils.parse_stack(snippet))
self.assertIsNone(resource.validate())
def test_properties_are_prepared_for_session_persistence(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80,
'session_persistence': {'type': 'HTTP_COOKIE'}}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
snippet = template_format.parse(pool_with_session_persistence_template)
pool = snippet['Resources']['pool']
persistence = pool['Properties']['vip']['session_persistence']
#change persistence type to HTTP_COOKIE that not require cookie_name
persistence['type'] = 'HTTP_COOKIE'
del persistence['cookie_name']
resource = loadbalancer.Pool('pool', pool, utils.parse_stack(snippet))
#assert that properties contain cookie_name property with None value
persistence = resource.properties['vip']['session_persistence']
self.assertIn('cookie_name', persistence)
self.assertIsNone(persistence['cookie_name'])
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE), resource.state)
self.m.VerifyAll()
def test_delete(self):
rsrc = self.create_pool()
neutronclient.Client.delete_vip('xyz')
neutronclient.Client.show_vip('xyz').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
neutronclient.Client.delete_pool('5678')
neutronclient.Client.show_pool('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_vip('xyz').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
neutronclient.Client.delete_pool('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_vip_failed(self):
neutronclient.Client.delete_vip('xyz').AndRaise(
loadbalancer.NeutronClientException(status_code=400))
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_vip('xyz').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
neutronclient.Client.delete_pool('5678').AndRaise(
loadbalancer.NeutronClientException(status_code=400))
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_pool()
neutronclient.Client.show_pool('5678').MultipleTimes(
).AndReturn(
{'pool': {'admin_state_up': True, 'lb_method': 'ROUND_ROBIN'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertIs(True, rsrc.FnGetAtt('admin_state_up'))
self.assertEqual('ROUND_ROBIN', rsrc.FnGetAtt('lb_method'))
self.m.VerifyAll()
def test_vip_attribute(self):
rsrc = self.create_pool()
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'address': '10.0.0.3', 'name': 'xyz'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual({'address': '10.0.0.3', 'name': 'xyz'},
rsrc.FnGetAtt('vip'))
self.m.VerifyAll()
def test_attribute_failed(self):
rsrc = self.create_pool()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'net_id')
self.assertEqual(
'The Referenced Attribute (pool net_id) is incorrect.',
str(error))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_pool()
neutronclient.Client.update_pool(
'5678', {'pool': {'admin_state_up': False}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['admin_state_up'] = False
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
def test_update_monitors(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_pool({
'pool': {
'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
).AndReturn({'pool': {'id': '5678'}})
neutronclient.Client.associate_health_monitor(
'5678', {'health_monitor': {'id': 'mon123'}})
neutronclient.Client.associate_health_monitor(
'5678', {'health_monitor': {'id': 'mon456'}})
neutronclient.Client.create_vip({
'vip': {
'protocol': u'HTTP', 'name': 'pool.vip',
'admin_state_up': True, 'subnet_id': u'sub123',
'pool_id': '5678', 'protocol_port': 80}}
).AndReturn({'vip': {'id': 'xyz'}})
neutronclient.Client.show_pool('5678').AndReturn(
{'pool': {'status': 'ACTIVE'}})
neutronclient.Client.show_vip('xyz').AndReturn(
{'vip': {'status': 'ACTIVE'}})
neutronclient.Client.disassociate_health_monitor(
'5678', {'health_monitor': {'id': 'mon456'}})
neutronclient.Client.associate_health_monitor(
'5678', {'health_monitor': {'id': 'mon789'}})
snippet = template_format.parse(pool_template)
stack = utils.parse_stack(snippet)
snippet['Resources']['pool']['Properties']['monitors'] = [
'mon123', 'mon456']
rsrc = loadbalancer.Pool(
'pool', snippet['Resources']['pool'], stack)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['monitors'] = ['mon123', 'mon789']
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
@skipIf(neutronclient is None, 'neutronclient unavailable')
class PoolMemberTest(HeatTestCase):
def setUp(self):
super(PoolMemberTest, self).setUp()
self.fc = nova_fakes.FakeClient()
self.m.StubOutWithMock(neutronclient.Client, 'create_member')
self.m.StubOutWithMock(neutronclient.Client, 'delete_member')
self.m.StubOutWithMock(neutronclient.Client, 'update_member')
self.m.StubOutWithMock(neutronclient.Client, 'show_member')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
utils.setup_dummy_db()
def create_member(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_member({
'member': {
'pool_id': 'pool123', 'protocol_port': 8080,
'address': '1.2.3.4', 'admin_state_up': True}}
).AndReturn({'member': {'id': 'member5678'}})
snippet = template_format.parse(member_template)
stack = utils.parse_stack(snippet)
return loadbalancer.PoolMember(
'member', snippet['Resources']['member'], stack)
def test_create(self):
rsrc = self.create_member()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('member5678', rsrc.resource_id)
self.m.VerifyAll()
def test_create_optional_parameters(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
neutronclient.Client.create_member({
'member': {
'pool_id': 'pool123', 'protocol_port': 8080,
'weight': 100, 'admin_state_up': False,
'address': '1.2.3.4'}}
).AndReturn({'member': {'id': 'member5678'}})
snippet = template_format.parse(member_template)
snippet['Resources']['member']['Properties']['admin_state_up'] = False
snippet['Resources']['member']['Properties']['weight'] = 100
stack = utils.parse_stack(snippet)
rsrc = loadbalancer.PoolMember(
'member', snippet['Resources']['member'], stack)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('member5678', rsrc.resource_id)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_member()
neutronclient.Client.show_member('member5678').MultipleTimes(
).AndReturn(
{'member': {'admin_state_up': True, 'weight': 5}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertIs(True, rsrc.FnGetAtt('admin_state_up'))
self.assertEqual(5, rsrc.FnGetAtt('weight'))
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_member()
neutronclient.Client.update_member(
'member5678', {'member': {'pool_id': 'pool456'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['pool_id'] = 'pool456'
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
def test_delete(self):
rsrc = self.create_member()
neutronclient.Client.delete_member(u'member5678')
neutronclient.Client.show_member(u'member5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_missing_member(self):
rsrc = self.create_member()
neutronclient.Client.delete_member(u'member5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
@skipIf(neutronclient is None, 'neutronclient unavailable')
class LoadBalancerTest(HeatTestCase):
def setUp(self):
super(LoadBalancerTest, self).setUp()
self.fc = nova_fakes.FakeClient()
self.m.StubOutWithMock(neutronclient.Client, 'create_member')
self.m.StubOutWithMock(neutronclient.Client, 'delete_member')
self.m.StubOutWithMock(clients.OpenStackClients, 'keystone')
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
utils.setup_dummy_db()
def create_load_balancer(self):
clients.OpenStackClients.keystone().AndReturn(
fakes.FakeKeystoneClient())
clients.OpenStackClients.nova("compute").MultipleTimes().AndReturn(
self.fc)
neutronclient.Client.create_member({
'member': {
'pool_id': 'pool123', 'protocol_port': 8080,
'address': '1.2.3.4'}}
).AndReturn({'member': {'id': 'member5678'}})
snippet = template_format.parse(lb_template)
stack = utils.parse_stack(snippet)
return loadbalancer.LoadBalancer(
'lb', snippet['Resources']['lb'], stack)
def test_create(self):
rsrc = self.create_load_balancer()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_update(self):
rsrc = self.create_load_balancer()
neutronclient.Client.delete_member(u'member5678')
neutronclient.Client.create_member({
'member': {
'pool_id': 'pool123', 'protocol_port': 8080,
'address': '4.5.6.7'}}
).AndReturn({'member': {'id': 'memberxyz'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['members'] = ['5678']
scheduler.TaskRunner(rsrc.update, update_template)()
self.m.VerifyAll()
def test_update_missing_member(self):
rsrc = self.create_load_balancer()
neutronclient.Client.delete_member(u'member5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['members'] = []
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
rsrc = self.create_load_balancer()
neutronclient.Client.delete_member(u'member5678')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_missing_member(self):
rsrc = self.create_load_balancer()
neutronclient.Client.delete_member(u'member5678').AndRaise(
loadbalancer.NeutronClientException(status_code=404))
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()<|fim▁end|> | 'subnet_id': 'sub123', 'protocol': u'HTTP',
'name': utils.PhysName('test_stack', 'pool'),
'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}} |
<|file_name|>classowncloudsharp_1_1_exceptions_1_1_o_c_s_response_error.js<|end_file_name|><|fim▁begin|>var classowncloudsharp_1_1_exceptions_1_1_o_c_s_response_error =
[<|fim▁hole|><|fim▁end|> | [ "OCSResponseError", "classowncloudsharp_1_1_exceptions_1_1_o_c_s_response_error.html#a4a8be342eabdb1ff56309de41bb27376", null ],
[ "OCSResponseError", "classowncloudsharp_1_1_exceptions_1_1_o_c_s_response_error.html#abe1faed2e2f100a1d5e5c051413be70a", null ],
[ "OCSResponseError", "classowncloudsharp_1_1_exceptions_1_1_o_c_s_response_error.html#a140bbc4910589e73d076ba7a977b9e88", null ]
]; |
<|file_name|>test_basic.py<|end_file_name|><|fim▁begin|>''' Crawl the running Docker site and verify all links give a 200 OK '''
import unittest
import subprocess
# Placeholder for future python based codi/TURFF
class BasicTests(unittest.TestCase):
''' Base class for testing '''
def setUp(self):
''' Define some unique data for validation '''
pass
def tearDown(self):<|fim▁hole|><|fim▁end|> | ''' Destroy unique data '''
pass |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|> DataSiftDefinition,
DataSiftStream,
DataSiftStreamListener
)<|fim▁end|> | from datasift import (
DataSiftUser, |
<|file_name|>stem to SVD.py<|end_file_name|><|fim▁begin|>#Thanks for the approach https://github.com/ML-Person/My-solution-to-Avito-Challenge-2018 (@nikita)
import pandas as pd
import numpy as np
import gc
import os
import re
import pickle
import string
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from scipy.sparse import hstack, csr_matrix
import lightgbm as lgb
# for text data
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
pd.set_option('max_columns', 84)
import warnings
warnings.filterwarnings('ignore')
PATH_TO_DATA = '/Avito'
train = pd.read_csv(os.path.join(PATH_TO_DATA, 'train.csv'))
test = pd.read_csv(os.path.join(PATH_TO_DATA, 'test.csv'))
'''
item_id - Ad id.
user_id - User id.
region - Ad region.
city - Ad city.
parent_category_name - Top level ad category as classified by Avito's ad model.
category_name - Fine grain ad category as classified by Avito's ad model.
param_1 - Optional parameter from Avito's ad model.
param_2 - Optional parameter from Avito's ad model.
param_3 - Optional parameter from Avito's ad model.
title - Ad title.
description - Ad description.
price - Ad price.
item_seq_number - Ad sequential number for user.
activation_date - Date ad was placed.
user_type - User type.
image - Id code of image. Ties to a jpg file in train_jpg. Not every ad has an image.
image_top_1 - Avito's classification code for the image.
deal_probability - The target variable. This is the likelihood that an ad actually sold something. It's not possible to verify every transaction with certainty, so this column's value can be any float from zero to one.
'''
categorical = [
'image_top_1', 'param_1', 'param_2', 'param_3',
'city', 'region', 'category_name', 'parent_category_name', 'user_type'
]
# easy preprocessing
text_cols = [
'title', 'description', 'param_1', 'param_2', 'param_3',
'city', 'region', 'category_name', 'parent_category_name'
]
for col in text_cols:
for df in [train, test]:
df[col] = df[col].str.replace(r"[^А-Яа-яA-Za-z0-9,!?@\'\`\"\_\n]", ' ')
df[col].fillna("NA", inplace=True)
df[col] = df[col].str.lower()
for df in [train, test]:
df['len_description'] = df['description'].apply(lambda x: len(str(x)))
df['num_desc_punct'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation])) / df['len_description']
for col in ['description', 'title']:
df['num_words_' + col] = df[col].apply(lambda comment: len(comment.split()))
df['num_unique_words_' + col] = df[col].apply(lambda comment: len(set(w for w in comment.split())))
# percentage of unique words
df['words_vs_unique_title'] = df['num_unique_words_title'] / df['num_words_title'] * 100
df['words_vs_unique_description'] = df['num_unique_words_description'] / df['num_words_description'] * 100
# [DUMP] TRAIN + TEST
train.to_csv(os.path.join(PATH_TO_DATA, 'train_all_features.csv'), index=False, encoding='utf-8')
test.to_csv(os.path.join(PATH_TO_DATA, 'test_all_features.csv'), index=False, encoding='utf-8')
del train, test
gc.collect()
train = pd.read_csv(os.path.join(PATH_TO_DATA, 'train.csv'))
test = pd.read_csv(os.path.join(PATH_TO_DATA, 'test.csv'))
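# Stem the Russian title/description text with NLTK's SnowballStemmer so that
# TF-IDF treats inflected forms of the same word as a single token.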
stemmer = SnowballStemmer("russian", ignore_stopwords=False)
train['title_stemm'] = train['title'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['title_stemm'] = test['title'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
train['description_stemm'] = train['description'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['description_stemm'] = test['description'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
train['text'] = train['param_1'] + " " + train['param_2'] + " " + train['param_3'] + " " + \
train['city'] + " " + train['category_name'] + " " + train['parent_category_name']
test['text'] = test['param_1'] + " " + test['param_2'] + " " + test['param_3'] + " " + \
test['city'] + " " + test['category_name'] + " " + test['parent_category_name']
train['text_stemm'] = train['text'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['text_stemm'] = test['text'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
for df in [train, test]:
df.drop(['title', 'description', 'text'], axis=1, inplace=True)
#TF-IDF + SVD
# CountVectorizer for 'title'
title_tfidf = CountVectorizer(stop_words=stopwords.words('russian'), lowercase=True,
token_pattern=r'\w{1,}', ngram_range=(1, 1))
full_tfidf = title_tfidf.fit_transform(train['title_stemm'].values.tolist() + test['title_stemm'].values.tolist())
train_title_tfidf = title_tfidf.transform(train['title_stemm'].values.tolist())
test_title_tfidf = title_tfidf.transform(test['title_stemm'].values.tolist())
### SVD Components ###
n_comp = 10
svd_obj = TruncatedSVD(n_components=n_comp, algorithm='arpack')
svd_obj.fit(full_tfidf)
train_svd = pd.DataFrame(svd_obj.transform(train_title_tfidf))
test_svd = pd.DataFrame(svd_obj.transform(test_title_tfidf))
train_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
test_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
train_svd['item_id'] = train['item_id']
test_svd['item_id'] = test['item_id']
# Merge and delete
train = train.merge(train_svd, on='item_id', how='left')
test = test.merge(test_svd, on='item_id', how='left')
del full_tfidf, train_svd, test_svd
gc.collect()
# TF-IDF for 'description'
desc_tfidf = TfidfVectorizer(stop_words=stopwords.words('russian'), token_pattern=r'\w{1,}',
lowercase=True, ngram_range=(1, 2), norm='l2', smooth_idf=False,
max_features=17000)
full_tfidf = desc_tfidf.fit_transform(train['description_stemm'].values.tolist() + test['description_stemm'].values.tolist())
train_desc_tfidf = desc_tfidf.transform(train['description_stemm'].values.tolist())
test_desc_tfidf = desc_tfidf.transform(test['description_stemm'].values.tolist())
### SVD Components ###
n_comp = 10
svd_obj = TruncatedSVD(n_components=n_comp, algorithm='arpack')
svd_obj.fit(full_tfidf)
train_svd = pd.DataFrame(svd_obj.transform(train_desc_tfidf))
test_svd = pd.DataFrame(svd_obj.transform(test_desc_tfidf))
train_svd.columns = ['svd_description_'+str(i+1) for i in range(n_comp)]
test_svd.columns = ['svd_description_'+str(i+1) for i in range(n_comp)]
train_svd['item_id'] = train['item_id']
test_svd['item_id'] = test['item_id']
# Merge and delete
train = train.merge(train_svd, on='item_id', how='left')
test = test.merge(test_svd, on='item_id', how='left')
del full_tfidf, train_svd, test_svd
gc.collect()
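# NOTE: the stacking and vocabulary steps below also use a third vectorizer
# (text_tfidf) fitted on the concatenated categorical 'text_stemm' column, but
# its definition is missing here. The block below is a minimal, assumed
# reconstruction that mirrors the description vectorizer; max_features and the
# n-gram range are guesses and can be tuned.
text_tfidf = TfidfVectorizer(stop_words=stopwords.words('russian'), token_pattern=r'\w{1,}',
lowercase=True, ngram_range=(1, 2), norm='l2', smooth_idf=False,
max_features=17000)
full_tfidf = text_tfidf.fit_transform(train['text_stemm'].values.tolist() + test['text_stemm'].values.tolist())
train_text_tfidf = text_tfidf.transform(train['text_stemm'].values.tolist())
test_text_tfidf = text_tfidf.transform(test['text_stemm'].values.tolist())
del full_tfidf
gc.collect()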
# [STACKING]
train_tfidf = csr_matrix(hstack([train_title_tfidf, train_desc_tfidf, train_text_tfidf]))
test_tfidf = csr_matrix(hstack([test_title_tfidf, test_desc_tfidf, test_text_tfidf]))
del train_title_tfidf, train_desc_tfidf, train_text_tfidf
del test_title_tfidf, test_desc_tfidf, test_text_tfidf
gc.collect()
vocab = np.hstack([
title_tfidf.get_feature_names(),
desc_tfidf.get_feature_names(),
text_tfidf.get_feature_names()
])
# [DUMP] TF-IDF pickle files + vocabulary
with open(os.path.join(PATH_TO_DATA, 'train_tfidf.pkl'), 'wb') as train_tfidf_pkl:<|fim▁hole|> pickle.dump(test_tfidf, test_tfidf_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'vocab.pkl'), 'wb') as vocab_pkl:
pickle.dump(vocab, vocab_pkl, protocol=2)
del train, train_tfidf, test, test_tfidf, vocab
gc.collect()<|fim▁end|> | pickle.dump(train_tfidf, train_tfidf_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'test_tfidf.pkl'), 'wb') as test_tfidf_pkl: |
<|file_name|>diepempmsk.rs<|end_file_name|><|fim▁begin|>#[doc = "Register `DIEPEMPMSK` reader"]
pub struct R(crate::R<DIEPEMPMSK_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<DIEPEMPMSK_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<DIEPEMPMSK_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<DIEPEMPMSK_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `DIEPEMPMSK` writer"]
pub struct W(crate::W<DIEPEMPMSK_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<DIEPEMPMSK_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0<|fim▁hole|>impl From<crate::W<DIEPEMPMSK_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<DIEPEMPMSK_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `InEpTxfEmpMsk` reader - IN EP Tx FIFO Empty Interrupt Mask Bits"]
pub struct INEPTXFEMPMSK_R(crate::FieldReader<u16, u16>);
impl INEPTXFEMPMSK_R {
pub(crate) fn new(bits: u16) -> Self {
INEPTXFEMPMSK_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for INEPTXFEMPMSK_R {
type Target = crate::FieldReader<u16, u16>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `InEpTxfEmpMsk` writer - IN EP Tx FIFO Empty Interrupt Mask Bits"]
pub struct INEPTXFEMPMSK_W<'a> {
w: &'a mut W,
}
impl<'a> INEPTXFEMPMSK_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | (value as u32 & 0xffff);
self.w
}
}
impl R {
#[doc = "Bits 0:15 - IN EP Tx FIFO Empty Interrupt Mask Bits"]
#[inline(always)]
pub fn in_ep_txf_emp_msk(&self) -> INEPTXFEMPMSK_R {
INEPTXFEMPMSK_R::new((self.bits & 0xffff) as u16)
}
}
impl W {
#[doc = "Bits 0:15 - IN EP Tx FIFO Empty Interrupt Mask Bits"]
#[inline(always)]
pub fn in_ep_txf_emp_msk(&mut self) -> INEPTXFEMPMSK_W {
INEPTXFEMPMSK_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Device IN Endpoint FIFO Empty Interrupt Mask Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [diepempmsk](index.html) module"]
pub struct DIEPEMPMSK_SPEC;
impl crate::RegisterSpec for DIEPEMPMSK_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [diepempmsk::R](R) reader structure"]
impl crate::Readable for DIEPEMPMSK_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [diepempmsk::W](W) writer structure"]
impl crate::Writable for DIEPEMPMSK_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets DIEPEMPMSK to value 0"]
impl crate::Resettable for DIEPEMPMSK_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}<|fim▁end|> | }
} |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>from unittest import skipIf
from django.conf import settings
def skipIfDefaultUser(test_func):
"""
Skip a test if a default user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL == "auth.User", "Default user model in use")(
test_func
)
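# Example usage (assumed): apply as a decorator to a test that only makes sense
# with Django's built-in auth.User model:
#
#   @skipIfCustomUser
#   def test_user_str(self):
#       ...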
def skipIfCustomUser(test_func):
"""
Skip a test if a custom user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL != "auth.User", "Custom user model in use")(
test_func<|fim▁hole|><|fim▁end|> | ) |
<|file_name|>errorcodes.py<|end_file_name|><|fim▁begin|># -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
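# Error code constants for the SCF (Serverless Cloud Function) API.
# A minimal, assumed usage sketch -- compare the `code` attribute of a caught
# TencentCloudSDKException against these constants:
#
#   from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
#   try:
#       client.Invoke(req)
#   except TencentCloudSDKException as err:
#       if err.code == RESOURCENOTFOUND_FUNCTION:
#           ...  # create the function first, then retry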
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Failed to create the API Gateway trigger.
FAILEDOPERATION_APIGATEWAY = 'FailedOperation.ApiGateway'
# Failed to create the trigger.
FAILEDOPERATION_APIGW = 'FailedOperation.Apigw'
# Failed to obtain the APM InstanceId.
FAILEDOPERATION_APMCONFIGINSTANCEID = 'FailedOperation.ApmConfigInstanceId'
# The current async event status does not support this operation. Please try again later.
FAILEDOPERATION_ASYNCEVENTSTATUS = 'FailedOperation.AsyncEventStatus'
# Failed to copy the function.
FAILEDOPERATION_COPYFAILED = 'FailedOperation.CopyFailed'
# Copying to this region is not supported.
FAILEDOPERATION_COPYFUNCTION = 'FailedOperation.CopyFunction'
# Failed to operate on the COS resource.
FAILEDOPERATION_COS = 'FailedOperation.Cos'
# Failed to create the alias.
FAILEDOPERATION_CREATEALIAS = 'FailedOperation.CreateAlias'
# Operation failed.
FAILEDOPERATION_CREATEFUNCTION = 'FailedOperation.CreateFunction'
# Failed to create the namespace.
FAILEDOPERATION_CREATENAMESPACE = 'FailedOperation.CreateNamespace'
# This operation cannot be performed in the current function state.
FAILEDOPERATION_CREATETRIGGER = 'FailedOperation.CreateTrigger'
# This operation cannot be performed in the current debugging state.
FAILEDOPERATION_DEBUGMODESTATUS = 'FailedOperation.DebugModeStatus'
# The execution timeout cannot be updated while in debugging mode.
FAILEDOPERATION_DEBUGMODEUPDATETIMEOUTFAIL = 'FailedOperation.DebugModeUpdateTimeOutFail'
# Failed to delete the alias.
FAILEDOPERATION_DELETEALIAS = 'FailedOperation.DeleteAlias'
# This operation cannot be performed in the current function state. Please retry when the function state is normal.
FAILEDOPERATION_DELETEFUNCTION = 'FailedOperation.DeleteFunction'
# Failed to delete the layer version.
FAILEDOPERATION_DELETELAYERVERSION = 'FailedOperation.DeleteLayerVersion'
# The default namespace cannot be deleted.
FAILEDOPERATION_DELETENAMESPACE = 'FailedOperation.DeleteNamespace'
# Failed to delete the trigger.
FAILEDOPERATION_DELETETRIGGER = 'FailedOperation.DeleteTrigger'
# The code cannot be updated in the current function state. Please update it when the state is normal.
FAILEDOPERATION_FUNCTIONNAMESTATUSERROR = 'FailedOperation.FunctionNameStatusError'
# The function is being deployed, so this operation cannot be performed.
FAILEDOPERATION_FUNCTIONSTATUSERROR = 'FailedOperation.FunctionStatusError'
# This operation cannot be performed in the current function version state. Please retry when the version state is normal.
FAILEDOPERATION_FUNCTIONVERSIONSTATUSNOTACTIVE = 'FailedOperation.FunctionVersionStatusNotActive'
# Failed to get the alias information.
FAILEDOPERATION_GETALIAS = 'FailedOperation.GetAlias'
# Failed to get the function code address.
FAILEDOPERATION_GETFUNCTIONADDRESS = 'FailedOperation.GetFunctionAddress'
# The current account or namespace has an overdue balance. Please retry when it is available.
FAILEDOPERATION_INSUFFICIENTBALANCE = 'FailedOperation.InsufficientBalance'
# Failed to invoke the function.
FAILEDOPERATION_INVOKEFUNCTION = 'FailedOperation.InvokeFunction'
# The namespace already exists. Please do not create it again.
FAILEDOPERATION_NAMESPACE = 'FailedOperation.Namespace'
# Failed to activate the service.
FAILEDOPERATION_OPENSERVICE = 'FailedOperation.OpenService'
# Operation conflict.
FAILEDOPERATION_OPERATIONCONFLICT = 'FailedOperation.OperationConflict'
# Failed to create the scheduled provisioned concurrency task.
FAILEDOPERATION_PROVISIONCREATETIMER = 'FailedOperation.ProvisionCreateTimer'
# Failed to delete the scheduled provisioned concurrency task.
FAILEDOPERATION_PROVISIONDELETETIMER = 'FailedOperation.ProvisionDeleteTimer'
# A provisioned concurrency task is already in progress for this function version. Please try again later.
FAILEDOPERATION_PROVISIONEDINPROGRESS = 'FailedOperation.ProvisionedInProgress'
# Failed to publish the layer version.
FAILEDOPERATION_PUBLISHLAYERVERSION = 'FailedOperation.PublishLayerVersion'
# A version cannot be published in the current function state. Please publish when the state is normal.
FAILEDOPERATION_PUBLISHVERSION = 'FailedOperation.PublishVersion'
# The role does not exist.
FAILEDOPERATION_QCSROLENOTFOUND = 'FailedOperation.QcsRoleNotFound'
# A reserved concurrency setting task is already in progress for this function. Please try again later.
FAILEDOPERATION_RESERVEDINPROGRESS = 'FailedOperation.ReservedInProgress'
# The topic does not exist.
FAILEDOPERATION_TOPICNOTEXIST = 'FailedOperation.TopicNotExist'
# The user concurrency memory quota setting task is in progress. Please try again later.
FAILEDOPERATION_TOTALCONCURRENCYMEMORYINPROGRESS = 'FailedOperation.TotalConcurrencyMemoryInProgress'
# The specified service has not been activated. You can submit a ticket to request activation.
FAILEDOPERATION_UNOPENEDSERVICE = 'FailedOperation.UnOpenedService'
# Failed to update the alias.
FAILEDOPERATION_UPDATEALIAS = 'FailedOperation.UpdateAlias'
# The code cannot be updated in the current function state. Please update it when the state is normal.
FAILEDOPERATION_UPDATEFUNCTIONCODE = 'FailedOperation.UpdateFunctionCode'
# The UpdateFunctionConfiguration operation failed.
FAILEDOPERATION_UPDATEFUNCTIONCONFIGURATION = 'FailedOperation.UpdateFunctionConfiguration'
# Internal error.
INTERNALERROR = 'InternalError'
# Internal error while creating the API Gateway trigger.
INTERNALERROR_APIGATEWAY = 'InternalError.ApiGateway'
# The CKafka API call failed.
INTERNALERROR_CKAFKA = 'InternalError.Ckafka'
# Failed to delete the CMQ trigger.
INTERNALERROR_CMQ = 'InternalError.Cmq'
# Failed to update the trigger.
INTERNALERROR_COS = 'InternalError.Cos'
# ES error.
INTERNALERROR_ES = 'InternalError.ES'
# Internal service exception.
INTERNALERROR_EXCEPTION = 'InternalError.Exception'
# Internal service error.
INTERNALERROR_GETROLEERROR = 'InternalError.GetRoleError'
# Internal system error.
INTERNALERROR_SYSTEM = 'InternalError.System'
# Internal service error.
INTERNALERROR_SYSTEMERROR = 'InternalError.SystemError'
# The FunctionName value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETER_FUNCTIONNAME = 'InvalidParameter.FunctionName'
# The request parameter is invalid.
INVALIDPARAMETER_PAYLOAD = 'InvalidParameter.Payload'
# The RoutingConfig parameter was passed in incorrectly.
INVALIDPARAMETER_ROUTINGCONFIG = 'InvalidParameter.RoutingConfig'
# Incorrect parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# The Action value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ACTION = 'InvalidParameterValue.Action'
# The AdditionalVersionWeights parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_ADDITIONALVERSIONWEIGHTS = 'InvalidParameterValue.AdditionalVersionWeights'
# Deleting the default alias is not supported. Please correct and retry.
INVALIDPARAMETERVALUE_ALIAS = 'InvalidParameterValue.Alias'
# Incorrect ApiGateway parameter.
INVALIDPARAMETERVALUE_APIGATEWAY = 'InvalidParameterValue.ApiGateway'
# The ApmConfig parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_APMCONFIG = 'InvalidParameterValue.ApmConfig'
# The ApmConfigInstanceId parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_APMCONFIGINSTANCEID = 'InvalidParameterValue.ApmConfigInstanceId'
# The ApmConfigRegion parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_APMCONFIGREGION = 'InvalidParameterValue.ApmConfigRegion'
# The Args parameter value is incorrect.
INVALIDPARAMETERVALUE_ARGS = 'InvalidParameterValue.Args'
# The function async retry configuration parameter is invalid.
INVALIDPARAMETERVALUE_ASYNCTRIGGERCONFIG = 'InvalidParameterValue.AsyncTriggerConfig'
# Cdn was passed in incorrectly.
INVALIDPARAMETERVALUE_CDN = 'InvalidParameterValue.Cdn'
# Duplicate CFS configuration item.
INVALIDPARAMETERVALUE_CFSPARAMETERDUPLICATE = 'InvalidParameterValue.CfsParameterDuplicate'
# The CFS configuration value does not conform to the specification.
INVALIDPARAMETERVALUE_CFSPARAMETERERROR = 'InvalidParameterValue.CfsParameterError'
# The CFS parameter format does not conform to the specification.
INVALIDPARAMETERVALUE_CFSSTRUCTIONERROR = 'InvalidParameterValue.CfsStructionError'
# Ckafka was passed in incorrectly.
INVALIDPARAMETERVALUE_CKAFKA = 'InvalidParameterValue.Ckafka'
# The parameter passed in when running the function is incorrect.
INVALIDPARAMETERVALUE_CLIENTCONTEXT = 'InvalidParameterValue.ClientContext'
# Cls was passed in incorrectly.
INVALIDPARAMETERVALUE_CLS = 'InvalidParameterValue.Cls'
# Modifying the CLS configuration requires the Role parameter. Please correct and retry.
INVALIDPARAMETERVALUE_CLSROLE = 'InvalidParameterValue.ClsRole'
# Cmq was passed in incorrectly.
INVALIDPARAMETERVALUE_CMQ = 'InvalidParameterValue.Cmq'
# Code was passed in incorrectly.
INVALIDPARAMETERVALUE_CODE = 'InvalidParameterValue.Code'
# CodeSecret was passed in incorrectly.
INVALIDPARAMETERVALUE_CODESECRET = 'InvalidParameterValue.CodeSecret'
# CodeSource was passed in incorrectly.
INVALIDPARAMETERVALUE_CODESOURCE = 'InvalidParameterValue.CodeSource'
# The Command[Entrypoint] parameter value is incorrect.
INVALIDPARAMETERVALUE_COMMAND = 'InvalidParameterValue.Command'
# The CompatibleRuntimes parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_COMPATIBLERUNTIMES = 'InvalidParameterValue.CompatibleRuntimes'
# The Content parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_CONTENT = 'InvalidParameterValue.Content'
# Cos was passed in incorrectly.
INVALIDPARAMETERVALUE_COS = 'InvalidParameterValue.Cos'
# CosBucketName does not conform to the specification.
INVALIDPARAMETERVALUE_COSBUCKETNAME = 'InvalidParameterValue.CosBucketName'
# The CosBucketRegion value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_COSBUCKETREGION = 'InvalidParameterValue.CosBucketRegion'
# CosObjectName does not conform to the specification.
INVALIDPARAMETERVALUE_COSOBJECTNAME = 'InvalidParameterValue.CosObjectName'
# The CustomArgument parameter exceeds the length limit.
INVALIDPARAMETERVALUE_CUSTOMARGUMENT = 'InvalidParameterValue.CustomArgument'
# DateTime was passed in incorrectly.
INVALIDPARAMETERVALUE_DATETIME = 'InvalidParameterValue.DateTime'
# The DeadLetterConfig value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_DEADLETTERCONFIG = 'InvalidParameterValue.DeadLetterConfig'
# The default namespace cannot be created.
INVALIDPARAMETERVALUE_DEFAULTNAMESPACE = 'InvalidParameterValue.DefaultNamespace'
# Description was passed in incorrectly.
INVALIDPARAMETERVALUE_DESCRIPTION = 'InvalidParameterValue.Description'
# The DNS[OS_NAMESERVER] environment variable is configured incorrectly.
INVALIDPARAMETERVALUE_DNSINFO = 'InvalidParameterValue.DnsInfo'
# Incorrect EipConfig parameter.
INVALIDPARAMETERVALUE_EIPCONFIG = 'InvalidParameterValue.EipConfig'
# The Enable value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ENABLE = 'InvalidParameterValue.Enable'
# Environment was passed in incorrectly.
INVALIDPARAMETERVALUE_ENVIRONMENT = 'InvalidParameterValue.Environment'
# The environment variables exceed the size limit. Please keep them within 4 KB.
INVALIDPARAMETERVALUE_ENVIRONMENTEXCEEDEDLIMIT = 'InvalidParameterValue.EnvironmentExceededLimit'
# Modifying the function's system and runtime environment variables is not supported.
INVALIDPARAMETERVALUE_ENVIRONMENTSYSTEMPROTECT = 'InvalidParameterValue.EnvironmentSystemProtect'
# Incorrect Filters parameter.
INVALIDPARAMETERVALUE_FILTERS = 'InvalidParameterValue.Filters'
# The Function value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_FUNCTION = 'InvalidParameterValue.Function'
# The function does not exist.
INVALIDPARAMETERVALUE_FUNCTIONNAME = 'InvalidParameterValue.FunctionName'
# GitBranch does not conform to the specification.
INVALIDPARAMETERVALUE_GITBRANCH = 'InvalidParameterValue.GitBranch'
# The GitCommitId value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_GITCOMMITID = 'InvalidParameterValue.GitCommitId'
# GitDirectory does not conform to the specification.
INVALIDPARAMETERVALUE_GITDIRECTORY = 'InvalidParameterValue.GitDirectory'
# GitPassword does not conform to the specification.
INVALIDPARAMETERVALUE_GITPASSWORD = 'InvalidParameterValue.GitPassword'
# GitUrl does not conform to the specification.
INVALIDPARAMETERVALUE_GITURL = 'InvalidParameterValue.GitUrl'
# GitUserName does not conform to the specification.
INVALIDPARAMETERVALUE_GITUSERNAME = 'InvalidParameterValue.GitUserName'
# Handler was passed in incorrectly.
INVALIDPARAMETERVALUE_HANDLER = 'InvalidParameterValue.Handler'
# The IdleTimeOut parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_IDLETIMEOUT = 'InvalidParameterValue.IdleTimeOut'
# imageUri was passed in incorrectly.
INVALIDPARAMETERVALUE_IMAGEURI = 'InvalidParameterValue.ImageUri'
# InlineZipFile is invalid.
INVALIDPARAMETERVALUE_INLINEZIPFILE = 'InvalidParameterValue.InlineZipFile'
# The InvokeType value does not conform to the specification. Please correct it and try again.
INVALIDPARAMETERVALUE_INVOKETYPE = 'InvalidParameterValue.InvokeType'
# The L5Enable value does not conform to the specification. Please correct it and try again.
INVALIDPARAMETERVALUE_L5ENABLE = 'InvalidParameterValue.L5Enable'
# The LayerName parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_LAYERNAME = 'InvalidParameterValue.LayerName'
# The Layers parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_LAYERS = 'InvalidParameterValue.Layers'
# Limit was passed in incorrectly.
INVALIDPARAMETERVALUE_LIMIT = 'InvalidParameterValue.Limit'
# The parameter exceeds the length limit.
INVALIDPARAMETERVALUE_LIMITEXCEEDED = 'InvalidParameterValue.LimitExceeded'
# The Memory value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_MEMORY = 'InvalidParameterValue.Memory'
# Incorrect MemorySize.
INVALIDPARAMETERVALUE_MEMORYSIZE = 'InvalidParameterValue.MemorySize'
# The MinCapacity parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_MINCAPACITY = 'InvalidParameterValue.MinCapacity'
# The Name parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_NAME = 'InvalidParameterValue.Name'
# The Namespace parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_NAMESPACE = 'InvalidParameterValue.Namespace'
# Invalid format: Namespace must consist of letters, digits, and the -_ symbols, with a maximum length of 30.
INVALIDPARAMETERVALUE_NAMESPACEINVALID = 'InvalidParameterValue.NamespaceInvalid'
# The NodeSpec parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_NODESPEC = 'InvalidParameterValue.NodeSpec'
# The NodeType parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_NODETYPE = 'InvalidParameterValue.NodeType'
# The offset is invalid.
INVALIDPARAMETERVALUE_OFFSET = 'InvalidParameterValue.Offset'
# Order was passed in incorrectly.
INVALIDPARAMETERVALUE_ORDER = 'InvalidParameterValue.Order'
# The OrderBy value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ORDERBY = 'InvalidParameterValue.OrderBy'
# The input parameter is not valid JSON.
INVALIDPARAMETERVALUE_PARAM = 'InvalidParameterValue.Param'
# The ProtocolType parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_PROTOCOLTYPE = 'InvalidParameterValue.ProtocolType'
# Duplicate cron configuration for scheduled provisioned concurrency.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERCRONCONFIGDUPLICATE = 'InvalidParameterValue.ProvisionTriggerCronConfigDuplicate'
# The TriggerName parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAME = 'InvalidParameterValue.ProvisionTriggerName'
# Duplicate TriggerName.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAMEDUPLICATE = 'InvalidParameterValue.ProvisionTriggerNameDuplicate'
# The ProvisionType parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_PROVISIONTYPE = 'InvalidParameterValue.ProvisionType'
# Incorrect PublicNetConfig parameter.
INVALIDPARAMETERVALUE_PUBLICNETCONFIG = 'InvalidParameterValue.PublicNetConfig'
# Unsupported function version.
INVALIDPARAMETERVALUE_QUALIFIER = 'InvalidParameterValue.Qualifier'
# The Enterprise Edition image instance ID [RegistryId] was passed in incorrectly.
INVALIDPARAMETERVALUE_REGISTRYID = 'InvalidParameterValue.RegistryId'
# RetCode is invalid.
INVALIDPARAMETERVALUE_RETCODE = 'InvalidParameterValue.RetCode'
# The RoutingConfig value does not conform to the specification. Please correct it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ROUTINGCONFIG = 'InvalidParameterValue.RoutingConfig'
# Runtime was passed in incorrectly.
INVALIDPARAMETERVALUE_RUNTIME = 'InvalidParameterValue.Runtime'
# searchkey is not Keyword, Tag, or Runtime.
INVALIDPARAMETERVALUE_SEARCHKEY = 'InvalidParameterValue.SearchKey'
# Incorrect SecretInfo.
INVALIDPARAMETERVALUE_SECRETINFO = 'InvalidParameterValue.SecretInfo'
# ServiceName does not follow the naming convention.
INVALIDPARAMETERVALUE_SERVICENAME = 'InvalidParameterValue.ServiceName'
# The Stamp value does not conform to the specification. Please correct it and try again.
INVALIDPARAMETERVALUE_STAMP = 'InvalidParameterValue.Stamp'
# The start time was passed in incorrectly.
INVALIDPARAMETERVALUE_STARTTIME = 'InvalidParameterValue.StartTime'
# The start date and end date must be specified together.
INVALIDPARAMETERVALUE_STARTTIMEORENDTIME = 'InvalidParameterValue.StartTimeOrEndTime'
# The Status value does not conform to the specification. Please correct it and try again.
INVALIDPARAMETERVALUE_STATUS = 'InvalidParameterValue.Status'
# Incorrect system environment variable.
INVALIDPARAMETERVALUE_SYSTEMENVIRONMENT = 'InvalidParameterValue.SystemEnvironment'
# Invalid TempCosObjectName.
INVALIDPARAMETERVALUE_TEMPCOSOBJECTNAME = 'InvalidParameterValue.TempCosObjectName'
# The TraceEnable value does not conform to the specification. Please correct it and try again.
INVALIDPARAMETERVALUE_TRACEENABLE = 'InvalidParameterValue.TraceEnable'
# The TrackingTarget parameter was entered incorrectly.
INVALIDPARAMETERVALUE_TRACKINGTARGET = 'InvalidParameterValue.TrackingTarget'
# The TriggerCronConfig parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIG = 'InvalidParameterValue.TriggerCronConfig'
# The scheduled trigger interval in TriggerCronConfig is smaller than the specified value.
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIGTIMEINTERVAL = 'InvalidParameterValue.TriggerCronConfigTimeInterval'
# The TriggerDesc parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_TRIGGERDESC = 'InvalidParameterValue.TriggerDesc'
# TriggerName was passed in incorrectly.
INVALIDPARAMETERVALUE_TRIGGERNAME = 'InvalidParameterValue.TriggerName'
# The TriggerProvisionedConcurrencyNum parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_TRIGGERPROVISIONEDCONCURRENCYNUM = 'InvalidParameterValue.TriggerProvisionedConcurrencyNum'
# Type was passed in incorrectly.
INVALIDPARAMETERVALUE_TYPE = 'InvalidParameterValue.Type'
# VPC must be enabled when the CFS configuration is enabled.
INVALIDPARAMETERVALUE_VPCNOTSETWHENOPENCFS = 'InvalidParameterValue.VpcNotSetWhenOpenCfs'
# The WebSocketsParams parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_WEBSOCKETSPARAMS = 'InvalidParameterValue.WebSocketsParams'
# The file is not a standard zip file. Please re-compress it and try again.
INVALIDPARAMETERVALUE_ZIPFILE = 'InvalidParameterValue.ZipFile'
# Base64 decoding of the compressed file failed: `Incorrect padding`. Please correct it and try again.
INVALIDPARAMETERVALUE_ZIPFILEBASE64BINASCIIERROR = 'InvalidParameterValue.ZipFileBase64BinasciiError'
# The number of aliases exceeds the maximum limit.
LIMITEXCEEDED_ALIAS = 'LimitExceeded.Alias'
# CDN usage exceeds the maximum limit.
LIMITEXCEEDED_CDN = 'LimitExceeded.Cdn'
# EIP resources exceed the limit.
LIMITEXCEEDED_EIP = 'LimitExceeded.Eip'
# The number of functions exceeds the maximum limit. You can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_FUNCTION = 'LimitExceeded.Function'
# The number of functions under the same topic exceeds the maximum limit.
LIMITEXCEEDED_FUNCTIONONTOPIC = 'LimitExceeded.FunctionOnTopic'
# FunctionProvisionedConcurrencyMemory has reached the limit. You can submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_FUNCTIONPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionProvisionedConcurrencyMemory'
# The function's reserved concurrency memory exceeds the limit.
LIMITEXCEEDED_FUNCTIONRESERVEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionReservedConcurrencyMemory'
# FunctionTotalProvisionedConcurrencyMemory has reached the limit. You can submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionTotalProvisionedConcurrencyMemory'
# The total provisioned concurrency of the function has reached the limit.
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYNUM = 'LimitExceeded.FunctionTotalProvisionedConcurrencyNum'
# InitTimeout has reached the limit. You can submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_INITTIMEOUT = 'LimitExceeded.InitTimeout'
# The number of layer versions exceeds the maximum limit.
LIMITEXCEEDED_LAYERVERSIONS = 'LimitExceeded.LayerVersions'
# The number of layers exceeds the maximum limit.
LIMITEXCEEDED_LAYERS = 'LimitExceeded.Layers'
# The memory exceeds the maximum limit.
LIMITEXCEEDED_MEMORY = 'LimitExceeded.Memory'
# The message retention time in the function async retry configuration exceeds the limit.
LIMITEXCEEDED_MSGTTL = 'LimitExceeded.MsgTTL'
# The number of namespaces exceeds the maximum limit. You can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_NAMESPACE = 'LimitExceeded.Namespace'
# Offset exceeds the limit.
LIMITEXCEEDED_OFFSET = 'LimitExceeded.Offset'
# The number of scheduled provisioned concurrency tasks exceeds the maximum limit.
LIMITEXCEEDED_PROVISIONTRIGGERACTION = 'LimitExceeded.ProvisionTriggerAction'
# The scheduled trigger interval is below the allowed limit.
LIMITEXCEEDED_PROVISIONTRIGGERINTERVAL = 'LimitExceeded.ProvisionTriggerInterval'
# Quota exceeded.
LIMITEXCEEDED_QUOTA = 'LimitExceeded.Quota'
# The async retry count in the function async retry configuration exceeds the limit.
LIMITEXCEEDED_RETRYNUM = 'LimitExceeded.RetryNum'
# Timeout exceeds the maximum limit.
LIMITEXCEEDED_TIMEOUT = 'LimitExceeded.Timeout'
# The user concurrency memory quota is exceeded.
LIMITEXCEEDED_TOTALCONCURRENCYMEMORY = 'LimitExceeded.TotalConcurrencyMemory'
# The number of triggers exceeds the maximum limit. You can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_TRIGGER = 'LimitExceeded.Trigger'
# UserTotalConcurrencyMemory has reached the limit. You can submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_USERTOTALCONCURRENCYMEMORY = 'LimitExceeded.UserTotalConcurrencyMemory'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# Code was not passed in.
MISSINGPARAMETER_CODE = 'MissingParameter.Code'
# The Runtime field is missing.
MISSINGPARAMETER_RUNTIME = 'MissingParameter.Runtime'
# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'
# The alias is already in use.
RESOURCEINUSE_ALIAS = 'ResourceInUse.Alias'
# Cdn is already in use.
RESOURCEINUSE_CDN = 'ResourceInUse.Cdn'
# Cmq is already in use.
RESOURCEINUSE_CMQ = 'ResourceInUse.Cmq'
# Cos is already in use.
RESOURCEINUSE_COS = 'ResourceInUse.Cos'
# The function already exists.
RESOURCEINUSE_FUNCTION = 'ResourceInUse.Function'
# FunctionName already exists.
RESOURCEINUSE_FUNCTIONNAME = 'ResourceInUse.FunctionName'
# The layer version is in use.
RESOURCEINUSE_LAYERVERSION = 'ResourceInUse.LayerVersion'
# The namespace already exists.
RESOURCEINUSE_NAMESPACE = 'ResourceInUse.Namespace'
# TriggerName already exists.
RESOURCEINUSE_TRIGGER = 'ResourceInUse.Trigger'
# TriggerName already exists.
RESOURCEINUSE_TRIGGERNAME = 'ResourceInUse.TriggerName'
# Insufficient COS resources.
RESOURCEINSUFFICIENT_COS = 'ResourceInsufficient.COS'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The alias does not exist.
RESOURCENOTFOUND_ALIAS = 'ResourceNotFound.Alias'
# The specified AsyncEvent was not found. Please create it and try again.
RESOURCENOTFOUND_ASYNCEVENT = 'ResourceNotFound.AsyncEvent'
# Cdn does not exist.
RESOURCENOTFOUND_CDN = 'ResourceNotFound.Cdn'
# The specified mount target was not found under the specified CFS instance.
RESOURCENOTFOUND_CFSMOUNTINSNOTMATCH = 'ResourceNotFound.CfsMountInsNotMatch'
# The CFS instance status is unavailable.
RESOURCENOTFOUND_CFSSTATUSERROR = 'ResourceNotFound.CfsStatusError'
# The CFS instance and the cloud function are not in the same VPC.
RESOURCENOTFOUND_CFSVPCNOTMATCH = 'ResourceNotFound.CfsVpcNotMatch'
# Ckafka does not exist.
RESOURCENOTFOUND_CKAFKA = 'ResourceNotFound.Ckafka'
# Cmq does not exist.
RESOURCENOTFOUND_CMQ = 'ResourceNotFound.Cmq'
# Cos does not exist.
RESOURCENOTFOUND_COS = 'ResourceNotFound.Cos'
# The demo does not exist.
RESOURCENOTFOUND_DEMO = 'ResourceNotFound.Demo'
# The function does not exist.
RESOURCENOTFOUND_FUNCTION = 'ResourceNotFound.Function'
# The function does not exist.
RESOURCENOTFOUND_FUNCTIONNAME = 'ResourceNotFound.FunctionName'
# The function version does not exist.
RESOURCENOTFOUND_FUNCTIONVERSION = 'ResourceNotFound.FunctionVersion'
# Error getting the CFS mount target information.
RESOURCENOTFOUND_GETCFSMOUNTINSERROR = 'ResourceNotFound.GetCfsMountInsError'
# Error getting the CFS information.
RESOURCENOTFOUND_GETCFSNOTMATCH = 'ResourceNotFound.GetCfsNotMatch'
# The specified ImageConfig was not found. Please create it and try again.
RESOURCENOTFOUND_IMAGECONFIG = 'ResourceNotFound.ImageConfig'
# The layer does not exist.
RESOURCENOTFOUND_LAYER = 'ResourceNotFound.Layer'
# The layer version does not exist.
RESOURCENOTFOUND_LAYERVERSION = 'ResourceNotFound.LayerVersion'
# The namespace does not exist.
RESOURCENOTFOUND_NAMESPACE = 'ResourceNotFound.Namespace'
# The version does not exist.
RESOURCENOTFOUND_QUALIFIER = 'ResourceNotFound.Qualifier'
# The role does not exist.
RESOURCENOTFOUND_ROLE = 'ResourceNotFound.Role'
# The role does not exist.
RESOURCENOTFOUND_ROLECHECK = 'ResourceNotFound.RoleCheck'
# The timer does not exist.
RESOURCENOTFOUND_TIMER = 'ResourceNotFound.Timer'
# The concurrency memory quota resource was not found.
RESOURCENOTFOUND_TOTALCONCURRENCYMEMORY = 'ResourceNotFound.TotalConcurrencyMemory'
# The trigger does not exist.
RESOURCENOTFOUND_TRIGGER = 'ResourceNotFound.Trigger'
# The version does not exist.
RESOURCENOTFOUND_VERSION = 'ResourceNotFound.Version'
<|fim▁hole|>RESOURCENOTFOUND_VPC = 'ResourceNotFound.Vpc'
# 余额不足,请先充值。
RESOURCEUNAVAILABLE_INSUFFICIENTBALANCE = 'ResourceUnavailable.InsufficientBalance'
# Namespace不可用。
RESOURCEUNAVAILABLE_NAMESPACE = 'ResourceUnavailable.Namespace'
# 未授权操作。
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# CAM鉴权失败。
UNAUTHORIZEDOPERATION_CAM = 'UnauthorizedOperation.CAM'
# No permission to access the code.
UNAUTHORIZEDOPERATION_CODESECRET = 'UnauthorizedOperation.CodeSecret'
# No permission.
UNAUTHORIZEDOPERATION_CREATETRIGGER = 'UnauthorizedOperation.CreateTrigger'
# Operation without permission.
UNAUTHORIZEDOPERATION_DELETEFUNCTION = 'UnauthorizedOperation.DeleteFunction'
# No permission.
UNAUTHORIZEDOPERATION_DELETETRIGGER = 'UnauthorizedOperation.DeleteTrigger'
# This API was not called from the console.
UNAUTHORIZEDOPERATION_NOTMC = 'UnauthorizedOperation.NotMC'
# Region error.
UNAUTHORIZEDOPERATION_REGION = 'UnauthorizedOperation.Region'
# No permission to access your COS resources.
UNAUTHORIZEDOPERATION_ROLE = 'UnauthorizedOperation.Role'
# The Appid of TempCos does not match the APPID of the requesting account.
UNAUTHORIZEDOPERATION_TEMPCOSAPPID = 'UnauthorizedOperation.TempCosAppid'
# This operation cannot be performed.
UNAUTHORIZEDOPERATION_UPDATEFUNCTIONCODE = 'UnauthorizedOperation.UpdateFunctionCode'
# The operation is not supported.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
# The resource still has an alias bound to it, so the current operation is not supported. Please unbind the alias and try again.
UNSUPPORTEDOPERATION_ALIASBIND = 'UnsupportedOperation.AliasBind'
# The specified AsyncRunEnable configuration is not supported yet. Please correct it and try again.
UNSUPPORTEDOPERATION_ASYNCRUNENABLE = 'UnsupportedOperation.AsyncRunEnable'
# CDN is not supported.
UNSUPPORTEDOPERATION_CDN = 'UnsupportedOperation.Cdn'
# The COS operation is not supported.
UNSUPPORTEDOPERATION_COS = 'UnsupportedOperation.Cos'
# The specified EipFixed configuration is not supported yet.
UNSUPPORTEDOPERATION_EIPFIXED = 'UnsupportedOperation.EipFixed'
# This region is not supported.
UNSUPPORTEDOPERATION_REGION = 'UnsupportedOperation.Region'
# The Trigger operation is not supported.
UNSUPPORTEDOPERATION_TRIGGER = 'UnsupportedOperation.Trigger'
# The specified configuration is not supported yet. Please correct it and try again.
UNSUPPORTEDOPERATION_UPDATEFUNCTIONEVENTINVOKECONFIG = 'UnsupportedOperation.UpdateFunctionEventInvokeConfig'
# The specified VpcConfig configuration is not supported yet.
UNSUPPORTEDOPERATION_VPCCONFIG = 'UnsupportedOperation.VpcConfig'<|fim▁end|> | # VPC或子网不存在。 |
<|file_name|>TestMetaChangeFromDefault.py<|end_file_name|><|fim▁begin|># pylint: disable=preferred-module # FIXME: remove once migrated per GH-725
import unittest
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.MetaChangeFromDefaultRule import MetaChangeFromDefaultRule
from ansiblelint.testing import RunFromText
DEFAULT_GALAXY_INFO = '''
galaxy_info:
author: your name
description: your description
company: your company (optional)
license: license (GPLv2, CC-BY, etc)
'''
class TestMetaChangeFromDefault(unittest.TestCase):
collection = RulesCollection()
collection.register(MetaChangeFromDefaultRule())<|fim▁hole|>
def setUp(self):
self.runner = RunFromText(self.collection)
def test_default_galaxy_info(self):
results = self.runner.run_role_meta_main(DEFAULT_GALAXY_INFO)
self.assertIn("Should change default metadata: author",
str(results))
self.assertIn("Should change default metadata: description",
str(results))
self.assertIn("Should change default metadata: company",
str(results))
self.assertIn("Should change default metadata: license",
str(results))<|fim▁end|> | |
<|file_name|>dart.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Dartlang module"""
from contextlib import suppress
from gettext import gettext as _
import logging
import os
import re
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.tools import add_env_to_user, MainLoop, get_current_arch, ChecksumType
from umake.ui import UI
logger = logging.getLogger(__name__)
_supported_archs = ['i386', 'amd64']
class DartCategory(umake.frameworks.BaseCategory):
def __init__(self):
super().__init__(name="Dart", description=_("Dartlang Development Environment"), logo_path=None)
class DartLangEditorRemoval(umake.frameworks.baseinstaller.BaseInstaller):
def __init__(self, **kwargs):
super().__init__(name="Dart Editor", description=_("Dart SDK with editor (not supported upstream anymore)"),
download_page=None, only_on_archs=_supported_archs, only_for_removal=True, **kwargs)
class DartLang(umake.frameworks.baseinstaller.BaseInstaller):
def __init__(self, **kwargs):
super().__init__(name="Dart SDK", description=_("Dart SDK (default)"), is_category_default=True,
only_on_archs=_supported_archs,
download_page="https://raw.githubusercontent.com/dart-lang/sdk/master/CHANGELOG.md",
dir_to_decompress_in_tarball="dart-sdk",
checksum_type=ChecksumType.sha256,
required_files_path=[os.path.join("bin", "dart")],
**kwargs)
arch_trans = {
"amd64": "x64",
"i386": "ia32"
# TODO: add arm
}
def parse_download_link(self, line, in_download):
"""Parse Dart SDK download links"""
in_download = False
p = re.search(r"^##\s(\d\S+)", line)
if p is not None:
in_download = True
else:
in_download = False
if in_download:
with suppress(AttributeError):
self.new_download_url = "https://storage.googleapis.com/dart-archive/channels/stable/" +\
"release/{}/sdk/".format(p.group(1)) +\
"dartsdk-linux-{}-release.zip".format(self.arch_trans[get_current_arch()]) +\
".sha256sum"
return ((None, None), in_download)
@MainLoop.in_mainloop_thread
def get_sha_and_start_download(self, download_result):
res = download_result[self.new_download_url]
checksum = res.buffer.getvalue().decode('utf-8').split()[0]
# you get and store self.download_url
url = re.sub('.sha256sum', '', self.new_download_url)
self.check_data_and_start_download(url, checksum)
def post_install(self):
"""Add go necessary env variables"""
add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}})
UI.delayed_display(DisplayMessage(self.RELOGIN_REQUIRE_MSG.format(self.name)))
class FlutterLang(umake.frameworks.baseinstaller.BaseInstaller):
def __init__(self, **kwargs):
super().__init__(name="Flutter SDK", description=_("Flutter SDK"),
only_on_archs=_supported_archs,
download_page="https://api.flutter.dev/flutter/footer.js",
dir_to_decompress_in_tarball="flutter",
required_files_path=[os.path.join("bin", "flutter")],
**kwargs)
def parse_download_link(self, line, in_download):
"""Parse Flutter SDK download links"""
url = None
in_download = False
if 'Flutter ' in line:
p = re.search(r"Flutter\s(\S+)", line)
if p is not None:
in_download = True
if in_download:
with suppress(AttributeError):
url = "https://storage.googleapis.com/flutter_infra/releases/stable/linux/" +\
"flutter_linux_v{}-stable.tar.xz".format(p.group(1))
return ((url, None), in_download)
def post_install(self):<|fim▁hole|> UI.delayed_display(DisplayMessage(self.RELOGIN_REQUIRE_MSG.format(self.name)))<|fim▁end|> | """Add flutter necessary env variables"""
add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}}) |
<|file_name|>TestPortProvider.java<|end_file_name|><|fim▁begin|>package org.jboss.resteasy.test;
import org.jboss.resteasy.client.jaxrs.ResteasyClientBuilder;
import org.jboss.resteasy.client.jaxrs.ResteasyWebTarget;
import org.jboss.resteasy.util.PortProvider;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import javax.ws.rs.client.ClientBuilder;
/**
* Test utility class
*
* @author <a href="[email protected]">Justin Edelson</a>
* @version $Revision$
*/
public class TestPortProvider
{
/**
* Creates a ResteasyWebTarget using base request path.
*/
public static ResteasyWebTarget createTarget(String path)
{
return (ResteasyWebTarget) ClientBuilder.newClient().target(generateURL(path));
}
/**
* Create a Resteasy client proxy with an empty base request path.
*
* @param clazz the client interface class
* @return the proxy object
*/
public static <T> T createProxy(Class<T> clazz)
{
return createProxy(clazz, generateBaseUrl());
}
/**
* Create a Resteasy client proxy.
*
* @param clazz the client interface class
* @return the proxy object
* @path the base request path
*/
public static <T> T createProxy(Class<T> clazz, String url)
{
ResteasyWebTarget target = (ResteasyWebTarget) ResteasyClientBuilder.newClient().target(url);
return target.proxy(clazz);
}
/**
* Create a URI for the provided path, using the configured port
*
* @param path the request path
* @return a full URI
*/
public static URI createURI(String path)
{
return URI.create(generateURL(path));
}
/**
* Create a URL for the provided path, using the configured port
*
* @param path the request path
* @return a full URL
*/
public static URL createURL(String path) throws MalformedURLException
{
return new URL(generateURL(path));
}
/**
* Generate a base URL incorporating the configured port.
*
* @return a full URL
*/
public static String generateBaseUrl()
{
return generateURL("");
}
/**
* Generate a URL incorporating the configured port.
*
* @param path the path
* @return a full URL
*/
public static String generateURL(String path)
{
return String.format("http://%s:%d%s", getHost(), getPort(), path);<|fim▁hole|> * then a system property (org.jboss.resteasy.port), and finally the default port (8081).
*
* @return the port number specified in either the environment or system properties
*/
public static int getPort()
{
return PortProvider.getPort();
}
/**
* Look up the configured hostname, first checking an environment variable (RESTEASY_HOST),
* then a system property (org.jboss.resteasy.host), and finally the default hostname (localhost).
*
* @return the host specified in either the environment or system properties
*/
public static String getHost()
{
return PortProvider.getHost();
}
}<|fim▁end|> | }
/**
* Look up the configured port number, first checking an environment variable (RESTEASY_PORT), |
<|file_name|>CallbackUtils.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | declare function execCallbacks(cbs: Array<Function>, context?: any): void;
export { execCallbacks }; |
<|file_name|>create_azure_ad_context_feed.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Executable sample for creating a Azure AD Context Feed.
Creating other feeds requires changing this sample code.
"""
import argparse
import json
from typing import Any, Mapping
from google.auth.transport import requests
from common import chronicle_auth
from common import regions
CHRONICLE_API_BASE_URL = "https://backstory.googleapis.com"
def create_azure_ad_context_feed(http_session: requests.AuthorizedSession,
tokenendpoint: str, clientid: str,
clientsecret: str, retrievedevices: bool,
retrievegroups: bool) -> Mapping[str, Any]:
"""Creates a new Azure AD Context feed.
Args:
http_session: Authorized session for HTTP requests.
tokenendpoint: A string which represents endpoint to connect to.
clientid: A string which represents Id of the credential to use.
clientsecret: A string which represents secret of the credential to use.
retrievedevices: A boolean to indicate whether to retrieve devices or not.
retrievegroups: A boolean to indicate whether to retrieve groups or not.
Returns:
New Azure AD Feed.
Raises:
requests.exceptions.HTTPError: HTTP request resulted in an error<|fim▁hole|> url = f"{CHRONICLE_API_BASE_URL}/v1/feeds/"
body = {
"details": {
"feedSourceType": "API",
"logType": "AZURE_AD_CONTEXT",
"azureAdContextSettings": {
"authentication": {
"tokenEndpoint": tokenendpoint,
"clientId": clientid,
"clientSecret": clientsecret
},
"retrieveDevices": retrievedevices,
"retrieveGroups": retrievegroups
}
}
}
response = http_session.request("POST", url, json=body)
# Expected server response:
# {
# "name": "feeds/e0eb5fb0-8fbd-4f0f-b063-710943ad7812",
# "details": {
# "logType": "AZURE_AD_CONTEXT",
# "feedSourceType": "API",
# "azureAdContextSettings": {
# "authentication": {
# "tokenEndpoint": "tokenendpoint.example.com",
# "clientId": "clientid_example",
# "clientSecret": "clientsecret_example"
# },
# "retrieveDevices": true
# }
# },
# "feedState": "PENDING_ENABLEMENT"
# }
if response.status_code >= 400:
print(response.text)
response.raise_for_status()
return response.json()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
chronicle_auth.add_argument_credentials_file(parser)
regions.add_argument_region(parser)
parser.add_argument(
"-te",
"--tokenendpoint",
type=str,
required=True,
help="token endpoint")
parser.add_argument(
"-ci",
"--clientid",
type=str,
required=True,
help="client id")
parser.add_argument(
"-cs",
"--clientsecret",
type=str,
required=True,
help="client secret")
parser.add_argument(
"-rd",
"--retrievedevices",
type=bool,
required=True,
help="retrieve devices")
parser.add_argument(
"-rg",
"--retrievegroups",
type=str,
required=True,
help="retrieve groups")
args = parser.parse_args()
CHRONICLE_API_BASE_URL = regions.url(CHRONICLE_API_BASE_URL, args.region)
session = chronicle_auth.initialize_http_session(args.credentials_file)
new_feed = create_azure_ad_context_feed(session, args.tokenendpoint,
args.clientid, args.clientsecret,
args.retrievedevices,
args.retrievegroups)
print(json.dumps(new_feed, indent=2))<|fim▁end|> | (response.status_code >= 400).
""" |
<|file_name|>add-credit-card.directive.ts<|end_file_name|><|fim▁begin|>/*
* Copyright (c) [2015] - [2017] Red Hat, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Red Hat, Inc. - initial API and implementation
*/
'use strict';
interface ICreditCardElement extends ng.IAugmentedJQuery {
card: Function;
}
/**
* Defines a directive for creating a credit card component.
* @author Oleksii Kurinnyi
*/
export class AddCreditCard {
$timeout: ng.ITimeoutService;
restrict: string = 'E';
replace: boolean = false;
templateUrl: string = 'app/billing/card-info/add-credit-card/add-credit-card.html';
bindToController: boolean = true;
controller: string = 'AddCreditCardController';
controllerAs: string = 'addCreditCardController';
scope: {
[propName: string]: string
};
/**
* Default constructor that is using resource
* @ngInject for Dependency injection
*/
constructor ($timeout: ng.ITimeoutService) {
this.$timeout = $timeout;
this.scope = {
creditCard: '='
};
}
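
  /**
   * Illustrative template usage (a sketch; the registered directive name is
   * assumed to be `add-credit-card` -- registration happens outside this file):
   *
   *   <add-credit-card credit-card="ctrl.creditCard"></add-credit-card>
   */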
link($scope: ng.IScope, $element: ICreditCardElement): void {
($element.find('.addCreditCardForm') as ICreditCardElement).card({
// a selector or jQuery object for the container
// where you want the card to appear
container: '.card-wrapper', // *required*
numberInput: 'input[name="deskcardNumber"]', // optional — default input[name="number"]
expiryInput: 'input[name="deskexpires"]', // optional — default input[name="expiry"]
cvcInput: 'input[name="deskcvv"]', // optional — default input[name="cvc"]
nameInput: 'input[name="deskcardholder"]', // optional - defaults input[name="name"]
<|fim▁hole|> });
let deregistrationFn = $scope.$watch(() => { return $element.find('input[name="deskcardNumber"]').is(':visible'); }, (visible) => {
if (visible) {
deregistrationFn();
this.$timeout(() => {
$element.find('input[name="deskcardNumber"]').focus();
}, 100);
}
});
}
}<|fim▁end|> | // width: 200, // optional — default 350px
formatting: true // optional - default true |
<|file_name|>focusandshoot.py<|end_file_name|><|fim▁begin|>import sqlite3
import RPi.GPIO as GPIO
import os, sys, time
conn = sqlite3.connect( os.path.join( os.path.dirname(os.path.realpath(sys.argv[0])), 'db/timelapsecontroller.db'))
conn.row_factory = sqlite3.Row
sleep=2
def set_pid(pid=None):
c = conn.cursor()
try:
# Update the DB counter
c.execute("UPDATE timelapseconfig SET pid=?", ( int(pid), ) )
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
# Save (commit) the changes
conn.commit()
print "Set the PID to be ", pid
def wakeup():
#Using Port 6 as Ground
#Port 7 is Live
#Sets up GPIO Pin 7 to Output
GPIO.setup(7, GPIO.OUT)
#Turns on GPIO Pin 7 - Enables Power to Pin 7 for focus / wake up.
GPIO.output(7, True)
time.sleep(2)
GPIO.output(7, False)
def running():<|fim▁hole|> config = c.fetchone()
if config['running'] and config['count'] < config['target']:
print "Running ({} of {})".format(config['count'], config['target'])
return True
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
return False
def getsleep():
c = conn.cursor()
try:
c.execute('SELECT * FROM timelapseconfig')
config = c.fetchone()
return config['sleep']
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
def shoot():
#Sets up GPIO Pin 11 to Output
GPIO.setup(11, GPIO.OUT)
#Pause for 2 Seconds (Hold Fire for 2 Seconds)
#Turns on GPIO Pin 11 - Enables Power to Pin 11 to Shoot
GPIO.output(11, True)
time.sleep(2)
GPIO.output(11, False)
def updatecounter():
c = conn.cursor()
try:
# Update the DB counter
c.execute("UPDATE timelapseconfig set count=count+1")
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
# Save (commit) the changes
conn.commit()
print "Incrementing counter"
if __name__ == "__main__":
#Set the Board Mode
GPIO.setmode(GPIO.BOARD)
#Write (set) PID to config
set_pid(os.getpid())
while True:
if ( running() ):
wakeup()
shoot()
updatecounter()
#Pause for configured # of seconds (default 2)
sleep = getsleep()
print "Sleeping for %r seconds.." % sleep
time.sleep(sleep)
#Write (unset) PID to config
set_pid(None)
# close the DB conn
conn.close()
#Stops the script and End of script clean up of the GPIO Port
GPIO.cleanup()<|fim▁end|> | c = conn.cursor()
try:
c.execute('SELECT * FROM timelapseconfig') |
<|file_name|>collect.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import logging
import os
import sys
from typing import List, Union
import numpy as np
from ludwig.api import LudwigModel
from ludwig.backend import ALL_BACKENDS, LOCAL, Backend
from ludwig.constants import FULL, TEST, TRAINING, VALIDATION
from ludwig.contrib import contrib_command
from ludwig.globals import LUDWIG_VERSION
from ludwig.utils.print_utils import (logging_level_registry, print_boxed,
print_ludwig)
from ludwig.utils.strings_utils import make_safe_filename
logger = logging.getLogger(__name__)
def collect_activations(
model_path: str,
layers: List[str],
dataset: str,
data_format: str = None,
split: str = FULL,
batch_size: int = 128,
output_directory: str = 'results',
gpus: List[str] = None,
        gpu_memory_limit: int = None,
allow_parallel_threads: bool = True,
backend: Union[Backend, str] = None,
debug: bool = False,
**kwargs
) -> List[str]:
"""
Uses the pretrained model to collect the tensors corresponding to a
datapoint in the dataset. Saves the tensors to the experiment directory
# Inputs
:param model_path: (str) filepath to pre-trained model.
:param layers: (List[str]) list of strings for layer names in the model
to collect activations.
:param dataset: (str) source
containing the data to make predictions.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
:param split: (str, default: `full`) split on which
to perform predictions. Valid values are `'training'`, `'validation'`,
`'test'` and `'full'`.
:param batch_size: (int, default `128`) size of batches for processing.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param gpus: (list, default: `None`) list of GPUs that are available
for training.
:param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
allocate per GPU device.
:param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
to use multithreading parallelism to improve performance at
the cost of determinism.
:param backend: (Union[Backend, str]) `Backend` or string name
of backend to use to execute preprocessing / training steps.
    :param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (List[str]) list of filepath to `*.npy` files containing
the activations.
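
    # Example

    A minimal illustrative call; the model path, layer name and dataset path
    below are placeholders, not values shipped with Ludwig:

    ```python
    npy_files = collect_activations(
        model_path='results/api_experiment_run/model',
        layers=['fc_layer_1'],
        dataset='my_dataset.csv'
    )
    ```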
"""
    logger.info('Dataset path: {}'.format(dataset))
logger.info('Model path: {}'.format(model_path))
logger.info('Output path: {}'.format(output_directory))
logger.info('\n')
model = LudwigModel.load(
model_path,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
backend=backend
)
# collect activations
print_boxed('COLLECT ACTIVATIONS')
collected_tensors = model.collect_activations(
layers,
dataset,
data_format=data_format,
split=split,
batch_size=batch_size,
debug=debug
)
# saving<|fim▁hole|> os.makedirs(output_directory, exist_ok=True)
saved_filenames = save_tensors(collected_tensors, output_directory)
logger.info('Saved to: {0}'.format(output_directory))
return saved_filenames
def collect_weights(
model_path: str,
tensors: List[str],
output_directory: str = 'results',
debug: bool = False,
**kwargs
) -> List[str]:
"""
Loads a pretrained model and collects weights.
# Inputs
:param model_path: (str) filepath to pre-trained model.
:param tensors: (list, default: `None`) List of tensor names to collect
weights
:param output_directory: (str, default: `'results'`) the directory where
collected weights will be stored.
    :param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (List[str]) list of filepath to `*.npy` files containing
the weights.
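
    # Example

    Illustrative call; the model path and tensor name are placeholders:

    ```python
    npy_files = collect_weights('results/api_experiment_run/model',
                                ['fc_layer_1/weights'])
    ```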
"""
logger.info('Model path: {}'.format(model_path))
logger.info('Output path: {}'.format(output_directory))
logger.info('\n')
model = LudwigModel.load(model_path)
# collect weights
print_boxed('COLLECT WEIGHTS')
collected_tensors = model.collect_weights(tensors)
# saving
os.makedirs(output_directory, exist_ok=True)
saved_filenames = save_tensors(collected_tensors, output_directory)
logger.info('Saved to: {0}'.format(output_directory))
return saved_filenames
def save_tensors(collected_tensors, output_directory):
filenames = []
for tensor_name, tensor_value in collected_tensors:
np_filename = os.path.join(
output_directory,
make_safe_filename(tensor_name) + '.npy'
)
np.save(np_filename, tensor_value.numpy())
filenames.append(np_filename)
return filenames
def print_model_summary(
model_path: str,
**kwargs
) -> None:
"""
Loads a pretrained model and prints names of weights and layers activations.
# Inputs
:param model_path: (str) filepath to pre-trained model.
# Return
:return: (`None`)
"""
model = LudwigModel.load(model_path)
collected_tensors = model.collect_weights()
names = [name for name, w in collected_tensors]
keras_model = model.model.get_connected_model(training=False)
keras_model.summary()
print('\nLayers:\n')
for layer in keras_model.layers:
print(layer.name)
print('\nWeights:\n')
for name in names:
print(name)
def cli_collect_activations(sys_argv):
"""Command Line Interface to communicate with the collection of tensors and
there are several options that can specified when calling this function:
--data_csv: Filepath for the input csv
--data_hdf5: Filepath for the input hdf5 file, if there is a csv file, this
is not read
--d: Refers to the dataset type of the file being read, by default is
*generic*
--s: Refers to the split of the data, can be one of: train, test,
validation, full
--m: Input model that is necessary to collect to the tensors, this is a
required *option*
--t: Tensors to collect
--od: Output directory of the model, defaults to results
--bs: Batch size
--g: Number of gpus that are to be used
--gf: Fraction of each GPUs memory to use.
--dbg: Debug if the model is to be started with python debugger
--v: Verbose: Defines the logging level that the user will be exposed to
"""
parser = argparse.ArgumentParser(
description='This script loads a pretrained model and uses it collect '
'tensors for each datapoint in the dataset.',
prog='ludwig collect_activations',
usage='%(prog)s [options]')
# ---------------
# Data parameters
# ---------------
parser.add_argument(
'--dataset',
help='input data file path',
required=True
)
parser.add_argument(
'--data_format',
help='format of the input data',
default='auto',
choices=['auto', 'csv', 'excel', 'feather', 'fwf', 'hdf5',
                 'html', 'json', 'jsonl', 'parquet', 'pickle', 'sas',
'spss', 'stata', 'tsv']
)
parser.add_argument(
'-s',
'--split',
default=FULL,
choices=[TRAINING, VALIDATION, TEST, FULL],
help='the split to obtain the model activations from'
)
# ----------------
# Model parameters
# ----------------
parser.add_argument(
'-m',
'--model_path',
help='model to load',
required=True
)
parser.add_argument(
'-lyr',
'--layers',
help='tensors to collect',
nargs='+',
required=True
)
# -------------------------
# Output results parameters
# -------------------------
parser.add_argument(
'-od',
'--output_directory',
type=str,
default='results',
help='directory that contains the results'
)
# ------------------
# Generic parameters
# ------------------
parser.add_argument(
'-bs',
'--batch_size',
type=int,
default=128,
help='size of batches'
)
# ------------------
# Runtime parameters
# ------------------
parser.add_argument(
'-g',
'--gpus',
type=int,
default=0,
help='list of gpu to use'
)
parser.add_argument(
'-gml',
'--gpu_memory_limit',
type=int,
default=None,
help='maximum memory in MB to allocate per GPU device'
)
parser.add_argument(
'-dpt',
'--disable_parallel_threads',
action='store_false',
dest='allow_parallel_threads',
help='disable TensorFlow from using multithreading for reproducibility'
)
parser.add_argument(
"-b",
"--backend",
help='specifies backend to use for parallel / distributed execution, '
'defaults to local execution or Horovod if called using horovodrun',
choices=ALL_BACKENDS,
)
parser.add_argument(
'-dbg',
'--debug',
action='store_true',
default=False,
help='enables debugging mode'
)
parser.add_argument(
'-l',
'--logging_level',
default='info',
help='the level of logging to use',
choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
)
args = parser.parse_args(sys_argv)
args.logging_level = logging_level_registry[args.logging_level]
logging.getLogger('ludwig').setLevel(
args.logging_level
)
global logger
logger = logging.getLogger('ludwig.collect')
print_ludwig('Collect Activations', LUDWIG_VERSION)
collect_activations(**vars(args))
def cli_collect_weights(sys_argv):
"""Command Line Interface to collecting the weights for the model
--m: Input model that is necessary to collect to the tensors, this is a
required *option*
--t: Tensors to collect
--od: Output directory of the model, defaults to results
--dbg: Debug if the model is to be started with python debugger
--v: Verbose: Defines the logging level that the user will be exposed to
"""
parser = argparse.ArgumentParser(
description='This script loads a pretrained model '
'and uses it collect weights.',
prog='ludwig collect_weights',
usage='%(prog)s [options]'
)
# ----------------
# Model parameters
# ----------------
parser.add_argument(
'-m',
'--model_path',
help='model to load',
required=True
)
parser.add_argument(
'-t',
'--tensors',
help='tensors to collect',
nargs='+',
required=True
)
# -------------------------
# Output results parameters
# -------------------------
parser.add_argument(
'-od',
'--output_directory',
type=str,
default='results',
help='directory that contains the results'
)
# ------------------
# Runtime parameters
# ------------------
parser.add_argument(
'-dbg',
'--debug',
action='store_true',
default=False,
help='enables debugging mode'
)
parser.add_argument(
'-l',
'--logging_level',
default='info',
help='the level of logging to use',
choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
)
args = parser.parse_args(sys_argv)
args.logging_level = logging_level_registry[args.logging_level]
logging.getLogger('ludwig').setLevel(
args.logging_level
)
global logger
logger = logging.getLogger('ludwig.collect')
print_ludwig('Collect Weights', LUDWIG_VERSION)
collect_weights(**vars(args))
def cli_collect_summary(sys_argv):
"""Command Line Interface to collecting a summary of the model layers and weights.
--m: Input model that is necessary to collect to the tensors, this is a
required *option*
--v: Verbose: Defines the logging level that the user will be exposed to
"""
parser = argparse.ArgumentParser(
description='This script loads a pretrained model '
'and prints names of weights and layers activations '
'to use with other collect commands',
prog='ludwig collect_summary',
usage='%(prog)s [options]'
)
# ----------------
# Model parameters
# ----------------
parser.add_argument(
'-m',
'--model_path',
help='model to load',
required=True
)
# ------------------
# Runtime parameters
# ------------------
parser.add_argument(
'-l',
'--logging_level',
default='info',
help='the level of logging to use',
choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
)
args = parser.parse_args(sys_argv)
args.logging_level = logging_level_registry[args.logging_level]
logging.getLogger('ludwig').setLevel(
args.logging_level
)
global logger
logger = logging.getLogger('ludwig.collect')
print_ludwig('Collect Summary', LUDWIG_VERSION)
print_model_summary(**vars(args))
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == 'activations':
contrib_command("collect_activations", *sys.argv)
cli_collect_activations(sys.argv[2:])
elif sys.argv[1] == 'weights':
contrib_command("collect_weights", *sys.argv)
cli_collect_weights(sys.argv[2:])
elif sys.argv[1] == 'names':
contrib_command("collect_summary", *sys.argv)
cli_collect_summary(sys.argv[2:])
else:
print('Unrecognized command')
else:
print('Unrecognized command')<|fim▁end|> | |
<|file_name|>loads.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -C no-prepopulate-passes
<|fim▁hole|> d: u8,
}
// CHECK-LABEL: @borrow
#[no_mangle]
pub fn borrow(x: &i32) -> &i32 {
// CHECK: load i32** %x{{.*}}, !nonnull
x
}
// CHECK-LABEL: @_box
#[no_mangle]
pub fn _box(x: Box<i32>) -> i32 {
// CHECK: load i32** %x{{.*}}, !nonnull
*x
}
// CHECK-LABEL: small_array_alignment
// The array is loaded as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
pub fn small_array_alignment(x: [i8; 4]) -> [i8; 4] {
// CHECK: [[VAR:%[0-9]+]] = load i32* %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
// CHECK-LABEL: small_struct_alignment
// The struct is loaded as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
pub fn small_struct_alignment(x: Bytes) -> Bytes {
// CHECK: [[VAR:%[0-9]+]] = load i32* %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}<|fim▁end|> | pub struct Bytes {
a: u8,
b: u8,
c: u8, |
<|file_name|>artifactSource.js<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is
* regenerated.
*/
'use strict';
const models = require('./index');
/**
* Properties of an artifact source.
*
* @extends models['Resource']
*/
class ArtifactSource extends models['Resource'] {
/**
* Create a ArtifactSource.
* @member {string} [displayName] The artifact source's display name.
* @member {string} [uri] The artifact source's URI.
* @member {string} [sourceType] The artifact source's type. Possible values
* include: 'VsoGit', 'GitHub'
* @member {string} [folderPath] The folder containing artifacts.
* @member {string} [armTemplateFolderPath] The folder containing Azure
* Resource Manager templates.
* @member {string} [branchRef] The artifact source's branch reference.
* @member {string} [securityToken] The security token to authenticate to the
* artifact source.
* @member {string} [status] Indicates if the artifact source is enabled
* (values: Enabled, Disabled). Possible values include: 'Enabled',
* 'Disabled'
* @member {date} [createdDate] The artifact source's creation date.
* @member {string} [provisioningState] The provisioning status of the
* resource.
* @member {string} [uniqueIdentifier] The unique immutable identifier of a
* resource (Guid).
*/
constructor() {
super();
}
/**
* Defines the metadata of ArtifactSource
*
* @returns {object} metadata of ArtifactSource
*
*/
mapper() {
return {
required: false,
serializedName: 'ArtifactSource',
type: {
name: 'Composite',
className: 'ArtifactSource',
modelProperties: {
id: {
required: false,
readOnly: true,
serializedName: 'id',
type: {
name: 'String'
}
},
name: {
required: false,
readOnly: true,
serializedName: 'name',
type: {
name: 'String'
}
},
type: {
required: false,
readOnly: true,
serializedName: 'type',
type: {
name: 'String'
}
},
location: {
required: false,
serializedName: 'location',
type: {
name: 'String'
}
},
tags: {
required: false,
serializedName: 'tags',
type: {
name: 'Dictionary',
value: {
required: false,
serializedName: 'StringElementType',
type: {
name: 'String'
}
}
}
},
displayName: {
required: false,
serializedName: 'properties.displayName',
type: {
name: 'String'
}
},
uri: {
required: false,
serializedName: 'properties.uri',
type: {
name: 'String'
}
},
sourceType: {
required: false,
serializedName: 'properties.sourceType',
type: {
name: 'String'
}
},
folderPath: {
required: false,
serializedName: 'properties.folderPath',
type: {
name: 'String'
}
},
armTemplateFolderPath: {
required: false,
serializedName: 'properties.armTemplateFolderPath',
type: {
name: 'String'
}
},
branchRef: {
required: false,
serializedName: 'properties.branchRef',
type: {
name: 'String'
}
},
securityToken: {
required: false,
serializedName: 'properties.securityToken',
type: {
name: 'String'
}
},
status: {
required: false,
serializedName: 'properties.status',
type: {
name: 'String'
}<|fim▁hole|> createdDate: {
required: false,
readOnly: true,
serializedName: 'properties.createdDate',
type: {
name: 'DateTime'
}
},
provisioningState: {
required: false,
serializedName: 'properties.provisioningState',
type: {
name: 'String'
}
},
uniqueIdentifier: {
required: false,
serializedName: 'properties.uniqueIdentifier',
type: {
name: 'String'
}
}
}
}
};
}
}
module.exports = ArtifactSource;<|fim▁end|> | }, |
<|file_name|>awstools.py<|end_file_name|><|fim▁begin|>import boto3
import logging
import time
from string import Template
from pyhocon import ConfigTree
from botocore.exceptions import ClientError
from typing import List, Any, Tuple, Dict
from . import Instance
from .instancemanager import InstanceManager
from ..utils import random_str, random_int
log = logging.getLogger()
def _interpolate_userscript_template_vals(script: bytes, **kwargs: str) -> bytes:
return Template(script.decode('utf-8')).substitute(kwargs).encode()
def _has_exit_status(instance) -> bool:
instance.reload()
return instance.state['Name'] == 'shutting-down' or instance.state['Name'] == 'terminated'
class AWSTools(InstanceManager):
"""The AWSTools class provides an abstraction over boto3 and EC2 for the use with CSAOpt
This is a context manager and creates required instances on `__enter__()`, disposing of the managed instances in
`__exit__()`. These two methods as well as :meth:`instancemanager.awstools.AWSTools.get_running_instances` are the
only methods called by the Runner (i.e. the only public methods).
This class will use boto3 to (1) create a security group, (2) configure ingress to the broker backend (currently
Redis, as used by Dramatiq). It then (3) creates as many worker instances as requested and runs 'user-data' scripts
    after startup, which is to say, bash scripts that set up and start the required software (Redis, CSAOpt Worker, etc.).
    After the run, AWSTools (4) terminates all managed instances and removes the security group.
Note:
If the AWS credentials are not provided in the config file, boto3 will look into
the following environment variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
How to create IAM credentials (i.e. AWS keys):
* Create (or reuse) IAM user with programmatic access
* Assign to a (potentially new) group with AmazonEC2FullAccess
* Store the access key and secret key
Args:
config: Configuration for current optimization run
internal_conf: Internal CSAOpt configuration
"""
def __init__(self, config: ConfigTree, internal_conf: ConfigTree) -> None:
self.region = config.get('remote.aws.region', internal_conf['remote.aws.default_region'])
if config.get('remote.aws.secret_key', False) and config.get('remote.aws.access_key', False):
self.ec2_resource: boto3.session.Session.resource = boto3.resource(
'ec2',
aws_access_key_id=config['remote.aws.access_key'],
aws_secret_access_key=config['remote.aws.secret_key'],
region_name=self.region)
else:
# This will look for the env variables
self.ec2_resource: boto3.session.Session.resource = boto3.resource('ec2', region_name=self.region)
self.ec2_client = self.ec2_resource.meta.client
# ec2.Instance is of <class 'boto3.resources.factory.ec2.Instance'> but this cannot be
# used as a type hint here because it is generated by the factory at runtime, I assume.
self.workers: List[Any] = []
self.broker: Any = None
self.security_group_prefix: str = internal_conf.get('remote.aws.security_group_prefix', 'csaopt_')
self.security_group_id: str = ''
self.worker_count: int = config['remote.aws.worker_count']
worker_ami_key = 'remote.aws.worker_ami'
broker_ami_key = 'remote.aws.broker_ami'
self.broker_ami = config.get(broker_ami_key, internal_conf[broker_ami_key])
self.worker_ami = config.get(worker_ami_key, internal_conf[worker_ami_key])
self.timeout_provision = config.get('remote.aws.timeout_provision',
internal_conf['remote.aws.timeout_provision'])
self.timeout_startup = config.get('remote.aws.timeout_startup', internal_conf['remote.aws.timeout_startup'])
self.broker_port = internal_conf.get('broker.defaults.remote_port')
self.broker_password = config.get('remote.aws.instances.broker_password', None)
if self.broker_password is None:
self.broker_password = random_str(32)
self.debug_on_cpu = config.get('debug.gpu_simulator', '')
self.terminate_on_exit = config.get('remote.terminate_on_exit', False)
self.use_existing_instances = False
existing_instances = config.get('remote.aws.instances', None)
if existing_instances is not None:
self.use_existing_instances = True
self.existing_instances = existing_instances
self.provision_args: Dict[str, str] = {
'broker_image':
config.get('remote.aws.broker_ami', internal_conf['remote.aws.broker_ami']),
'worker_image':
config.get('remote.aws.worker_ami', internal_conf['remote.aws.worker_ami']),
'broker_instance_type':
config.get('remote.aws.broker_instance_type', internal_conf['remote.aws.broker_instance_type']),
'worker_instance_type':
config.get('remote.aws.worker_instance_type', internal_conf['remote.aws.worker_instance_type'])
}
data_base = internal_conf['remote.aws.userdata_rel_path']
with open(data_base + '-broker.sh', 'rb') as broker_data, open(data_base + '-worker.sh', 'rb') as worker_data:
self.user_data_scripts: Dict[str, bytes] = {'broker': broker_data.read(), 'worker': worker_data.read()}
def _get_from_ids(self, broker_id: str, worker_ids: List[str]) -> Tuple[Any, Any]:
broker = self.ec2_resource.Instance(broker_id)
workers = map(lambda worker_id: self.ec2_resource.Instance(worker_id), worker_ids)
return broker, list(workers)
def _provision_instances(self, timeout_ms: int, count: int = 2, **kwargs: str) -> Tuple[Any, Any]:
"""Start and configure instances
Args:
timeout_ms: General timeout for the provisioning of requested instances
count: number of worker instances to be created
kwargs: Any other parameters that are required for startup
"""
broker_userdata = _interpolate_userscript_template_vals(
self.user_data_scripts['broker'], external_port=self.broker_port, redis_password=self.broker_password)
broker = self.ec2_resource.create_instances(
ImageId=kwargs['broker_image'],
MinCount=1,
MaxCount=1,
UserData=broker_userdata,
SecurityGroupIds=[self.security_group_id],
InstanceType=kwargs['broker_instance_type'])[0]
worker_userdata = _interpolate_userscript_template_vals(
self.user_data_scripts['worker'],
debug='1' if self.debug_on_cpu else 'off',
redis_host=broker.private_ip_address,
redis_port=self.broker_port,
redis_password=self.broker_password)
workers = self.ec2_resource.create_instances(
ImageId=kwargs['worker_image'],
MinCount=count,
MaxCount=count,
InstanceType=kwargs['worker_instance_type'],
UserData=worker_userdata,
SecurityGroupIds=[self.security_group_id])
return broker, workers
def __map_ec2_instance(self, instance: Any, is_broker: bool = False, **kwargs: Any) -> Instance:
"""Maps a boto/EC2 instance to the internal Instance type
Args:
instance: Instance object returned by boto3 (which has a runtime type and therefore untyped here)
is_broker: Flag indicating whether a given instance is a broker or not
kwargs: Any other parameters that should be available on the produced object
Returns:
An abstract instance object
"""
return Instance(instance.id, instance.public_ip_address, is_broker=is_broker, **kwargs)
def get_running_instances(self) -> Tuple[Instance, List[Instance]]:
"""Update and get currently managed instances
Returns:
A tuple of broker, [worker]
"""
self.broker.reload()
for worker in self.workers:
worker.reload()
broker_instance = self.__map_ec2_instance(
instance=self.broker, is_broker=True, port=self.broker_port, password=self.broker_password)
worker_instances = [self.__map_ec2_instance(w, queue_id=w.id) for w in self.workers]
return broker_instance, worker_instances
def _terminate_instances(self, timeout_ms: int) -> None:
"""Terminate all instances managed by AWSTools
Args:
timeout_ms: Timeout, in milliseconds, for the termination
"""
instance_ids = [self.broker.id] + [instance.id for instance in self.workers]
self.ec2_client.terminate_instances(InstanceIds=instance_ids)
def _wait_for_instances(self) -> None:
"""Block until broker and workers are up"""
self.broker.wait_until_running()
for worker in self.workers:
worker.wait_until_running()
def _run_start_scripts(self, timeout_ms: int) -> None:
"""Run any required setup procedures after the initial startup of managed instances
Args:
timeout_ms: Timeout, in milliseconds, for the termination
"""
raise NotImplementedError
def __enter__(self) -> InstanceManager:
"""On enter, AWSTools prepares the AWS security group and spins up the required intances
"""
if not self.use_existing_instances:
self.security_group_id = self._create_sec_group(self.security_group_prefix + random_str(10))
self.broker, self.workers = self._provision_instances(
count=self.worker_count, timeout_ms=self.timeout_provision, **self.provision_args)
log.debug('Provision Instances returned: {}, {}. Waiting for instances now'.format(
self.broker, self.workers))
else:
self.security_group_id = self.existing_instances['security_group']
self.broker, self.workers = self._get_from_ids(self.existing_instances['broker'],
self.existing_instances['workers'])
self._wait_for_instances()
log.debug('Waiting for instances returned')
return self
def __exit__(self, exc_type, exc_value, traceback):
"""On exit, AWSTools terminates the started instances and removes security groups"""
log.debug('Entered awstools\' __exit__ method with traceback: {}'.format(traceback))
if not self.terminate_on_exit:
return False
self._terminate_instances(self.timeout_provision)
log.debug('Terminate Instances call returned, waiting for termination')
all_instances = [self.broker] + self.workers
while (any((not _has_exit_status(instance) for instance in all_instances))):
log.debug('Waiting for instances to enter "shutting-down" or "terminated" state: {}'.format(
[(i.id, i.state) for i in all_instances]))
time.sleep(2.0)
log.debug('Remove Security Group')
self._remove_sec_group(self.security_group_id)
return False
def _remove_sec_group(self, group_id: str) -> None:
"""Removes the security group created by CSAOpt
Args:
group_id: Security group Id of group to be deleted
"""
if group_id is not None:
try:
self.ec2_client.delete_security_group(GroupId=group_id)
log.debug('Security group [{}] deleted'.format(group_id))
except ClientError as e:
log.error('Could not remove security group: {}'.format(e))
else:
log.warning('Cannot remove security group, because none was created. Skipping...')
def _create_sec_group(self, name: str) -> str:
"""Creates an AWS security group and assigns ingress permissions from the current network
Args:
name: Name of the security group
Returns:
AWS Identifier `GroupId` of the created security group
"""
try:
response = self.ec2_client.create_security_group(GroupName=name, Description='Security Group for CSAOpt')
security_group_id = response['GroupId']
log.debug('Created Security Group: ' + security_group_id)
data = self.ec2_client.authorize_security_group_ingress(
GroupId=security_group_id,
IpPermissions=[
{
'IpProtocol': 'tcp',
'FromPort': self.broker_port,
'ToPort': self.broker_port,
'IpRanges': [{
'CidrIp': '0.0.0.0/0'
}]
},
{ # Allow communication within the sec group
'IpProtocol': '-1',
'UserIdGroupPairs': [{
'GroupId': security_group_id
}]
}
])
log.debug('Authorized Security Group Ingress with result: {}'.format(data))
data = self.ec2_client.authorize_security_group_egress(
GroupId=security_group_id,
IpPermissions=[{ # Allow communication within the sec group
'IpProtocol': '-1',
'UserIdGroupPairs': [{
'GroupId': security_group_id
}]
}])
log.debug('Authorized Security Group Egress with result: {}'.format(data))
<|fim▁hole|> except ClientError as e:
log.exception('Could not create Security Group')
raise<|fim▁end|> | return security_group_id |
<|file_name|>scan_states.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# python script to generate an overview of the staes based on the input lex file.
#
# Copyright (C) 1997-2019 by Dimitri van Heesch.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation under the terms of the GNU General Public License is hereby
# granted. No representations are made about the suitability of this software
# for any purpose. It is provided "as is" without express or implied warranty.
# See the GNU General Public License for more details.
#
# Documents produced by Doxygen are derivative works derived from the
# input used in their production; they are not affected by this license.
#
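#
# Illustrative invocation (a sketch; the lexer file name and how the generated
# function is included into the scanner source are assumptions):
#
#   python scan_states.py scanner.l > scanner_states.inc
#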
import sys
import os
import re
def main():
if len(sys.argv)!=2:
sys.exit('Usage: %s <lex_file>' % sys.argv[0])
lex_file = sys.argv[1]
if (os.path.exists(lex_file)):
#write preamble
print("static const char *stateToString(int state)")
print("{")
print(" switch(state)")
print(" {")
print(" case INITIAL: return \"INITIAL\";")
with open(lex_file) as f:
for line in f:
if re.search(r'^%x', line) or re.search(r'^%s', line):
state = line.split()[1]
print(" case %s: return \"%s\";" % (state,state))
elif re.search(r'^%%', line):
break
else:
pass<|fim▁hole|> print("}")
if __name__ == '__main__':
main()<|fim▁end|> | f.close()
#write post
print(" }")
print(" return \"Unknown\";") |
<|file_name|>getComponents.ts<|end_file_name|><|fim▁begin|>import type { Prng } from '@dicebear/core';
import type { Options } from '../options';
import type { ComponentPickCollection } from '../static-types';
import { pickComponent } from './pickComponent';
type Props = {
prng: Prng;
options: Options;
};
export function getComponents({<|fim▁hole|> prng,
group: 'eyes',
values: options.eyes,
});
const eyebrowsComponent = pickComponent({
prng,
group: 'eyebrows',
values: options.eyebrows,
});
const mouthComponent = pickComponent({
prng,
group: 'mouth',
values: options.mouth,
});
const accessoiresComponent = pickComponent({
prng,
group: 'accessoires',
values: options.accessoires,
});
return {
eyes: eyesComponent,
eyebrows: eyebrowsComponent,
mouth: mouthComponent,
accessoires: prng.bool(options.accessoiresProbability)
? accessoiresComponent
: undefined,
};
}<|fim▁end|> | prng,
options,
}: Props): ComponentPickCollection {
const eyesComponent = pickComponent({ |
<|file_name|>unlocker_test.go<|end_file_name|><|fim▁begin|>package payouts
import (<|fim▁hole|> "os"
"testing"
"github.com/techievee/open-ethereum-pool/rpc"
"github.com/techievee/open-ethereum-pool/storage"
)
func TestMain(m *testing.M) {
os.Exit(m.Run())
}
func TestCalculateRewards(t *testing.T) {
blockReward, _ := new(big.Rat).SetString("5000000000000000000")
shares := map[string]int64{"0x0": 1000000, "0x1": 20000, "0x2": 5000, "0x3": 10, "0x4": 1}
expectedRewards := map[string]int64{"0x0": 4877996431, "0x1": 97559929, "0x2": 24389982, "0x3": 48780, "0x4": 4878}
totalShares := int64(1025011)
rewards , percent := calculateRewardsForShares(shares, totalShares, blockReward)
expectedTotalAmount := int64(8000000000)
totalAmount := int64(0)
for login, amount := range rewards {
totalAmount += amount
if expectedRewards[login] != amount {
t.Errorf("Amount for %v must be equal to %v vs %v , %v", login, expectedRewards[login], amount, percent)
}
}
if totalAmount != expectedTotalAmount {
t.Errorf("Total reward must be equal to block reward in Shannon: %v vs %v", expectedTotalAmount, totalAmount)
}
}
func TestChargeFee(t *testing.T) {
orig, _ := new(big.Rat).SetString("8000000000000000000")
value, _ := new(big.Rat).SetString("8000000000000000000")
expectedNewValue, _ := new(big.Rat).SetString("3750000000000000000")
expectedFee, _ := new(big.Rat).SetString("1250000000000000000")
newValue, fee := chargeFee(orig, 25.0)
if orig.Cmp(value) != 0 {
t.Error("Must not change original value")
}
if newValue.Cmp(expectedNewValue) != 0 {
t.Error("Must charge and deduct correct fee")
}
if fee.Cmp(expectedFee) != 0 {
t.Error("Must charge fee")
}
}
func TestWeiToShannonInt64(t *testing.T) {
wei, _ := new(big.Rat).SetString("1000000000000000000")
origWei, _ := new(big.Rat).SetString("1000000000000000000")
shannon := int64(1000000000)
if weiToShannonInt64(wei) != shannon {
t.Error("Must convert to Shannon")
}
if wei.Cmp(origWei) != 0 {
t.Error("Must charge original value")
}
}
func TestGetUncleReward(t *testing.T) {
rewards := make(map[int64]string)
expectedRewards := map[int64]string{
1: "7000000000000000000",
2: "6000000000000000000",
3: "5000000000000000000",
4: "4000000000000000000",
5: "3000000000000000000",
6: "2000000000000000000",
}
for i := int64(1); i < 7; i++ {
rewards[i] = getUncleReward(1, i+1).String()
}
for i, reward := range rewards {
if expectedRewards[i] != rewards[i] {
t.Errorf("Incorrect uncle reward for %v, expected %v vs %v", i, expectedRewards[i], reward)
}
}
}
func TestMatchCandidate(t *testing.T) {
gethBlock := &rpc.GetBlockReply{Hash: "0x12345A", Nonce: "0x1A"}
parityBlock := &rpc.GetBlockReply{Hash: "0x12345A", SealFields: []string{"0x0A", "0x1A"}}
candidate := &storage.BlockData{Nonce: "0x1a"}
orphan := &storage.BlockData{Nonce: "0x1abc"}
if !matchCandidate(gethBlock, candidate) {
t.Error("Must match with nonce")
}
if !matchCandidate(parityBlock, candidate) {
t.Error("Must match with seal fields")
}
if matchCandidate(gethBlock, orphan) {
t.Error("Must not match with orphan with nonce")
}
if matchCandidate(parityBlock, orphan) {
t.Error("Must not match orphan with seal fields")
}
block := &rpc.GetBlockReply{Hash: "0x12345A"}
immature := &storage.BlockData{Hash: "0x12345a", Nonce: "0x0"}
if !matchCandidate(block, immature) {
t.Error("Must match with hash")
}
}<|fim▁end|> | "math/big" |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup
setup(
name="sgf",
version="0.5",
description="Python library for reading and writing Smart Game Format",
license="MIT",
url="http://github.com/jtauber/sgf",
author="James Tauber",
author_email="[email protected]",
py_modules=["sgf"],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",<|fim▁hole|><|fim▁end|> | "Topic :: Games/Entertainment :: Board Games",
"Topic :: Utilities",
],
) |
<|file_name|>contrail_plugin.py<|end_file_name|><|fim▁begin|># Copyright 2014 Juniper Networks. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at<|fim▁hole|># distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Hampapur Ajay, Praneet Bachheti, Rudra Rugge, Atul Moghe
from oslo.config import cfg
import requests
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as exc
from neutron.db import portbindings_base
from neutron.db import quota_db # noqa
from neutron.extensions import external_net
from neutron.extensions import portbindings
from neutron.extensions import securitygroup
from neutron import neutron_plugin_base_v2
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from simplejson import JSONDecodeError
LOG = logging.getLogger(__name__)
vnc_opts = [
cfg.StrOpt('api_server_ip', default='127.0.0.1',
help='IP address to connect to VNC controller'),
cfg.StrOpt('api_server_port', default='8082',
help='Port to connect to VNC controller'),
cfg.DictOpt('contrail_extensions', default={},
help='Enable Contrail extensions(policy, ipam)'),
]
# ContrailError message have translated already.
# so there is no need to use i18n here.
class ContrailNotFoundError(exc.NotFound):
message = '%(msg)s'
class ContrailConflictError(exc.Conflict):
message = '%(msg)s'
class ContrailBadRequestError(exc.BadRequest):
message = '%(msg)s'
class ContrailServiceUnavailableError(exc.ServiceUnavailable):
message = '%(msg)s'
class ContrailNotAuthorizedError(exc.NotAuthorized):
message = '%(msg)s'
class InvalidContrailExtensionError(exc.ServiceUnavailable):
message = _("Invalid Contrail Extension: %(ext_name) %(ext_class)")
CONTRAIL_EXCEPTION_MAP = {
requests.codes.not_found: ContrailNotFoundError,
requests.codes.conflict: ContrailConflictError,
requests.codes.bad_request: ContrailBadRequestError,
requests.codes.service_unavailable: ContrailServiceUnavailableError,
requests.codes.unauthorized: ContrailNotAuthorizedError,
requests.codes.request_timeout: ContrailServiceUnavailableError,
}
class NeutronPluginContrailCoreV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
securitygroup.SecurityGroupPluginBase,
portbindings_base.PortBindingBaseMixin,
external_net.External_net):
supported_extension_aliases = ["security-group", "router",
"port-security", "binding", "agent",
"quotas", "external-net",
"allowed-address-pairs", "extra_dhcp_opt"]
PLUGIN_URL_PREFIX = '/neutron'
__native_bulk_support = False
# patch VIF_TYPES
portbindings.__dict__['VIF_TYPE_VROUTER'] = 'vrouter'
portbindings.VIF_TYPES.append(portbindings.VIF_TYPE_VROUTER)
def _parse_class_args(self):
"""Parse the contrailplugin.ini file.
        OpenContrail supports extensions such as ipam and policy; these extensions
        can be configured in the plugin configuration file as shown below.
        The plugin then loads the specified extensions.
contrail_extensions=ipam:<classpath>,policy:<classpath>
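        e.g. (hypothetical class paths):
        contrail_extensions=ipam:contrail_ext.ipam.IpamExtension,policy:contrail_ext.policy.PolicyExtension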
"""
contrail_extensions = cfg.CONF.APISERVER.contrail_extensions
# If multiple class specified for same extension, last one will win
# according to DictOpt behavior
for ext_name, ext_class in contrail_extensions.items():
try:
if not ext_class:
LOG.error(_('Malformed contrail extension...'))
continue
self.supported_extension_aliases.append(ext_name)
ext_class = importutils.import_class(ext_class)
ext_instance = ext_class()
ext_instance.set_core(self)
for method in dir(ext_instance):
for prefix in ['get', 'update', 'delete', 'create']:
if method.startswith('%s_' % prefix):
setattr(self, method,
ext_instance.__getattribute__(method))
except Exception:
LOG.exception(_("Contrail Backend Error"))
# Converting contrail backend error to Neutron Exception
raise InvalidContrailExtensionError(
ext_name=ext_name, ext_class=ext_class)
#keystone
self._authn_token = None
if cfg.CONF.auth_strategy == 'keystone':
kcfg = cfg.CONF.keystone_authtoken
body = '{"auth":{"passwordCredentials":{'
body += ' "username": "%s",' % (kcfg.admin_user)
body += ' "password": "%s"},' % (kcfg.admin_password)
body += ' "tenantName":"%s"}}' % (kcfg.admin_tenant_name)
self._authn_body = body
self._authn_token = cfg.CONF.keystone_authtoken.admin_token
self._keystone_url = "%s://%s:%s%s" % (
cfg.CONF.keystone_authtoken.auth_protocol,
cfg.CONF.keystone_authtoken.auth_host,
cfg.CONF.keystone_authtoken.auth_port,
"/v2.0/tokens")
def __init__(self):
super(NeutronPluginContrailCoreV2, self).__init__()
portbindings_base.register_port_dict_function()
cfg.CONF.register_opts(vnc_opts, 'APISERVER')
self._parse_class_args()
def _get_base_binding_dict(self):
binding = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_VROUTER,
portbindings.VIF_DETAILS: {
# TODO(praneetb): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases
}
}
return binding
def get_agents(self, context, filters=None, fields=None):
# This method is implemented so that horizon is happy
return []
def _request_api_server(self, url, data=None, headers=None):
# Attempt to post to Api-Server
response = requests.post(url, data=data, headers=headers)
if (response.status_code == requests.codes.unauthorized):
# Get token from keystone and save it for next request
response = requests.post(self._keystone_url,
data=self._authn_body,
headers={'Content-type': 'application/json'})
if (response.status_code == requests.codes.ok):
# plan is to re-issue original request with new token
auth_headers = headers or {}
authn_content = json.loads(response.text)
self._authn_token = authn_content['access']['token']['id']
auth_headers['X-AUTH-TOKEN'] = self._authn_token
response = self._request_api_server(url, data, auth_headers)
else:
raise RuntimeError('Authentication Failure')
return response
def _request_api_server_authn(self, url, data=None, headers=None):
authn_headers = headers or {}
if self._authn_token is not None:
authn_headers['X-AUTH-TOKEN'] = self._authn_token
response = self._request_api_server(url, data, headers=authn_headers)
return response
def _relay_request(self, url_path, data=None):
"""Send received request to api server."""
url = "http://%s:%s%s" % (cfg.CONF.APISERVER.api_server_ip,
cfg.CONF.APISERVER.api_server_port,
url_path)
return self._request_api_server_authn(
url, data=data, headers={'Content-type': 'application/json'})
def _request_backend(self, context, data_dict, obj_name, action):
context_dict = self._encode_context(context, action, obj_name)
data = json.dumps({'context': context_dict, 'data': data_dict})
url_path = "%s/%s" % (self.PLUGIN_URL_PREFIX, obj_name)
response = self._relay_request(url_path, data=data)
try:
return response.status_code, response.json()
except JSONDecodeError:
return response.status_code, response.content
def _encode_context(self, context, operation, apitype):
cdict = {'user_id': getattr(context, 'user_id', ''),
'is_admin': getattr(context, 'is_admin', False),
'operation': operation,
'type': apitype,
'tenant_id': getattr(context, 'tenant_id', None)}
if context.roles:
cdict['roles'] = context.roles
if context.tenant:
cdict['tenant'] = context.tenant
return cdict
def _encode_resource(self, resource_id=None, resource=None, fields=None,
filters=None):
resource_dict = {}
if resource_id:
resource_dict['id'] = resource_id
if resource:
resource_dict['resource'] = resource
resource_dict['filters'] = filters
resource_dict['fields'] = fields
return resource_dict
def _prune(self, resource_dict, fields):
if fields:
return dict(((key, item) for key, item in resource_dict.items()
if key in fields))
return resource_dict
def _transform_response(self, status_code, info=None, obj_name=None,
fields=None):
if status_code == requests.codes.ok:
if not isinstance(info, list):
return self._prune(info, fields)
else:
return [self._prune(items, fields) for items in info]
self._raise_contrail_error(status_code, info, obj_name)
def _raise_contrail_error(self, status_code, info, obj_name):
if status_code == requests.codes.bad_request:
raise ContrailBadRequestError(
msg=info['message'], resource=obj_name)
error_class = CONTRAIL_EXCEPTION_MAP[status_code]
raise error_class(msg=info['message'])
def _create_resource(self, res_type, context, res_data):
"""Create a resource in API server.
This method encodes neutron model, and sends it to the
contrail api server.
"""
for key, value in res_data[res_type].items():
if value == attr.ATTR_NOT_SPECIFIED:
del res_data[res_type][key]
res_dict = self._encode_resource(resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'CREATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("create_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _get_resource(self, res_type, context, id, fields):
"""Get a resource from API server.
This method gets a resource from the contrail api server
"""
res_dict = self._encode_resource(resource_id=id, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READ')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug("get_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _update_resource(self, res_type, context, id, res_data):
"""Update a resource in API server.
This method updates a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=id,
resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'UPDATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("update_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _delete_resource(self, res_type, context, id):
"""Delete a resource in API server
This method deletes a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=id)
LOG.debug("delete_%(res_type)s(): %(id)s",
{'res_type': res_type, 'id': id})
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'DELETE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name=res_type)
def _list_resource(self, res_type, context, filters, fields):
res_dict = self._encode_resource(filters=filters, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READALL')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug(
"get_%(res_type)s(): filters: %(filters)r data: %(res_dicts)r",
{'res_type': res_type, 'filters': filters,
'res_dicts': res_dicts})
return res_dicts
def _count_resource(self, res_type, context, filters):
res_dict = self._encode_resource(filters=filters)
status_code, res_count = self._request_backend(context, res_dict,
res_type, 'READCOUNT')
LOG.debug("get_%(res_type)s_count(): %(res_count)r",
{'res_type': res_type, 'res_count': res_count})
return res_count
def _get_network(self, context, id, fields=None):
return self._get_resource('network', context, id, fields)
def create_network(self, context, network):
"""Creates a new Virtual Network."""
return self._create_resource('network', context, network)
def get_network(self, context, network_id, fields=None):
"""Get the attributes of a particular Virtual Network."""
return self._get_network(context, network_id, fields)
def update_network(self, context, network_id, network):
"""Updates the attributes of a particular Virtual Network."""
return self._update_resource('network', context, network_id,
network)
def delete_network(self, context, network_id):
"""Creates a new Virtual Network.
Deletes the network with the specified network identifier
belonging to the specified tenant.
"""
self._delete_resource('network', context, network_id)
def get_networks(self, context, filters=None, fields=None):
"""Get the list of Virtual Networks."""
return self._list_resource('network', context, filters,
fields)
def get_networks_count(self, context, filters=None):
"""Get the count of Virtual Network."""
networks_count = self._count_resource('network', context, filters)
return networks_count['count']
def create_subnet(self, context, subnet):
"""Creates a new subnet, and assigns it a symbolic name."""
if subnet['subnet']['gateway_ip'] is None:
subnet['subnet']['gateway_ip'] = '0.0.0.0'
if subnet['subnet']['host_routes'] != attr.ATTR_NOT_SPECIFIED:
if (len(subnet['subnet']['host_routes']) >
cfg.CONF.max_subnet_host_routes):
raise exc.HostRoutesExhausted(subnet_id=subnet[
'subnet'].get('id', _('new subnet')),
quota=cfg.CONF.max_subnet_host_routes)
subnet_created = self._create_resource('subnet', context, subnet)
return self._make_subnet_dict(subnet_created)
def _make_subnet_dict(self, subnet):
if 'gateway_ip' in subnet and subnet['gateway_ip'] == '0.0.0.0':
subnet['gateway_ip'] = None
return subnet
def _get_subnet(self, context, subnet_id, fields=None):
subnet = self._get_resource('subnet', context, subnet_id, fields)
return self._make_subnet_dict(subnet)
def get_subnet(self, context, subnet_id, fields=None):
"""Get the attributes of a particular subnet."""
return self._get_subnet(context, subnet_id, fields)
def update_subnet(self, context, subnet_id, subnet):
"""Updates the attributes of a particular subnet."""
subnet = self._update_resource('subnet', context, subnet_id, subnet)
return self._make_subnet_dict(subnet)
def delete_subnet(self, context, subnet_id):
"""
Deletes the subnet with the specified subnet identifier
belonging to the specified tenant.
"""
self._delete_resource('subnet', context, subnet_id)
def get_subnets(self, context, filters=None, fields=None):
"""Get the list of subnets."""
return [self._make_subnet_dict(s)
for s in self._list_resource(
'subnet', context, filters, fields)]
def get_subnets_count(self, context, filters=None):
"""Get the count of subnets."""
subnets_count = self._count_resource('subnet', context, filters)
return subnets_count['count']
def _extend_port_dict_security_group(self, port_res, port_db):
# Security group bindings will be retrieved from the sqlalchemy
# model. As they're loaded eagerly with ports because of the
# joined load they will not cause an extra query.
port_res[securitygroup.SECURITYGROUPS] = port_db.get(
'security_groups', []) or []
return port_res
def _make_port_dict(self, port):
return port
def _get_port(self, context, id, fields=None):
port = self._get_resource('port', context, id, fields)
return self._make_port_dict(port)
def _update_ips_for_port(self, context, network_id, port_id, original_ips,
new_ips):
"""Add or remove IPs from the port."""
# These ips are still on the port and haven't been removed
prev_ips = []
# the new_ips contain all of the fixed_ips that are to be updated
if len(new_ips) > cfg.CONF.max_fixed_ips_per_port:
            msg = _('Exceeded maximum amount of fixed ips per port')
raise exc.InvalidInput(error_message=msg)
# Remove all of the intersecting elements
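        # e.g. (illustrative addresses) original_ips=[{'ip_address': '10.0.0.2'}] and
        # new_ips=[{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.3'}]
        # yields prev_ips=[{'ip_address': '10.0.0.2'}] and new_ips=[{'ip_address': '10.0.0.3'}]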
for original_ip in original_ips[:]:
for new_ip in new_ips[:]:
if ('ip_address' in new_ip and
original_ip['ip_address'] == new_ip['ip_address']):
original_ips.remove(original_ip)
new_ips.remove(new_ip)
prev_ips.append(original_ip)
return new_ips, prev_ips
def create_port(self, context, port):
"""Creates a port on the specified Virtual Network."""
port = self._create_resource('port', context, port)
return self._make_port_dict(port)
def get_port(self, context, port_id, fields=None):
"""Get the attributes of a particular port."""
return self._get_port(context, port_id, fields)
def update_port(self, context, port_id, port):
"""Updates a port.
Updates the attributes of a port on the specified Virtual
Network.
"""
if 'fixed_ips' in port['port']:
original = self._get_port(context, port_id)
added_ips, prev_ips = self._update_ips_for_port(
context, original['network_id'], port_id,
original['fixed_ips'], port['port']['fixed_ips'])
port['port']['fixed_ips'] = prev_ips + added_ips
port = self._update_resource('port', context, port_id, port)
return self._make_port_dict(port)
def delete_port(self, context, port_id):
"""Deletes a port.
Deletes a port on a specified Virtual Network,
if the port contains a remote interface attachment,
the remote interface is first un-plugged and then the port
is deleted.
"""
self._delete_resource('port', context, port_id)
def get_ports(self, context, filters=None, fields=None):
"""Get all ports.
Retrieves all port identifiers belonging to the
        specified Virtual Network with the specified filter.
"""
return [self._make_port_dict(p)
for p in self._list_resource('port', context, filters, fields)]
def get_ports_count(self, context, filters=None):
"""Get the count of ports."""
ports_count = self._count_resource('port', context, filters)
return ports_count['count']
# Router API handlers
def create_router(self, context, router):
"""Creates a router.
Creates a new Logical Router, and assigns it
a symbolic name.
"""
return self._create_resource('router', context, router)
def get_router(self, context, router_id, fields=None):
"""Get the attributes of a router."""
return self._get_resource('router', context, router_id, fields)
def update_router(self, context, router_id, router):
"""Updates the attributes of a router."""
return self._update_resource('router', context, router_id,
router)
def delete_router(self, context, router_id):
"""Deletes a router."""
self._delete_resource('router', context, router_id)
def get_routers(self, context, filters=None, fields=None):
"""Retrieves all router identifiers."""
return self._list_resource('router', context, filters, fields)
def get_routers_count(self, context, filters=None):
"""Get the count of routers."""
routers_count = self._count_resource('router', context, filters)
return routers_count['count']
def add_router_interface(self, context, router_id, interface_info):
"""Add interface to a router."""
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exc.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
if 'subnet_id' in interface_info:
msg = _("Cannot specify both subnet-id and port-id")
raise exc.BadRequest(resource='router', msg=msg)
res_dict = self._encode_resource(resource_id=router_id,
resource=interface_info)
status_code, res_info = self._request_backend(context, res_dict,
'router', 'ADDINTERFACE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name='add_router_interface')
return res_info
def remove_router_interface(self, context, router_id, interface_info):
"""Delete interface from a router."""
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exc.BadRequest(resource='router', msg=msg)
res_dict = self._encode_resource(resource_id=router_id,
resource=interface_info)
status_code, res_info = self._request_backend(context, res_dict,
'router', 'DELINTERFACE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name='remove_router_interface')
return res_info
# Floating IP API handlers
def create_floatingip(self, context, floatingip):
"""Creates a floating IP."""
return self._create_resource('floatingip', context, floatingip)
def update_floatingip(self, context, fip_id, floatingip):
"""Updates the attributes of a floating IP."""
return self._update_resource('floatingip', context, fip_id,
floatingip)
def get_floatingip(self, context, fip_id, fields=None):
"""Get the attributes of a floating ip."""
return self._get_resource('floatingip', context, fip_id, fields)
def delete_floatingip(self, context, fip_id):
"""Deletes a floating IP."""
self._delete_resource('floatingip', context, fip_id)
def get_floatingips(self, context, filters=None, fields=None):
"""Retrieves all floating ips identifiers."""
return self._list_resource('floatingip', context, filters, fields)
def get_floatingips_count(self, context, filters=None):
"""Get the count of floating IPs."""
fips_count = self._count_resource('floatingip', context, filters)
return fips_count['count']
# Security Group handlers
def create_security_group(self, context, security_group):
"""Creates a Security Group."""
return self._create_resource('security_group', context,
security_group)
def get_security_group(self, context, sg_id, fields=None, tenant_id=None):
"""Get the attributes of a security group."""
return self._get_resource('security_group', context, sg_id, fields)
def update_security_group(self, context, sg_id, security_group):
"""Updates the attributes of a security group."""
return self._update_resource('security_group', context, sg_id,
security_group)
def delete_security_group(self, context, sg_id):
"""Deletes a security group."""
self._delete_resource('security_group', context, sg_id)
def get_security_groups(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Retrieves all security group identifiers."""
return self._list_resource('security_group', context,
filters, fields)
def get_security_groups_count(self, context, filters=None):
return 0
def get_security_group_rules_count(self, context, filters=None):
return 0
def create_security_group_rule(self, context, security_group_rule):
"""Creates a security group rule."""
return self._create_resource('security_group_rule', context,
security_group_rule)
def delete_security_group_rule(self, context, sg_rule_id):
"""Deletes a security group rule."""
self._delete_resource('security_group_rule', context, sg_rule_id)
def get_security_group_rule(self, context, sg_rule_id, fields=None):
"""Get the attributes of a security group rule."""
return self._get_resource('security_group_rule', context,
sg_rule_id, fields)
def get_security_group_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Retrieves all security group rules."""
return self._list_resource('security_group_rule', context,
filters, fields)<|fim▁end|> | #
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software |
<|file_name|>py_group.py<|end_file_name|><|fim▁begin|># coding=UTF-8
import mysql.connector
import xlrd
import xlsxwriter
import os
from mysql.connector import errorcode
from datetime import datetime
# Name of the symbolicated Excel file
EXCEL_NAME = '20170223_4.0.1_feedback_result_py'
DB_NAME = 'zl_crash'
config = {
'user': 'root',
'password': '123456',
'host': '127.0.0.1',
'database': 'zl_crash',
}
class Report(object):
'''
Report class used to encapsulate the row data in EXCEL
'''
def __init__(self, report_id, exception_type, device_id, exception_symbols, os_version):
        self.report_id = report_id
        self.exception_type = exception_type
        self.device_id = device_id
        self.exception_symbols = exception_symbols
        self.os_version = os_version
def main():
begin_time = datetime.now()
    # Table name
table_name = 'report_' + begin_time.strftime("%Y_%m_%d_%H_%M_%S")
    # Create the table
create_table_in_db(table_name)
    # Insert the data
insert_symbolication_result_into_db(table_name)
    # Group the data and export it
generate_grouped_exception(table_name)
end_time = datetime.now()
    print('Elapsed time: ' + str(end_time - begin_time))
def create_table_in_db(table_name):
'''
    Create a table in the database, named `table_name`
:param table_name: table_name
'''
SQLS = {}
SQLS['drop_report'] = (
"DROP TABLE IF EXISTS `" + table_name + "`")
SQLS['report'] = (
"CREATE TABLE `" + table_name + "` ( "
"`report_id` int(11) NOT NULL AUTO_INCREMENT, "
"`exception_type` varchar(255) DEFAULT NULL, "
"`device_id` varchar(255) DEFAULT NULL, "
"`exception_symbols` longtext, "
"`os_version` varchar(255) DEFAULT NULL, "
"PRIMARY KEY (`report_id`)"
") ENGINE=InnoDB DEFAULT CHARSET=utf8")
try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor();
for name, sql in SQLS.items():
try:
print("Executing sql {}.".format(name))
cursor.execute(sql)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print('Table already exists.')
else:
print(err.msg)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err.msg)
finally:
cursor.close()
conn.close()
def insert_symbolication_result_into_db(table_name):
'''
Insert the symbolicated result into database
:param table_name: table_name in database
'''
try:
conn = mysql.connector.connect(**config)
# print('connected to db')
cursor = conn.cursor()
insert_report = (
"INSERT INTO " + table_name + " "
"(exception_type, device_id, exception_symbols, os_version) "
"VALUES (%s, %s, %s, %s)")
work_book = xlrd.open_workbook(EXCEL_NAME + '.xlsx')
sheet = work_book.sheets()[0]
nrows = sheet.nrows
ncols = sheet.ncols
row_index = 1
for row_index in range(1, nrows):
data_row = sheet.row_values(row_index)
# assert col < ncols
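            # Expected column order: device_id, os_version, exception_type, exception_symbols;
            # rows with an empty symbols column are skipped below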
device_id = data_row[0]
os_version = data_row[1]
exception_type = data_row[2]
exception_symbols = data_row[3]
if exception_symbols == '':
continue
data_report = (exception_type, device_id, exception_symbols, os_version)
# insert report data
cursor.execute(insert_report, data_report)
conn.commit()
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err.msg)
finally:
cursor.close()
conn.close()
def generate_grouped_exception(table_name):
'''
    Group all exceptions in the database by exception type and export them to a file.
:param table_name: table_name in zl_crash database
'''
EXCEPTION_TYPE_COUNT = {}
EXCEPTION_MAPPING = {}
try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor()
group_exception_type = (
"SELECT exception_type, COUNT(*) as nums "
"FROM " + table_name + " GROUP BY exception_type")
query_specific_exception = (
"SELECT * FROM " + table_name + " "
"WHERE exception_type = %s")
cursor.execute(group_exception_type)
for (exception_type, nums) in cursor:
EXCEPTION_TYPE_COUNT[exception_type] = nums
# print("exception_type:" + exception_type + ", nums:" + str(nums))
for exception_type in EXCEPTION_TYPE_COUNT.keys():
cursor.execute(query_specific_exception, (exception_type,))
exception_list = []
for (report_id, exception_type, device_id, exception_symbols, os_version) in cursor:
report = Report(report_id, exception_type, device_id, exception_symbols, os_version)
exception_list.append(report)<|fim▁hole|> EXCEPTION_MAPPING[exception_type] = exception_list
write_grouped_exception_to_file(EXCEPTION_TYPE_COUNT, EXCEPTION_MAPPING)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err.msg)
finally:
cursor.close()
conn.close()
def write_grouped_exception_to_file(count, mapping):
'''
    Export grouped exceptions to an Excel file
    :param count: dict mapping exception_type to its number of occurrences
    :param mapping: dict mapping exception_type to a list of Report objects
'''
output_file_name = EXCEL_NAME + '_grouped.xlsx'
os.system('rm -rf ' + output_file_name)
workbook = xlsxwriter.Workbook(output_file_name)
worksheet = workbook.add_worksheet()
    # Set column widths
worksheet.set_column('A:A', 25)
worksheet.set_column('B:B', 10)
worksheet.set_column('C:C', 25)
worksheet.set_column('D:D', 40)
worksheet.set_column('E:E', 500)
    # Bold format for the header
bold = workbook.add_format({'font_size': 14,
'align': 'center',
'bold': True})
    # Header row
worksheet.write('A1', 'exception_type', bold)
worksheet.write('B1', 'count', bold)
worksheet.write('C1', 'os_version', bold)
worksheet.write('D1', 'device_id', bold)
worksheet.write('E1', 'symbols', bold)
    # Row/column indexes used while writing to the Excel sheet
row_index = 1
col_index = 0
colors = ('#A8BAAA', '#FFF6CF', '#DCCDAE', '#B49D7E',
'#816854', '#334D5C', '#45B29D', '#EFC94C')
count_index = 0
pattern = 0.5
for (type, num) in count.items():
bg_color = colors[count_index % len(colors)]
col_format = workbook.add_format({'pattern': pattern,
'bg_color': bg_color})
num_col_format = workbook.add_format({'pattern': pattern,
'bg_color': bg_color,
'bold': True,
'align': 'center'})
count_index += 1
list = mapping[type]
for i in range(num):
report_item = list[i]
if i == 0:
worksheet.write(row_index, col_index, report_item.exception_type, col_format)
col_index += 1
worksheet.write(row_index, col_index, num, num_col_format)
col_index += 1
else:
worksheet.write(row_index, col_index, '', col_format)
col_index += 1
worksheet.write(row_index, col_index, '', col_format)
col_index += 1
worksheet.write(row_index, col_index, report_item.os_version, col_format)
col_index += 1
worksheet.write(row_index, col_index, report_item.device_id, col_format)
col_index += 1
worksheet.write(row_index, col_index, report_item.exception_symbols, col_format)
            # Move to the next row and reset the column index
row_index += 1
col_index = 0
    # Close the file
workbook.close()
print("Exporting grouped data to " + output_file_name)
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>triage-chiamata-modal.state.ts<|end_file_name|><|fim▁begin|>import { State, Selector, Action, StateContext } from '@ngxs/store';
import { Injectable } from '@angular/core';
import { DetttagliTipologieService } from '../../../../core/service/dettagli-tipologie/dettagli-tipologie.service';
import { TriageService } from '../../../../core/service/triage/triage.service';
import {
GetDettagliTipologieByCodTipologia,
ClearDettagliTipologie,
SetDettaglioTipologiaTriageChiamata,
SetTipologiaTriageChiamata,
SetTriageChiamata,
ClearTipologiaTriageChiamata,
ClearDettaglioTipologiaTriageChiamata,
ClearTriageChiamata, StartLoadingTriageChiamata, StopLoadingTriageChiamata
} from '../../actions/triage-modal/triage-modal.actions';
import { GetDettaglioTipologiaByCodTipologiaDto } from '../../../interface/dto/dettagli-tipologie/dettaglio-tipologia-dto.interface';
import { DettaglioTipologia } from '../../../interface/dettaglio-tipologia.interface';
import { TreeviewItem } from 'ngx-treeview';
import { ItemTriageData } from '../../../interface/item-triage-data.interface';
import { PosInterface } from '../../../interface/pos.interface';
import { StartLoadingDettagliTipologia, StopLoadingDettagliTipologia } from '../../../../features/home/store/actions/form-richiesta/scheda-telefonata.actions';
export interface TriageChiamataModalStateModel {
dettagliTipologia: DettaglioTipologia[];
codTipologiaSelezionata: number;
codDettaglioTipologiaSelezionato: number;
triage: TreeviewItem;
triageData: ItemTriageData[];
idTriage: string;
pos: PosInterface[];
loadingTriageChiamata: boolean;
}
export const TriageChiamataModalStateDefaults: TriageChiamataModalStateModel = {
dettagliTipologia: undefined,
codTipologiaSelezionata: undefined,
codDettaglioTipologiaSelezionato: undefined,
triage: undefined,
triageData: undefined,
idTriage: undefined,
pos: undefined,
loadingTriageChiamata: undefined
};
@Injectable()
@State<TriageChiamataModalStateModel>({
name: 'triageChiamataModal',
defaults: TriageChiamataModalStateDefaults
})
export class TriageChiamataModalState {
constructor(private detttagliTipologieService: DetttagliTipologieService,
private triageService: TriageService) {
}
@Selector()
static dettagliTipologia(state: TriageChiamataModalStateModel): DettaglioTipologia[] {
return state.dettagliTipologia;<|fim▁hole|> return state.triage;
}
@Selector()
static triageData(state: TriageChiamataModalStateModel): ItemTriageData[] {
return state.triageData;
}
@Selector()
static loadingTriageChiamata(state: TriageChiamataModalStateModel): boolean {
return state.loadingTriageChiamata;
}
@Action(GetDettagliTipologieByCodTipologia)
getDettagliTipologieByCodTipologia({ patchState, dispatch }: StateContext<TriageChiamataModalStateModel>, action: GetDettagliTipologieByCodTipologia): void {
dispatch(new StartLoadingTriageChiamata());
dispatch(new StartLoadingDettagliTipologia());
this.detttagliTipologieService.getDettaglioTipologiaByCodTipologia(action.codTipologia).subscribe((response: GetDettaglioTipologiaByCodTipologiaDto) => {
patchState({
dettagliTipologia: response.listaDettaglioTipologie
});
dispatch(new StopLoadingTriageChiamata());
dispatch(new StopLoadingDettagliTipologia());
});
}
@Action(ClearDettagliTipologie)
clearDettagliTipologie({ patchState }: StateContext<TriageChiamataModalStateModel>): void {
patchState({
dettagliTipologia: TriageChiamataModalStateDefaults.dettagliTipologia
});
}
@Action(SetTipologiaTriageChiamata)
setTipologiaTriageChiamata({ patchState }: StateContext<TriageChiamataModalStateModel>, action: SetTipologiaTriageChiamata): void {
patchState({
codTipologiaSelezionata: action.codTipologia
});
}
@Action(ClearTipologiaTriageChiamata)
clearTipologiaTriageChiamata({ patchState }: StateContext<TriageChiamataModalStateModel>): void {
patchState({
codTipologiaSelezionata: TriageChiamataModalStateDefaults.codTipologiaSelezionata
});
}
@Action(SetDettaglioTipologiaTriageChiamata)
setDettaglioTipologiaTriageChiamata({patchState, dispatch }: StateContext<TriageChiamataModalStateModel>, action: SetDettaglioTipologiaTriageChiamata): void {
patchState({
codDettaglioTipologiaSelezionato: action.codDettaglioTipologia,
pos: action.pos
});
dispatch(new SetTriageChiamata());
}
@Action(ClearDettaglioTipologiaTriageChiamata)
clearDettaglioTipologiaTriageChiamata({ patchState }: StateContext<TriageChiamataModalStateModel>): void {
patchState({
codDettaglioTipologiaSelezionato: TriageChiamataModalStateDefaults.codDettaglioTipologiaSelezionato
});
}
@Action(SetTriageChiamata)
setTriageChiamata({ getState, patchState, dispatch }: StateContext<TriageChiamataModalStateModel>): void {
const state = getState();
const codTipologiaSelezionata = state.codTipologiaSelezionata;
const codDettaglioTipologiaSelezionata = state.codDettaglioTipologiaSelezionato;
dispatch(new StartLoadingTriageChiamata());
this.triageService.get(codTipologiaSelezionata, codDettaglioTipologiaSelezionata).subscribe((res: { triage: { id: string, data: TreeviewItem }, triageData: ItemTriageData[] }) => {
patchState({
idTriage: res?.triage?.id,
triage: res?.triage?.data,
triageData: res?.triageData
});
dispatch(new StopLoadingTriageChiamata());
});
}
@Action(ClearTriageChiamata)
clearTriageChiamata({ patchState }: StateContext<TriageChiamataModalStateModel>): void {
patchState({
idTriage: TriageChiamataModalStateDefaults.idTriage,
triage: TriageChiamataModalStateDefaults.triage,
triageData: TriageChiamataModalStateDefaults.triageData,
pos: TriageChiamataModalStateDefaults.pos
});
}
@Action(StartLoadingTriageChiamata)
startLoadingTriageChiamata({ patchState }: StateContext<TriageChiamataModalStateModel>): void {
patchState({
loadingTriageChiamata: true
});
}
@Action(StopLoadingTriageChiamata)
stopLoadingTriageChiamata({ patchState }: StateContext<TriageChiamataModalStateModel>): void {
patchState({
loadingTriageChiamata: false
});
}
}<|fim▁end|> | }
@Selector()
static triage(state: TriageChiamataModalStateModel): TreeviewItem { |
<|file_name|>optional.rs<|end_file_name|><|fim▁begin|>use crate::reflect::value::value_ref::ReflectValueMut;
use crate::reflect::ReflectValueBox;
use crate::reflect::ReflectValueRef;<|fim▁hole|>use crate::reflect::RuntimeTypeBox;
#[derive(Debug, Clone)]
pub(crate) struct DynamicOptional {
elem: RuntimeTypeBox,
value: Option<ReflectValueBox>,
}
impl DynamicOptional {
pub fn none(elem: RuntimeTypeBox) -> DynamicOptional {
DynamicOptional { elem, value: None }
}
pub fn mut_or_default(&mut self) -> ReflectValueMut {
if let None = self.value {
self.value = Some(self.elem.default_value_ref().to_box());
}
self.value.as_mut().unwrap().as_value_mut()
}
pub fn clear(&mut self) {
self.value = None;
}
pub fn get(&self) -> Option<ReflectValueRef> {
self.value.as_ref().map(ReflectValueBox::as_value_ref)
}
pub fn set(&mut self, value: ReflectValueBox) {
assert_eq!(value.get_type(), self.elem);
self.value = Some(value);
}
}<|fim▁end|> | |
<|file_name|>price.service.ts<|end_file_name|><|fim▁begin|>import * as memotyCache from 'memory-cache';
import * as moment from 'moment';
import * as Bluebird from 'bluebird';
import * as _ from 'lodash';
import axios from 'axios';
import { HubData } from '../../eve-client/api/id-names-mapper';
const PRICE_ENDPOINT = 'https://esi.tech.ccp.is/latest/markets/{regionId}/orders/?type_id={itemId}';
export class PriceServiceResponse {
sell: PriceResponse;
buy: PriceResponse;
}
export function getPriceForItemOnStation(itemId: number, regionId: number, stationId: number) {
let priceSearchKey = '' + itemId + regionId;
let pricesOrError: PriceResponse[] & { code: number } = memotyCache.get(priceSearchKey);
if (pricesOrError) {
console.info(`price for ${priceSearchKey} has been found in cache, skipping CCP call`);
if (pricesOrError.code && pricesOrError.code === 404) {
return Bluebird.reject(pricesOrError);
}
return Bluebird.resolve(filterPrices(pricesOrError, stationId));
}
console.info(`price for ${priceSearchKey} not found in cache, executing CCP call`);
return new Bluebird<PriceServiceResponse>((resolve, reject) => {
axios.get(PRICE_ENDPOINT.replace('{regionId}', regionId.toString()).replace('{itemId}', itemId.toString()))<|fim▁hole|> memotyCache.put(priceSearchKey, { code: 404 }, diff);
reject({ code: 404 });
return;
}
memotyCache.put(priceSearchKey, result.data, diff);
console.info(`cache key ${priceSearchKey} has been added with ${(diff / 1000).toFixed(0)}s TTL`);
resolve(filterPrices(result.data, stationId));
})
.catch(err => {
console.error(err);
reject(err);
});
});
}
function filterPrices(prices: PriceResponse[], stationId: number): PriceServiceResponse {
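  // Highest-priced buy order and lowest-priced sell order at the given station;
  // both fall back to prices[0] when no order at that station matches.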
let result = new PriceServiceResponse();
result.buy = _.maxBy(_.filter(prices, (order) => {
return order.location_id === stationId && order.is_buy_order;
}), record => record.price) || prices[0];
result.sell = _.minBy(_.filter(prices, (order) => {
return order.location_id === stationId && !order.is_buy_order;
}), record => record.price) || prices[0];
return result;
}
export interface PriceResponse {
order_id: number;
type_id: number;
location_id: number;
volume_total: number;
volume_remain: number;
min_volume: number;
price: number;
is_buy_order: number;
duration: number;
issued: string;
range: string;
}<|fim▁end|> | .then(result => {
let expires = moment(result.headers['expires'] + '+0000', 'ddd, DD MMM YYYY HH:mm:ss Z');
let diff = expires.diff(moment());
if (result.data.length === 0) { |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>import re
import sys
class URI():
def __init__( self, root_path ):
super().__init__()
if root_path[-1] != '/' or root_path[0] != '/':
raise ValueError( 'root_path must start and end with "/"' )
self.root_path = root_path
self.uri_regex = re.compile( r'^({0}|/)(([a-zA-Z0-9\-_.!~*<>]+/)*)([a-zA-Z0-9\-_.!~*<>]+)?(:([a-zA-Z0-9\-_.!~*\'<>]*:)*)?(\([a-zA-Z0-9\-_.!~*<>]+\))?$'.format( self.root_path ) )
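    # regex groups consumed by split()/extractIds(): root, namespace path,
    # (repeated namespace element), model, ':'-delimited id list, (repeated id element), '(action)'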
def split( self, uri, root_optional=False ):
uri_match = self.uri_regex.match( uri )
if not uri_match:
raise ValueError( 'Unable to parse URI "{0}"'.format( uri ) )
( root, namespace, _, model, rec_id, _, action ) = uri_match.groups()
if root != self.root_path and not root_optional:
raise ValueError( 'URI does not start in the root_path' )
if namespace != '':
namespace_list = namespace.rstrip( '/' ).split( '/' )
else:
namespace_list = []
if rec_id is not None:
id_list = rec_id.strip( ':' ).split( ':' )
multi = len( id_list ) > 1<|fim▁hole|>
if action is not None:
action = action[ 1:-1 ]
return ( namespace_list, model, action, id_list, multi )
def build( self, namespace=None, model=None, action=None, id_list=None, in_root=True ):
"""
    build a uri, NOTE: if model is None, id_list and action are skipped
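    e.g. with root_path '/api/': build( ['ns'], 'model', 'act', [ '1', '2' ] ) -> '/api/ns/model:1:2:(act)'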
"""
if in_root:
result = self.root_path
else:
result = '/'
if namespace is not None:
if not isinstance( namespace, list ):
namespace = [ namespace ]
if len( namespace ) > 0:
result = '{0}{1}/'.format( result, '/'.join( namespace ) )
if model is None:
return result
result = '{0}{1}'.format( result, model )
if id_list is not None and id_list != []:
if not isinstance( id_list, list ):
id_list = [ id_list ]
result = '{0}:{1}:'.format( result, ':'.join( id_list ) )
if action is not None:
result = '{0}({1})'.format( result, action )
return result
def extractIds( self, uri_list ): # TODO: should we make sure the namespace/model do not change in the list?
"""
    extract the record IDs from the URIs in uri_list; can handle some/all/none
    of the URIs already having multiple IDs in them. Does not force uniqueness;
    order should remain intact
"""
if isinstance( uri_list, str ):
uri_list = [ uri_list ]
if not isinstance( uri_list, list ):
raise ValueError( 'uri_list must be string or list of strings' )
result = []
for uri in uri_list:
uri_match = self.uri_regex.match( uri )
if not uri_match:
raise ValueError( 'Unable to parse URI "{0}"'.format( uri ) )
( _, _, _, _, rec_id, _, _ ) = uri_match.groups()
if rec_id is None:
continue
result += rec_id.strip( ':' ).split( ':' )
return result
def uriListToMultiURI( self, uri_list ):
"""
    runs extractIds on the list, then takes the first uri and applies all
the ids to it
"""
if not uri_list:
return []
id_list = self.extractIds( uri_list )
if not id_list:
return []
( namespace_list, model, action, _, _ ) = self.split( uri_list[0] )
return self.build( namespace_list, model, action, id_list, True )
# borrowed from https://www.python.org/dev/peps/pep-0257/
def doccstring_prep( docstring ):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[ 1: ]:
stripped = line.lstrip()
if stripped:
indent = min( indent, len( line ) - len( stripped ) )
# Remove indentation (first line is special):
trimmed = [ lines[0].strip() ]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append( line[ indent: ].rstrip() )
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop( 0 )
# Return a single string:
return '\n'.join( trimmed )<|fim▁end|> | else:
id_list = None # id_list = [] is an empty list of ids, where None means the list is not even present
multi = False |
<|file_name|>fn.rs<|end_file_name|><|fim▁begin|>// Unlike C/C++, there's no restriction on the order of function definitions
fn main() {
// We can use this function here, and define it somewhere later
fizzbuzz_to(100);
}
// Function that returns a boolean value
fn is_divisible_by(lhs: u32, rhs: u32) -> bool {
// Corner case, early return
if rhs == 0 {
return false;
}
// This is an expression, the `return` keyword is not necessary here
lhs % rhs == 0<|fim▁hole|>fn fizzbuzz(n: u32) -> () {
if is_divisible_by(n, 15) {
println!("fizzbuzz");
} else if is_divisible_by(n, 3) {
println!("fizz");
} else if is_divisible_by(n, 5) {
println!("buzz");
} else {
println!("{}", n);
}
}
// When a function returns `()`, the return type can be omitted from the
// signature
fn fizzbuzz_to(n: u32) {
    for n in 1..=n {
fizzbuzz(n);
}
}<|fim▁end|> | }
// Functions that "don't" return a value, actually return the unit type `()` |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | default_app_config = 'apps.datasetmanager.apps.datasetmanagerConfig' |
<|file_name|>copy_test.go<|end_file_name|><|fim▁begin|>// +build linux
package copy
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"syscall"
"testing"
"time"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestCopy(t *testing.T) {
copyWithFileRange := true
copyWithFileClone := true
doCopyTest(t, ©WithFileRange, ©WithFileClone)
}
func TestCopyWithoutRange(t *testing.T) {
copyWithFileRange := false
copyWithFileClone := false
doCopyTest(t, ©WithFileRange, ©WithFileClone)
}
func TestCopyDir(t *testing.T) {
srcDir, err := ioutil.TempDir("", "srcDir")
assert.NilError(t, err)
populateSrcDir(t, srcDir, 3)
dstDir, err := ioutil.TempDir("", "testdst")
assert.NilError(t, err)
defer os.RemoveAll(dstDir)
assert.Check(t, DirCopy(srcDir, dstDir, Content, false))
assert.NilError(t, filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(srcDir, srcPath)
assert.NilError(t, err)
if relPath == "." {
return nil
}
dstPath := filepath.Join(dstDir, relPath)
assert.NilError(t, err)
// If we add non-regular dirs and files to the test
// then we need to add more checks here.
dstFileInfo, err := os.Lstat(dstPath)
assert.NilError(t, err)
srcFileSys := f.Sys().(*syscall.Stat_t)
dstFileSys := dstFileInfo.Sys().(*syscall.Stat_t)
t.Log(relPath)
if srcFileSys.Dev == dstFileSys.Dev {
assert.Check(t, srcFileSys.Ino != dstFileSys.Ino)
}
// Todo: check size, and ctim is not equal
		// on filesystems that have granular ctimes
assert.Check(t, is.DeepEqual(srcFileSys.Mode, dstFileSys.Mode))
assert.Check(t, is.DeepEqual(srcFileSys.Uid, dstFileSys.Uid))
assert.Check(t, is.DeepEqual(srcFileSys.Gid, dstFileSys.Gid))
assert.Check(t, is.DeepEqual(srcFileSys.Mtim, dstFileSys.Mtim))
return nil
}))
}
func randomMode(baseMode int) os.FileMode {
for i := 0; i < 7; i++ {
baseMode = baseMode | (1&rand.Intn(2))<<uint(i)
}
return os.FileMode(baseMode)
}
func populateSrcDir(t *testing.T, srcDir string, remainingDepth int) {
if remainingDepth == 0 {
return
}
aTime := time.Unix(rand.Int63(), 0)
mTime := time.Unix(rand.Int63(), 0)
for i := 0; i < 10; i++ {
dirName := filepath.Join(srcDir, fmt.Sprintf("srcdir-%d", i))
// Owner all bits set
assert.NilError(t, os.Mkdir(dirName, randomMode(0700)))
populateSrcDir(t, dirName, remainingDepth-1)
assert.NilError(t, system.Chtimes(dirName, aTime, mTime))
}
for i := 0; i < 10; i++ {
fileName := filepath.Join(srcDir, fmt.Sprintf("srcfile-%d", i))
// Owner read bit set
assert.NilError(t, ioutil.WriteFile(fileName, []byte{}, randomMode(0400)))
assert.NilError(t, system.Chtimes(fileName, aTime, mTime))
}
}
func doCopyTest(t *testing.T, copyWithFileRange, copyWithFileClone *bool) {
dir, err := ioutil.TempDir("", "storage-copy-check")
assert.NilError(t, err)
defer os.RemoveAll(dir)
srcFilename := filepath.Join(dir, "srcFilename")
dstFilename := filepath.Join(dir, "dstilename")
r := rand.New(rand.NewSource(0))
buf := make([]byte, 1024)
_, err = r.Read(buf)
assert.NilError(t, err)
assert.NilError(t, ioutil.WriteFile(srcFilename, buf, 0777))
fileinfo, err := os.Stat(srcFilename)
assert.NilError(t, err)<|fim▁hole|> readBuf, err := ioutil.ReadFile(dstFilename)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(buf, readBuf))
}
func TestCopyHardlink(t *testing.T) {
var srcFile1FileInfo, srcFile2FileInfo, dstFile1FileInfo, dstFile2FileInfo unix.Stat_t
srcDir, err := ioutil.TempDir("", "srcDir")
assert.NilError(t, err)
defer os.RemoveAll(srcDir)
dstDir, err := ioutil.TempDir("", "dstDir")
assert.NilError(t, err)
defer os.RemoveAll(dstDir)
srcFile1 := filepath.Join(srcDir, "file1")
srcFile2 := filepath.Join(srcDir, "file2")
dstFile1 := filepath.Join(dstDir, "file1")
dstFile2 := filepath.Join(dstDir, "file2")
assert.NilError(t, ioutil.WriteFile(srcFile1, []byte{}, 0777))
assert.NilError(t, os.Link(srcFile1, srcFile2))
assert.Check(t, DirCopy(srcDir, dstDir, Content, false))
assert.NilError(t, unix.Stat(srcFile1, &srcFile1FileInfo))
assert.NilError(t, unix.Stat(srcFile2, &srcFile2FileInfo))
assert.Equal(t, srcFile1FileInfo.Ino, srcFile2FileInfo.Ino)
assert.NilError(t, unix.Stat(dstFile1, &dstFile1FileInfo))
assert.NilError(t, unix.Stat(dstFile2, &dstFile2FileInfo))
assert.Check(t, is.Equal(dstFile1FileInfo.Ino, dstFile2FileInfo.Ino))
}<|fim▁end|> |
assert.NilError(t, CopyRegular(srcFilename, dstFilename, fileinfo, copyWithFileRange, copyWithFileClone)) |
<|file_name|>benefit_rating_lkpPersistence.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2000-2012 Liferay, Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the Free
* Software Foundation; either version 2.1 of the License, or (at your option)
* any later version.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*/
package com.iucn.whp.dbservice.service.persistence;
import com.iucn.whp.dbservice.model.benefit_rating_lkp;
import com.liferay.portal.service.persistence.BasePersistence;
/**
* The persistence interface for the benefit_rating_lkp service.
*
* <p>
* Caching information and settings can be found in <code>portal.properties</code>
* </p>
*
* @author alok.sen
* @see benefit_rating_lkpPersistenceImpl
* @see benefit_rating_lkpUtil
* @generated
*/
public interface benefit_rating_lkpPersistence extends BasePersistence<benefit_rating_lkp> {
/*
* NOTE FOR DEVELOPERS:
*
* Never modify or reference this interface directly. Always use {@link benefit_rating_lkpUtil} to access the benefit_rating_lkp persistence. Modify <code>service.xml</code> and rerun ServiceBuilder to regenerate this interface.
*/
/**
* Caches the benefit_rating_lkp in the entity cache if it is enabled.
*
* @param benefit_rating_lkp the benefit_rating_lkp
*/
public void cacheResult(
com.iucn.whp.dbservice.model.benefit_rating_lkp benefit_rating_lkp);
/**
* Caches the benefit_rating_lkps in the entity cache if it is enabled.
*
* @param benefit_rating_lkps the benefit_rating_lkps
*/
public void cacheResult(
java.util.List<com.iucn.whp.dbservice.model.benefit_rating_lkp> benefit_rating_lkps);
/**
* Creates a new benefit_rating_lkp with the primary key. Does not add the benefit_rating_lkp to the database.
*
* @param id the primary key for the new benefit_rating_lkp
* @return the new benefit_rating_lkp
*/
public com.iucn.whp.dbservice.model.benefit_rating_lkp create(long id);
/**
* Removes the benefit_rating_lkp with the primary key from the database. Also notifies the appropriate model listeners.
*
* @param id the primary key of the benefit_rating_lkp
* @return the benefit_rating_lkp that was removed
* @throws com.iucn.whp.dbservice.NoSuchbenefit_rating_lkpException if a benefit_rating_lkp with the primary key could not be found
* @throws SystemException if a system exception occurred
*/
public com.iucn.whp.dbservice.model.benefit_rating_lkp remove(long id)
throws com.iucn.whp.dbservice.NoSuchbenefit_rating_lkpException,
com.liferay.portal.kernel.exception.SystemException;
public com.iucn.whp.dbservice.model.benefit_rating_lkp updateImpl(
com.iucn.whp.dbservice.model.benefit_rating_lkp benefit_rating_lkp,
boolean merge)
throws com.liferay.portal.kernel.exception.SystemException;
/**
* Returns the benefit_rating_lkp with the primary key or throws a {@link com.iucn.whp.dbservice.NoSuchbenefit_rating_lkpException} if it could not be found.
*
* @param id the primary key of the benefit_rating_lkp
* @return the benefit_rating_lkp
* @throws com.iucn.whp.dbservice.NoSuchbenefit_rating_lkpException if a benefit_rating_lkp with the primary key could not be found
* @throws SystemException if a system exception occurred
*/
public com.iucn.whp.dbservice.model.benefit_rating_lkp findByPrimaryKey(
long id)
throws com.iucn.whp.dbservice.NoSuchbenefit_rating_lkpException,
com.liferay.portal.kernel.exception.SystemException;
/**
* Returns the benefit_rating_lkp with the primary key or returns <code>null</code> if it could not be found.
*
* @param id the primary key of the benefit_rating_lkp
* @return the benefit_rating_lkp, or <code>null</code> if a benefit_rating_lkp with the primary key could not be found
* @throws SystemException if a system exception occurred
*/
public com.iucn.whp.dbservice.model.benefit_rating_lkp fetchByPrimaryKey(
long id) throws com.liferay.portal.kernel.exception.SystemException;
/**
* Returns all the benefit_rating_lkps.
*
* @return the benefit_rating_lkps
* @throws SystemException if a system exception occurred
*/
public java.util.List<com.iucn.whp.dbservice.model.benefit_rating_lkp> findAll()
throws com.liferay.portal.kernel.exception.SystemException;
/**
* Returns a range of all the benefit_rating_lkps.
*
* <p>
* Useful when paginating results. Returns a maximum of <code>end - start</code> instances. <code>start</code> and <code>end</code> are not primary keys, they are indexes in the result set. Thus, <code>0</code> refers to the first result in the set. Setting both <code>start</code> and <code>end</code> to {@link com.liferay.portal.kernel.dao.orm.QueryUtil#ALL_POS} will return the full result set.
* </p>
*
* @param start the lower bound of the range of benefit_rating_lkps
* @param end the upper bound of the range of benefit_rating_lkps (not inclusive)
* @return the range of benefit_rating_lkps
* @throws SystemException if a system exception occurred
*/
public java.util.List<com.iucn.whp.dbservice.model.benefit_rating_lkp> findAll(
int start, int end)
throws com.liferay.portal.kernel.exception.SystemException;
/**
* Returns an ordered range of all the benefit_rating_lkps.
*
* <p>
* Useful when paginating results. Returns a maximum of <code>end - start</code> instances. <code>start</code> and <code>end</code> are not primary keys, they are indexes in the result set. Thus, <code>0</code> refers to the first result in the set. Setting both <code>start</code> and <code>end</code> to {@link com.liferay.portal.kernel.dao.orm.QueryUtil#ALL_POS} will return the full result set.
* </p><|fim▁hole|> * @param orderByComparator the comparator to order the results by (optionally <code>null</code>)
* @return the ordered range of benefit_rating_lkps
* @throws SystemException if a system exception occurred
*/
public java.util.List<com.iucn.whp.dbservice.model.benefit_rating_lkp> findAll(
int start, int end,
com.liferay.portal.kernel.util.OrderByComparator orderByComparator)
throws com.liferay.portal.kernel.exception.SystemException;
/**
* Removes all the benefit_rating_lkps from the database.
*
* @throws SystemException if a system exception occurred
*/
public void removeAll()
throws com.liferay.portal.kernel.exception.SystemException;
/**
* Returns the number of benefit_rating_lkps.
*
* @return the number of benefit_rating_lkps
* @throws SystemException if a system exception occurred
*/
public int countAll()
throws com.liferay.portal.kernel.exception.SystemException;
}<|fim▁end|> | *
* @param start the lower bound of the range of benefit_rating_lkps
* @param end the upper bound of the range of benefit_rating_lkps (not inclusive) |
<|file_name|>permissions.py<|end_file_name|><|fim▁begin|>from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
    def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD, or OPTIONS requests.<|fim▁hole|><|fim▁end|> | (request.method in permissions.SAFE_METHODS) or (obj.owner == request.user) |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# AppTalk documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 25 00:32:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'AppTalk'
copyright = '2017, Thomas Lee'
author = 'Thomas Lee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme<|fim▁hole|>#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AppTalkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AppTalk.tex', 'AppTalk Documentation',
'Thomas Lee', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'apptalk', 'AppTalk Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AppTalk', 'AppTalk Documentation',
author, 'AppTalk', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
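# With the mapping above, reStructuredText in this project can cross-reference
# the Python standard library docs. A hypothetical usage sketch (the targets
# below are illustrative, not taken from this project's pages):
#
#   See :py:mod:`json` and :py:func:`json.dumps` for serialisation details.
#
# Sphinx resolves these roles against https://docs.python.org/ at build time
# via the objects.inv inventory that intersphinx downloads.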
# Project logo and icon
html_logo = '_static/apptalk_logo_200x160.png'
html_favicon = '_static/favicon.ico'<|fim▁end|> | # further. For a list of options available for each theme, see the
# documentation. |
<|file_name|>build_binaries.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
script to build the latest binaries for each vehicle type, ready to upload
Peter Barker, August 2017
based on build_binaries.sh by Andrew Tridgell, March 2013
"""
from __future__ import print_function
import datetime
import optparse
import os
import re
import shutil
import time
import subprocess
import sys
import zlib
# local imports
import generate_manifest
class build_binaries(object):
def __init__(self, tags):
self.tags = tags
self.dirty = False
def progress(self, string):
'''pretty-print progress'''
print("BB: %s" % string)
def run_git(self, args):
'''run git with args git_args; returns git's output'''
cmd_list = ["git"]
cmd_list.extend(args)
return self.run_program("BB-GIT", cmd_list)
def board_branch_bit(self, board):
'''return a fragment which might modify the branch name.
this was previously used to have a master-AVR branch etc
if the board type was apm1 or apm2'''
return None
def board_options(self, board):
'''return board-specific options'''
if board == "bebop":
return ["--static"]
return []
def run_waf(self, args):
if os.path.exists("waf"):
waf = "./waf"
else:
waf = os.path.join(".", "modules", "waf", "waf-light")
cmd_list = [waf]
cmd_list.extend(args)
self.run_program("BB-WAF", cmd_list)
def run_program(self, prefix, cmd_list):
self.progress("Running (%s)" % " ".join(cmd_list))
p = subprocess.Popen(cmd_list, bufsize=1, stdin=None,
stdout=subprocess.PIPE, close_fds=True,
stderr=subprocess.STDOUT)
output = ""
while True:
x = p.stdout.readline()
if len(x) == 0:
returncode = os.waitpid(p.pid, 0)
if returncode:
break
# select not available on Windows... probably...
time.sleep(0.1)
continue
output += x
x = x.rstrip()
print("%s: %s" % (prefix, x))
(_, status) = returncode
if status != 0:
self.progress("Process failed (%s)" %
str(returncode))
raise subprocess.CalledProcessError(
returncode, cmd_list)
return output
def run_make(self, args):
cmd_list = ["make"]
cmd_list.extend(args)
self.run_program("BB-MAKE", cmd_list)
def run_git_update_submodules(self):
'''if submodules are present initialise and update them'''
if os.path.exists(os.path.join(self.basedir, ".gitmodules")):
self.run_git(["submodule",
"update",
"--init",
"--recursive",
"-f"])
def checkout(self, vehicle, ctag, cboard=None, cframe=None):
'''attempt to check out a git tree. Various permutations are
        attempted based on ctag - for example, if the board is avr and ctag
is bob we will attempt to checkout bob-AVR'''
if self.dirty:
self.progress("Skipping checkout for dirty build")
return True
self.progress("Trying checkout %s %s %s %s" %
(vehicle, ctag, cboard, cframe))
self.run_git(['stash'])
if ctag == "latest":
vtag = "master"
else:
vtag = "%s-%s" % (vehicle, ctag)
branches = []
if cframe is not None:
# try frame specific tag
branches.append("%s-%s" % (vtag, cframe))
if cboard is not None:
bbb = self.board_branch_bit(cboard)
if bbb is not None:
# try board type specific branch extension
branches.append("".join([vtag, bbb]))
branches.append(vtag)
for branch in branches:
try:
self.progress("Trying branch %s" % branch)
self.run_git(["checkout", "-f", branch])
self.run_git_update_submodules()
self.run_git(["log", "-1"])
return True
except subprocess.CalledProcessError as e:
self.progress("Checkout branch %s failed" % branch)
pass
self.progress("Failed to find tag for %s %s %s %s" %
(vehicle, ctag, cboard, cframe))
return False
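    # Illustrative walk-through of the candidate list built above (values are
    # examples, not a real build): checkout("ArduCopter", "stable",
    # cboard="px4", cframe="heli") tries, in order,
    #   ArduCopter-stable-heli, ArduCopter-stable
    # (plus a board-specific variant if board_branch_bit() ever returns a
    # suffix), while ctag == "latest" collapses the base tag to plain "master".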
def skip_board_waf(self, board):
'''check if we should skip this build because we don't support the
board in this release
'''
try:
if self.string_in_filepath(board,
os.path.join(self.basedir,
'Tools',
'ardupilotwaf',
'boards.py')):
return False
except IOError as e:
if e.errno != 2:
raise
# see if there's a hwdef.dat for this board:
if os.path.exists(os.path.join(self.basedir,
'libraries',
'AP_HAL_ChibiOS',
'hwdef',
board)):
self.progress("ChibiOS build: %s" % (board,))
return False
self.progress("Skipping unsupported board %s" % (board,))
return True
def skip_frame(self, board, frame):
'''returns true if this board/frame combination should not be built'''
if frame == "heli":
if board in ["bebop", "aerofc-v1", "skyviper-v2450"]:
self.progress("Skipping heli build for %s" % board)
return True
return False
def first_line_of_filepath(self, filepath):
'''returns the first (text) line from filepath'''
with open(filepath) as fh:
line = fh.readline()
return line
def skip_build(self, buildtag, builddir):
'''check if we should skip this build because we have already built
this version
'''
if os.getenv("FORCE_BUILD", False):
return False
if not os.path.exists(os.path.join(self.basedir, '.gitmodules')):
self.progress("Skipping build without submodules")
return True
bname = os.path.basename(builddir)
ldir = os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(builddir))), buildtag, bname) # FIXME: WTF
oldversion_filepath = os.path.join(ldir, "git-version.txt")
if not os.path.exists(oldversion_filepath):
self.progress("%s doesn't exist - building" % oldversion_filepath)
return False
oldversion = self.first_line_of_filepath(oldversion_filepath)
newversion = self.run_git(["log", "-1"])
newversion = newversion.splitlines()[0]
oldversion = oldversion.rstrip()
newversion = newversion.rstrip()
self.progress("oldversion=%s newversion=%s" %
(oldversion, newversion,))
if oldversion == newversion:
self.progress("Skipping build - version match (%s)" %
(newversion,))
return True
self.progress("%s needs rebuild" % (ldir,))
return False
def write_string_to_filepath(self, string, filepath):
'''writes the entirety of string to filepath'''
with open(filepath, "w") as x:
x.write(string)
def addfwversion_gitversion(self, destdir, src):
# create git-version.txt:
gitlog = self.run_git(["log", "-1"])
gitversion_filepath = os.path.join(destdir, "git-version.txt")
gitversion_content = gitlog
versionfile = os.path.join(src, "version.h")
if os.path.exists(versionfile):
content = self.read_string_from_filepath(versionfile)
match = re.search('define.THISFIRMWARE "([^"]+)"', content)
if match is None:
self.progress("Failed to retrieve THISFIRMWARE from version.h")
self.progress("Content: (%s)" % content)
self.progress("Writing version info to %s" %
(gitversion_filepath,))
gitversion_content += "\nAPMVERSION: %s\n" % (match.group(1))
else:
self.progress("%s does not exist" % versionfile)
self.write_string_to_filepath(gitversion_content, gitversion_filepath)
def addfwversion_firmwareversiontxt(self, destdir, src):
# create firmware-version.txt
versionfile = os.path.join(src, "version.h")
if not os.path.exists(versionfile):
self.progress("%s does not exist" % (versionfile,))
return
ss = ".*define +FIRMWARE_VERSION[ ]+(?P<major>\d+)[ ]*,[ ]*" \
"(?P<minor>\d+)[ ]*,[ ]*(?P<point>\d+)[ ]*,[ ]*" \
"(?P<type>[A-Z_]+)[ ]*"
content = self.read_string_from_filepath(versionfile)
match = re.search(ss, content)
if match is None:
self.progress("Failed to retrieve FIRMWARE_VERSION from version.h")
self.progress("Content: (%s)" % content)
return
ver = "%d.%d.%d-%s\n" % (int(match.group("major")),
int(match.group("minor")),<|fim▁hole|> self.progress("Writing version (%s) to %s" %
(ver, firmware_version_filepath,))
self.write_string_to_filepath(
ver, os.path.join(destdir, firmware_version_filepath))
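    # Hedged example of what the regex above extracts; the real contents of
    # version.h differ between vehicles and releases. A line such as
    #   #define FIRMWARE_VERSION 3,5,0,FIRMWARE_VERSION_TYPE_OFFICIAL
    # would yield a firmware-version.txt containing
    #   3.5.0-FIRMWARE_VERSION_TYPE_OFFICIAL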
def addfwversion(self, destdir, src):
'''write version information into destdir'''
self.addfwversion_gitversion(destdir, src)
self.addfwversion_firmwareversiontxt(destdir, src)
def read_string_from_filepath(self, filepath):
'''returns content of filepath as a string'''
with open(filepath, 'rb') as fh:
content = fh.read()
return content
def string_in_filepath(self, string, filepath):
'''returns true if string exists in the contents of filepath'''
return string in self.read_string_from_filepath(filepath)
def mkpath(self, path):
'''make directory path and all elements leading to it'''
'''distutils.dir_util.mkpath was playing up'''
try:
os.makedirs(path)
except OSError as e:
if e.errno != 17: # EEXIST
raise e
def copyit(self, afile, adir, tag, src):
'''copies afile into various places, adding metadata'''
bname = os.path.basename(adir)
tdir = os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(adir))), tag, bname)
if tag == "latest":
# we keep a permanent archive of all "latest" builds,
# their path including a build timestamp:
self.mkpath(adir)
self.progress("Copying %s to %s" % (afile, adir,))
shutil.copy(afile, adir)
self.addfwversion(adir, src)
# the most recent build of every tag is kept around:
self.progress("Copying %s to %s" % (afile, tdir))
self.mkpath(tdir)
self.addfwversion(tdir, src)
shutil.copy(afile, tdir)
def touch_filepath(self, filepath):
'''creates a file at filepath, or updates the timestamp on filepath'''
if os.path.exists(filepath):
os.utime(filepath, None)
else:
with open(filepath, "a"):
pass
def build_vehicle(self, tag, vehicle, boards, vehicle_binaries_subdir,
binaryname, px4_binaryname, frames=[None]):
'''build vehicle binaries'''
self.progress("Building %s %s binaries (cwd=%s)" %
(vehicle, tag, os.getcwd()))
# if not self.checkout(vehicle, tag):
# self.progress("Failed to check out (%s)" % tag)
# return
# # begin pointless checkout
# if not self.checkout(vehicle, "latest"):
# self.progress("Failed to check out (%s)" % "latest")
# return
# # end pointless checkout
for board in boards:
self.progress("Building board: %s" % board)
for frame in frames:
if frame is not None:
self.progress("Considering frame %s for board %s" %
(frame, board))
if frame is None:
framesuffix = ""
else:
framesuffix = "-%s" % frame
if not self.checkout(vehicle, tag, board, frame):
msg = ("Failed checkout of %s %s %s %s" %
(vehicle, board, tag, frame,))
self.progress(msg)
self.error_strings.append(msg)
continue
if self.skip_board_waf(board):
continue
self.progress("Building %s %s %s binaries %s" %
(vehicle, tag, board, frame))
ddir = os.path.join(self.binaries,
vehicle_binaries_subdir,
self.hdate_ym,
self.hdate_ymdhm,
"".join([board, framesuffix]))
if self.skip_build(tag, ddir):
continue
if self.skip_frame(board, frame):
continue
                self.remove_tmpdir()
self.progress("Configuring for %s in %s" %
(board, self.buildroot))
try:
waf_opts = ["configure",
"--board", board,
"--out", self.buildroot,
"clean"]
waf_opts.extend(self.board_options(board))
self.run_waf(waf_opts)
except subprocess.CalledProcessError as e:
self.progress("waf configure failed")
continue
try:
target = os.path.join("bin",
"".join([binaryname, framesuffix]))
self.run_waf(["build", "--targets", target])
except subprocess.CalledProcessError as e:
msg = ("Failed build of %s %s%s %s" %
(vehicle, board, framesuffix, tag))
self.progress(msg)
self.error_strings.append(msg)
continue
bare_path = os.path.join(self.buildroot,
board,
"bin",
"".join([binaryname, framesuffix]))
files_to_copy = []
if os.path.exists(bare_path):
files_to_copy.append(bare_path)
for extension in [".px4", ".apj", ".abin"]:
filepath = "".join([bare_path, extension])
if os.path.exists(filepath):
files_to_copy.append(filepath)
for path in files_to_copy:
try:
self.copyit(path, ddir, tag, vehicle)
except Exception as e:
self.progress("Failed to copy %s to %s: %s" % (path, ddir, str(e)))
# why is touching this important? -pb20170816
self.touch_filepath(os.path.join(self.binaries,
vehicle_binaries_subdir, tag))
# PX4-building
board = "px4"
for frame in frames:
self.progress("Building frame %s for board %s" % (frame, board))
if frame is None:
framesuffix = ""
else:
framesuffix = "-%s" % frame
if not self.checkout(vehicle, tag, "PX4", frame):
msg = ("Failed checkout of %s %s %s %s" %
(vehicle, "PX4", tag, frame))
self.progress(msg)
self.error_strings.append(msg)
self.checkout(vehicle, "latest")
continue
try:
deadwood = "../Build.%s" % vehicle
if os.path.exists(deadwood):
shutil.rmtree(os.path.join(deadwood))
except Exception as e:
self.progress("FIXME: narrow exception (%s)" % repr(e))
self.progress("Building %s %s PX4%s binaries" %
(vehicle, tag, framesuffix))
ddir = os.path.join(self.binaries,
vehicle_binaries_subdir,
self.hdate_ym,
self.hdate_ymdhm,
"".join(["PX4", framesuffix]))
if self.skip_build(tag, ddir):
continue
for v in ["v1", "v2", "v3", "v4", "v4pro"]:
px4_v = "%s-%s" % (board, v)
if self.skip_board_waf(px4_v):
continue
self.progress("Configuring for %s in %s" %
(px4_v, self.buildroot))
try:
self.run_waf(["configure", "--board", px4_v,
"--out", self.buildroot, "clean"])
except subprocess.CalledProcessError as e:
self.progress("waf configure failed")
continue
try:
self.run_waf([
"build",
"--targets",
os.path.join("bin",
"".join([binaryname, framesuffix]))])
except subprocess.CalledProcessError as e:
msg = ("Failed build of %s %s%s %s for %s" %
(vehicle, board, framesuffix, tag, v))
self.progress(msg)
self.error_strings.append(msg)
continue
oldfile = os.path.join(self.buildroot, px4_v, "bin",
"%s%s.px4" % (binaryname, framesuffix))
newfile = "%s-%s.px4" % (px4_binaryname, v)
self.progress("Copying (%s) to (%s)" % (oldfile, newfile,))
try:
shutil.copyfile(oldfile, newfile)
except Exception as e:
self.progress("FIXME: narrow exception (%s)" % repr(e))
msg = ("Failed build copy of %s PX4%s %s for %s" %
(vehicle, framesuffix, tag, v))
self.progress(msg)
self.error_strings.append(msg)
continue
# FIXME: why the two stage copy?!
self.copyit(newfile, ddir, tag, vehicle)
self.checkout(vehicle, "latest")
def common_boards(self):
'''returns list of boards common to all vehicles'''
        # note that we do not use these for AntennaTracker!
return ["fmuv2",
"fmuv3",
"fmuv4",
"mindpx-v2",
"erlebrain2",
"navio",
"navio2",
"pxf",
"pxfmini"]
def build_arducopter(self, tag):
'''build Copter binaries'''
boards = []
boards.extend(["skyviper-v2450", "aerofc-v1", "bebop"])
boards.extend(self.common_boards()[:])
self.build_vehicle(tag,
"ArduCopter",
boards,
"Copter",
"arducopter",
"ArduCopter",
frames=[None, "heli"])
def build_arduplane(self, tag):
'''build Plane binaries'''
boards = self.common_boards()[:]
boards.append("disco")
self.build_vehicle(tag,
"ArduPlane",
boards,
"Plane",
"arduplane",
"ArduPlane")
def build_antennatracker(self, tag):
'''build Tracker binaries'''
boards = ['navio', 'navio2']
self.build_vehicle(tag,
"AntennaTracker",
boards,
"AntennaTracker",
"antennatracker",
"AntennaTracker",)
def build_rover(self, tag):
'''build Rover binaries'''
boards = self.common_boards()
self.build_vehicle(tag,
"APMrover2",
boards,
"Rover",
"ardurover",
"APMrover2")
def build_ardusub(self, tag):
'''build Sub binaries'''
self.build_vehicle(tag,
"ArduSub",
self.common_boards(),
"Sub",
"ardusub",
"ArduSub")
def generate_manifest(self):
        '''generate manifest files for GCS to download'''
self.progress("Generating manifest")
base_url = 'http://firmware.ardupilot.org'
generator = generate_manifest.ManifestGenerator(self.binaries,
base_url)
content = generator.json()
new_json_filepath = os.path.join(self.binaries, "manifest.json.new")
self.write_string_to_filepath(content, new_json_filepath)
# provide a pre-compressed manifest. For reference, a 7M manifest
# "gzip -9"s to 300k in 1 second, "xz -e"s to 80k in 26 seconds
compressed = zlib.compress(content, 9)
new_json_filepath_gz = os.path.join(self.binaries,
"manifest.json.gz.new")
self.write_string_to_filepath(compressed, new_json_filepath_gz)
json_filepath = os.path.join(self.binaries, "manifest.json")
json_filepath_gz = os.path.join(self.binaries, "manifest.json.gz")
shutil.move(new_json_filepath, json_filepath)
shutil.move(new_json_filepath_gz, json_filepath_gz)
self.progress("Manifest generation successful")
def validate(self):
'''run pre-run validation checks'''
if "dirty" in self.tags:
if len(self.tags) > 1:
raise ValueError("dirty must be only tag if present (%s)" %
(str(self.tags)))
self.dirty = True
def pollute_env_from_file(self, filepath):
with open(filepath) as f:
for line in f:
try:
(name, value) = str.split(line, "=")
except ValueError as e:
self.progress("%s: split failed: %s" % (filepath, str(e)))
continue
value = value.rstrip()
self.progress("%s: %s=%s" % (filepath, name, value))
os.environ[name] = value
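    # Hypothetical example of the config.mk lines parsed above (names and
    # values are illustrative only):
    #   PX4_ROOT=/home/build/PX4Firmware
    #   NUTTX_SRC=/home/build/PX4NuttX
    # Each NAME=value pair is exported into os.environ; lines whose value
    # itself contains '=' are skipped by the ValueError handler above.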
def remove_tmpdir(self):
if os.path.exists(self.tmpdir):
self.progress("Removing (%s)" % (self.tmpdir,))
shutil.rmtree(self.tmpdir)
def run(self):
self.validate()
prefix_bin_dirpath = os.path.join(os.environ.get('HOME'),
"prefix", "bin")
origin_env_path = os.environ.get("PATH")
os.environ["PATH"] = ':'.join([prefix_bin_dirpath, origin_env_path,
"/bin", "/usr/bin"])
self.tmpdir = os.path.join(os.getcwd(), 'build.tmp.binaries')
os.environ["TMPDIR"] = self.tmpdir
print(self.tmpdir)
        self.remove_tmpdir()
self.progress("Building in %s" % self.tmpdir)
now = datetime.datetime.now()
self.progress(now)
if not self.dirty:
self.run_git(["checkout", "-f", "master"])
githash = self.run_git(["rev-parse", "HEAD"])
githash = githash.rstrip()
self.progress("git hash: %s" % str(githash))
self.hdate_ym = now.strftime("%Y-%m")
        self.hdate_ymdhm = now.strftime("%Y-%m-%d-%H:%M")
self.mkpath(os.path.join("binaries", self.hdate_ym,
self.hdate_ymdhm))
self.binaries = os.path.join(os.getcwd(), "..", "buildlogs",
"binaries")
self.basedir = os.getcwd()
self.error_strings = []
if os.path.exists("config.mk"):
# FIXME: narrow exception
self.pollute_env_from_file("config.mk")
if not self.dirty:
self.run_git_update_submodules()
self.buildroot = os.path.join(os.environ.get("TMPDIR"),
"binaries.build")
if os.path.exists(self.buildroot):
shutil.rmtree(self.buildroot)
for tag in self.tags:
self.build_arducopter(tag)
self.build_arduplane(tag)
self.build_rover(tag)
self.build_antennatracker(tag)
self.build_ardusub(tag)
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
self.generate_manifest()
for error_string in self.error_strings:
self.progress("%s" % error_string)
sys.exit(len(self.error_strings))
if __name__ == '__main__':
parser = optparse.OptionParser("build_binaries.py")
parser.add_option("", "--tags", action="append", type="string",
default=[], help="tags to build")
cmd_opts, cmd_args = parser.parse_args()
tags = cmd_opts.tags
if len(tags) == 0:
# FIXME: wedge this defaulting into parser somehow
tags = ["stable", "beta", "latest"]
bb = build_binaries(tags)
bb.run()<|fim▁end|> | int(match.group("point")),
match.group("type"))
firmware_version_filepath = "firmware-version.txt" |
<|file_name|>GetGoodsToExcel.java<|end_file_name|><|fim▁begin|>package org.goodsManagement.service.impl.PoiUtils;
import org.apache.poi.hssf.usermodel.HSSFSheet;
import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.goodsManagement.po.GetGoodsDto;
import org.goodsManagement.vo.GetGoodsVO;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
/**
* Created by lifei on 2015/9/23.
*/
@Component
public class GetGoodsToExcel {
/*public static void main(String[] args){
List<GetGoodsVO> list = new ArrayList<GetGoodsVO>();
GetGoodsVO a1 = new GetGoodsVO();
a1.setStaffname("大黄");
a1.setGoodname("屎");
a1.setGetnumber(2);
a1.setGoodtype("一大坨");
list.add(a1);
GetGoodsVO a2 = new GetGoodsVO();
a2.setStaffname("小黄");
a2.setGoodname("屎");
a2.setGetnumber(2);
a2.setGoodtype("一桶");
list.add(a2);
String path = "C:\\Users\\lifei\\Desktop\\getgood.xls";
GetGoodsToExcel.toExcel(list,path);
System.out.println("导出完成");
}*/
    /**
     * @param list
     *            the collection of staff goods-collection records from the database table
     * @param path
     *            the path of the file to write to
     */
    public void addtoExcel(List<GetGoodsVO> list, String path) {
HSSFWorkbook wb = new HSSFWorkbook();
HSSFSheet sheet = wb.createSheet("Outgoods");
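        // Column headers for the first row; the Chinese labels are roughly:
        // staff name, item name, item model, quantity collected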
String[] n = { "姓名", "物品名称号", "物品型号", "物品数量" };
Object[][] value = new Object[list.size() + 1][4];
for (int m = 0; m < n.length; m++) {
value[0][m] = n[m];
}
for (int i = 0; i < list.size(); i++) {
            GetGoodsVO getGoodsVOg = list.get(i);
value[i + 1][0] = getGoodsVOg.getStaffname();
value[i + 1][1] = getGoodsVOg.getGoodname();
value[i + 1][2] = getGoodsVOg.getGoodtype();
value[i + 1][3] = getGoodsVOg.getGetnumber();<|fim▁hole|> }
}<|fim▁end|> | }
ExcelUtils.writeArrayToExcel(wb, sheet, list.size() + 1, 4, value);
ExcelUtils.writeWorkbook(wb, path); |
<|file_name|>rooms.js<|end_file_name|><|fim▁begin|>/**
* Rooms
* Pokemon Showdown - http://pokemonshowdown.com/
*
* Every chat room and battle is a room, and what they do is done in
* rooms.js. There's also a global room which every user is in, and
* handles miscellaneous things like welcoming the user.
*
* @license MIT license
*/
const TIMEOUT_EMPTY_DEALLOCATE = 10 * 60 * 1000;
const TIMEOUT_INACTIVE_DEALLOCATE = 40 * 60 * 1000;
const REPORT_USER_STATS_INTERVAL = 1000 * 60 * 10;
var fs = require('fs');
/* global Rooms: true */
var Rooms = module.exports = getRoom;
var rooms = Rooms.rooms = Object.create(null);
var Room = (function () {
function Room(roomid, title) {
this.id = roomid;
this.title = (title || roomid);
this.users = Object.create(null);
this.log = [];
this.bannedUsers = Object.create(null);
this.bannedIps = Object.create(null);
}
Room.prototype.title = "";
Room.prototype.type = 'chat';
Room.prototype.lastUpdate = 0;
Room.prototype.log = null;
Room.prototype.users = null;
Room.prototype.userCount = 0;
Room.prototype.send = function (message, errorArgument) {
if (errorArgument) throw new Error("Use Room#sendUser");
if (this.id !== 'lobby') message = '>' + this.id + '\n' + message;
Sockets.channelBroadcast(this.id, message);
};
Room.prototype.sendAuth = function (message) {
for (var i in this.users) {
var user = this.users[i];
if (user.connected && user.can('receiveauthmessages', null, this)) {
user.sendTo(this, message);
}
}
};
Room.prototype.sendUser = function (user, message) {
user.sendTo(this, message);
};
Room.prototype.add = function (message) {
if (typeof message !== 'string') throw new Error("Deprecated message type");
this.logEntry(message);
if (this.logTimes && message.substr(0, 3) === '|c|') {
message = '|c:|' + (~~(Date.now() / 1000)) + '|' + message.substr(3);
}
this.log.push(message);
};
Room.prototype.logEntry = function () {};
Room.prototype.addRaw = function (message) {
this.add('|raw|' + message);
};
Room.prototype.getLogSlice = function (amount) {
var log = this.log.slice(amount);
log.unshift('|:|' + (~~(Date.now() / 1000)));
return log;
};
Room.prototype.chat = function (user, message, connection) {
// Battle actions are actually just text commands that are handled in
// parseCommand(), which in turn often calls Simulator.prototype.sendFor().
// Sometimes the call to sendFor is done indirectly, by calling
// room.decision(), where room.constructor === BattleRoom.
message = CommandParser.parse(message, this, user, connection);
if (message) {
this.add('|c|' + user.getIdentity(this.id) + '|' + message);
}
this.update();
};
return Room;
})();
var GlobalRoom = (function () {
function GlobalRoom(roomid) {
this.id = roomid;
// init battle rooms
this.battleCount = 0;
this.searchers = [];
// Never do any other file IO synchronously
// but this is okay to prevent race conditions as we start up PS
this.lastBattle = 0;
try {
this.lastBattle = parseInt(fs.readFileSync('logs/lastbattle.txt')) || 0;
} catch (e) {} // file doesn't exist [yet]
this.chatRoomData = [];
try {
this.chatRoomData = JSON.parse(fs.readFileSync('config/chatrooms.json'));
if (!Array.isArray(this.chatRoomData)) this.chatRoomData = [];
} catch (e) {} // file doesn't exist [yet]
if (!this.chatRoomData.length) {
this.chatRoomData = [{
title: 'Lobby',
isOfficial: true,
autojoin: true
}, {
title: 'Staff',
isPrivate: true,
staffRoom: true,
staffAutojoin: true
}];
}
this.chatRooms = [];
this.autojoin = []; // rooms that users autojoin upon connecting
this.staffAutojoin = []; // rooms that staff autojoin upon connecting
for (var i = 0; i < this.chatRoomData.length; i++) {
if (!this.chatRoomData[i] || !this.chatRoomData[i].title) {
console.log('ERROR: Room number ' + i + ' has no data.');
continue;
}
var id = toId(this.chatRoomData[i].title);
console.log("NEW CHATROOM: " + id);
var room = Rooms.createChatRoom(id, this.chatRoomData[i].title, this.chatRoomData[i]);
this.chatRooms.push(room);
if (room.autojoin) this.autojoin.push(id);
if (room.staffAutojoin) this.staffAutojoin.push(id);
}
// this function is complex in order to avoid several race conditions
var self = this;
this.writeNumRooms = (function () {
var writing = false;
var lastBattle; // last lastBattle to be written to file
var finishWriting = function () {
writing = false;
if (lastBattle < self.lastBattle) {
self.writeNumRooms();
}
};
return function () {
if (writing) return;
// batch writing lastbattle.txt for every 10 battles
if (lastBattle >= self.lastBattle) return;
lastBattle = self.lastBattle + 10;
writing = true;
fs.writeFile('logs/lastbattle.txt.0', '' + lastBattle, function () {
// rename is atomic on POSIX, but will throw an error on Windows
fs.rename('logs/lastbattle.txt.0', 'logs/lastbattle.txt', function (err) {
if (err) {
// This should only happen on Windows.
fs.writeFile('logs/lastbattle.txt', '' + lastBattle, finishWriting);
return;
}
finishWriting();
});
});
};
})();
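        // Rough illustration of the batching above (numbers are made up):
        // with self.lastBattle === 137 the file gets "147" written to it and
        // further calls are no-ops until lastBattle passes 147. After a crash
        // the server resumes from the persisted value, so up to ten battle
        // numbers may be skipped but none are ever reused.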
this.writeChatRoomData = (function () {
var writing = false;
var writePending = false; // whether or not a new write is pending
var finishWriting = function () {
writing = false;
if (writePending) {
writePending = false;
self.writeChatRoomData();
}
};
return function () {
if (writing) {
writePending = true;
return;
}
writing = true;
var data = JSON.stringify(self.chatRoomData).replace(/\{"title"\:/g, '\n{"title":').replace(/\]$/, '\n]');
fs.writeFile('config/chatrooms.json.0', data, function () {
// rename is atomic on POSIX, but will throw an error on Windows
fs.rename('config/chatrooms.json.0', 'config/chatrooms.json', function (err) {
if (err) {
// This should only happen on Windows.
fs.writeFile('config/chatrooms.json', data, finishWriting);
return;
}
finishWriting();
});
});
};
})();
// init users
this.users = {};
this.userCount = 0; // cache of `Object.size(this.users)`
this.maxUsers = 0;
this.maxUsersDate = 0;
this.reportUserStatsInterval = setInterval(
this.reportUserStats.bind(this),
REPORT_USER_STATS_INTERVAL
);
}
GlobalRoom.prototype.type = 'global';
GlobalRoom.prototype.formatListText = '|formats';
GlobalRoom.prototype.reportUserStats = function () {
if (this.maxUsersDate) {
LoginServer.request('updateuserstats', {
date: this.maxUsersDate,
users: this.maxUsers
}, function () {});
this.maxUsersDate = 0;
}
LoginServer.request('updateuserstats', {
date: Date.now(),
users: this.userCount
}, function () {});
};
GlobalRoom.prototype.getFormatListText = function () {
var formatListText = '|formats';
var curSection = '';
for (var i in Tools.data.Formats) {
var format = Tools.data.Formats[i];
if (!format.challengeShow && !format.searchShow) continue;
var section = format.section;
if (section === undefined) section = format.mod;
if (!section) section = '';
if (section !== curSection) {
curSection = section;
formatListText += '|,' + (format.column || 1) + '|' + section;
}
formatListText += '|' + format.name;
if (!format.challengeShow) formatListText += ',,';
else if (!format.searchShow) formatListText += ',';
if (format.team) formatListText += ',#';
}
return formatListText;
};
GlobalRoom.prototype.getRoomList = function (filter) {
var roomList = {};
var total = 0;
for (var i in Rooms.rooms) {
var room = Rooms.rooms[i];
if (!room || !room.active || room.isPrivate) continue;
if (filter && filter !== room.format && filter !== true) continue;
var roomData = {};
if (room.active && room.battle) {
if (room.battle.players[0]) roomData.p1 = room.battle.players[0].getIdentity();
if (room.battle.players[1]) roomData.p2 = room.battle.players[1].getIdentity();
}
if (!roomData.p1 || !roomData.p2) continue;
roomList[room.id] = roomData;
total++;
if (total >= 100) break;
}
return roomList;
};
GlobalRoom.prototype.getRooms = function () {
var roomsData = {official:[], chat:[], userCount: this.userCount, battleCount: this.battleCount};
for (var i = 0; i < this.chatRooms.length; i++) {
var room = this.chatRooms[i];
if (!room) continue;
if (room.isPrivate) continue;
(room.isOfficial ? roomsData.official : roomsData.chat).push({
title: room.title,
desc: room.desc,
userCount: room.userCount
});
}
return roomsData;
};
GlobalRoom.prototype.cancelSearch = function (user) {
var success = false;
user.cancelChallengeTo();
for (var i = 0; i < this.searchers.length; i++) {
var search = this.searchers[i];
var searchUser = Users.get(search.userid);
if (!searchUser.connected) {
this.searchers.splice(i, 1);
i--;
continue;
}
if (searchUser === user) {
this.searchers.splice(i, 1);
i--;
if (!success) {
searchUser.send('|updatesearch|' + JSON.stringify({searching: false}));
success = true;
}
continue;
}
}
return success;
};
GlobalRoom.prototype.searchBattle = function (user, formatid) {
if (!user.connected) return;
formatid = toId(formatid);
user.prepBattle(formatid, 'search', null, this.finishSearchBattle.bind(this, user, formatid));
};
GlobalRoom.prototype.finishSearchBattle = function (user, formatid, result) {
if (!result) return;
// tell the user they've started searching
var newSearchData = {
format: formatid
};
user.send('|updatesearch|' + JSON.stringify({searching: newSearchData}));
// get the user's rating before actually starting to search
var newSearch = {
userid: user.userid,
formatid: formatid,
team: user.team,
rating: 1000,
time: new Date().getTime()
};
var self = this;
user.doWithMMR(formatid, function (mmr, error) {
if (error) {
user.popup("Connection to ladder server failed with error: " + error + "; please try again later");
return;
}
newSearch.rating = mmr;
self.addSearch(newSearch, user);
});
};
GlobalRoom.prototype.matchmakingOK = function (search1, search2, user1, user2) {
// users must be different
if (user1 === user2) return false;
// users must have different IPs
if (user1.latestIp === user2.latestIp) return false;
// users must not have been matched immediately previously
if (user1.lastMatch === user2.userid || user2.lastMatch === user1.userid) return false;
// search must be within range
var searchRange = 100, formatid = search1.formatid, elapsed = Math.abs(search1.time - search2.time);
if (formatid === 'ou' || formatid === 'oucurrent' || formatid === 'randombattle') searchRange = 50;
searchRange += elapsed / 300; // +1 every .3 seconds
if (searchRange > 300) searchRange = 300;
if (Math.abs(search1.rating - search2.rating) > searchRange) return false;
user1.lastMatch = user2.userid;
user2.lastMatch = user1.userid;
return true;
};
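    // Worked example of the widening above (numbers are illustrative): an OU
    // search that has been waiting 60 seconds when a new search arrives may
    // match anyone within 50 + 60000 / 300 = 250 rating points; the window
    // never grows past 300 no matter how long the wait.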
GlobalRoom.prototype.addSearch = function (newSearch, user) {
if (!user.connected) return;
for (var i = 0; i < this.searchers.length; i++) {
var search = this.searchers[i];
var searchUser = Users.get(search.userid);
if (!searchUser || !searchUser.connected) {
this.searchers.splice(i, 1);
i--;
continue;
}
if (newSearch.formatid === search.formatid && searchUser === user) return; // only one search per format
if (newSearch.formatid === search.formatid && this.matchmakingOK(search, newSearch, searchUser, user)) {
this.cancelSearch(user, true);
this.cancelSearch(searchUser, true);
user.send('|updatesearch|' + JSON.stringify({searching: false}));
this.startBattle(searchUser, user, search.formatid, true, search.team, newSearch.team);
return;
}
}
this.searchers.push(newSearch);
};
GlobalRoom.prototype.send = function (message, user) {
if (user) {
user.sendTo(this, message);
} else {
Sockets.channelBroadcast(this.id, message);
}
};
GlobalRoom.prototype.sendAuth = function (message) {
for (var i in this.users) {
var user = this.users[i];
if (user.connected && user.can('receiveauthmessages', null, this)) {
user.sendTo(this, message);
}
}
};
GlobalRoom.prototype.add = function (message) {
if (rooms.lobby) rooms.lobby.add(message);
};
GlobalRoom.prototype.addRaw = function (message) {
if (rooms.lobby) rooms.lobby.addRaw(message);
};
GlobalRoom.prototype.addChatRoom = function (title) {
var id = toId(title);
if (rooms[id]) return false;
var chatRoomData = {
title: title
};
var room = Rooms.createChatRoom(id, title, chatRoomData);
this.chatRoomData.push(chatRoomData);
this.chatRooms.push(room);
this.writeChatRoomData();
return true;
};
GlobalRoom.prototype.deregisterChatRoom = function (id) {
id = toId(id);
var room = rooms[id];
if (!room) return false; // room doesn't exist
if (!room.chatRoomData) return false; // room isn't registered
// deregister from global chatRoomData
// looping from the end is a pretty trivial optimization, but the
// assumption is that more recently added rooms are more likely to
// be deleted
for (var i = this.chatRoomData.length - 1; i >= 0; i--) {
if (id === toId(this.chatRoomData[i].title)) {
this.chatRoomData.splice(i, 1);
this.writeChatRoomData();
break;
}
}
delete room.chatRoomData;
return true;
};
GlobalRoom.prototype.delistChatRoom = function (id) {
id = toId(id);
if (!rooms[id]) return false; // room doesn't exist
for (var i = this.chatRooms.length - 1; i >= 0; i--) {
if (id === this.chatRooms[i].id) {
this.chatRooms.splice(i, 1);
break;
}
}
};
GlobalRoom.prototype.removeChatRoom = function (id) {
id = toId(id);
var room = rooms[id];
if (!room) return false; // room doesn't exist
room.destroy();
return true;
};
GlobalRoom.prototype.autojoinRooms = function (user, connection) {
// we only autojoin regular rooms if the client requests it with /autojoin
// note that this restriction doesn't apply to staffAutojoin
for (var i = 0; i < this.autojoin.length; i++) {
user.joinRoom(this.autojoin[i], connection);
}
};
GlobalRoom.prototype.checkAutojoin = function (user, connection) {
if (user.isStaff) {
for (var i = 0; i < this.staffAutojoin.length; i++) {
user.joinRoom(this.staffAutojoin[i], connection);
}
}
};
GlobalRoom.prototype.onJoinConnection = function (user, connection) {
var initdata = '|updateuser|' + user.name + '|' + (user.named ? '1' : '0') + '|' + user.avatar + '\n';
connection.send(initdata + this.formatListText);
if (this.chatRooms.length > 2) connection.send('|queryresponse|rooms|null'); // should display room list
};
GlobalRoom.prototype.onJoin = function (user, connection, merging) {
if (!user) return false; // ???
if (this.users[user.userid]) return user;
this.users[user.userid] = user;
if (++this.userCount > this.maxUsers) {
this.maxUsers = this.userCount;
this.maxUsersDate = Date.now();
}
if (!merging) {
var initdata = '|updateuser|' + user.name + '|' + (user.named ? '1' : '0') + '|' + user.avatar + '\n';
connection.send(initdata + this.formatListText);
if (this.chatRooms.length > 2) connection.send('|queryresponse|rooms|null'); // should display room list
}
return user;
};
GlobalRoom.prototype.onRename = function (user, oldid, joining) {
delete this.users[oldid];
this.users[user.userid] = user;
return user;
};
GlobalRoom.prototype.onUpdateIdentity = function () {};
GlobalRoom.prototype.onLeave = function (user) {
if (!user) return; // ...
delete this.users[user.userid];
--this.userCount;
this.cancelSearch(user, true);
};
GlobalRoom.prototype.startBattle = function (p1, p2, format, rated, p1team, p2team) {
var newRoom;
p1 = Users.get(p1);
p2 = Users.get(p2);
if (!p1 || !p2) {
// most likely, a user was banned during the battle start procedure
this.cancelSearch(p1, true);
this.cancelSearch(p2, true);
return;
}
if (p1 === p2) {
this.cancelSearch(p1, true);
this.cancelSearch(p2, true);
p1.popup("You can't battle your own account. Please use something like Private Browsing to battle yourself.");
return;
}
if (this.lockdown) {
this.cancelSearch(p1, true);
this.cancelSearch(p2, true);
p1.popup("The server is shutting down. Battles cannot be started at this time.");
p2.popup("The server is shutting down. Battles cannot be started at this time.");
return;
}
//console.log('BATTLE START BETWEEN: ' + p1.userid + ' ' + p2.userid);
var i = this.lastBattle + 1;
var formaturlid = format.toLowerCase().replace(/[^a-z0-9]+/g, '');
        while (rooms['battle-' + formaturlid + '-' + i]) {
i++;
}
this.lastBattle = i;
rooms.global.writeNumRooms();
newRoom = this.addRoom('battle-' + formaturlid + '-' + i, format, p1, p2, this.id, rated);
p1.joinRoom(newRoom);
p2.joinRoom(newRoom);
newRoom.joinBattle(p1, p1team);
newRoom.joinBattle(p2, p2team);
this.cancelSearch(p1, true);
this.cancelSearch(p2, true);
if (Config.reportbattles && rooms.lobby) {
rooms.lobby.add('|b|' + newRoom.id + '|' + p1.getIdentity() + '|' + p2.getIdentity());
}
if (Config.logladderip && rated) {
if (!this.ladderIpLog) {
this.ladderIpLog = fs.createWriteStream('logs/ladderip/ladderip.txt', {flags: 'a'});
}
this.ladderIpLog.write(p1.userid+': '+p1.latestIp+'\n');
this.ladderIpLog.write(p2.userid+': '+p2.latestIp+'\n');
}
return newRoom;
};
GlobalRoom.prototype.addRoom = function (room, format, p1, p2, parent, rated) {
room = Rooms.createBattle(room, format, p1, p2, parent, rated);
return room;
};
GlobalRoom.prototype.removeRoom = function (room) {};
GlobalRoom.prototype.chat = function (user, message, connection) {
if (rooms.lobby) return rooms.lobby.chat(user, message, connection);
message = CommandParser.parse(message, this, user, connection);
if (message) {
connection.sendPopup("You can't send messages directly to the server.");
}
};
return GlobalRoom;
})();
var BattleRoom = (function () {
function BattleRoom(roomid, format, p1, p2, parentid, rated) {
Room.call(this, roomid, "" + p1.name + " vs. " + p2.name);
this.modchat = (Config.battlemodchat || false);
format = '' + (format || '');
this.format = format;
this.auth = {};
//console.log("NEW BATTLE");
var formatid = toId(format);
if (rated && Tools.getFormat(formatid).rated !== false) {
rated = {
p1: p1.userid,
p2: p2.userid,
format: format
};
} else {
rated = false;
}
this.rated = rated;
this.battle = Simulator.create(this.id, format, rated, this);
this.parentid = parentid || '';
this.p1 = p1 || '';
this.p2 = p2 || '';
this.sideTicksLeft = [21, 21];
if (!rated) this.sideTicksLeft = [28, 28];
this.sideTurnTicks = [0, 0];
this.disconnectTickDiff = [0, 0];
if (Config.forcetimer) this.requestKickInactive(false);
}
BattleRoom.prototype = Object.create(Room.prototype);
BattleRoom.prototype.type = 'battle';
BattleRoom.prototype.resetTimer = null;
BattleRoom.prototype.resetUser = '';
BattleRoom.prototype.expireTimer = null;
BattleRoom.prototype.active = false;
BattleRoom.prototype.push = function (message) {
if (typeof message === 'string') {
this.log.push(message);
} else {
this.log = this.log.concat(message);
}
};
BattleRoom.prototype.win = function (winner) {
if (this.rated) {
var winnerid = toId(winner);<|fim▁hole|> if (winnerid === rated.p1) {
p1score = 1;
} else if (winnerid === rated.p2) {
p1score = 0;
}
var p1 = rated.p1;
if (Users.getExact(rated.p1)) p1 = Users.getExact(rated.p1).name;
var p2 = rated.p2;
if (Users.getExact(rated.p2)) p2 = Users.getExact(rated.p2).name;
//update.updates.push('[DEBUG] uri: ' + Config.loginserver + 'action.php?act=ladderupdate&serverid=' + Config.serverid + '&p1=' + encodeURIComponent(p1) + '&p2=' + encodeURIComponent(p2) + '&score=' + p1score + '&format=' + toId(rated.format) + '&servertoken=[token]');
if (!rated.p1 || !rated.p2) {
this.push('|raw|ERROR: Ladder not updated: a player does not exist');
} else {
winner = Users.get(winnerid);
if (winner && !winner.authenticated) {
this.sendUser(winner, '|askreg|' + winner.userid);
}
var p1rating, p2rating;
// update rankings
this.push('|raw|Ladder updating...');
var self = this;
LoginServer.request('ladderupdate', {
p1: p1,
p2: p2,
score: p1score,
format: toId(rated.format)
}, function (data, statusCode, error) {
if (!self.battle) {
console.log('room expired before ladder update was received');
return;
}
if (!data) {
self.addRaw('Ladder (probably) updated, but score could not be retrieved (' + error + ').');
// log the battle anyway
if (!Tools.getFormat(self.format).noLog) {
self.logBattle(p1score);
}
return;
} else if (data.errorip) {
self.addRaw("This server's request IP " + data.errorip + " is not a registered server.");
return;
} else {
try {
p1rating = data.p1rating;
p2rating = data.p2rating;
//self.add("Ladder updated.");
var oldacre = Math.round(data.p1rating.oldacre);
var acre = Math.round(data.p1rating.acre);
var reasons = '' + (acre - oldacre) + ' for ' + (p1score > 0.99 ? 'winning' : (p1score < 0.01 ? 'losing' : 'tying'));
if (reasons.substr(0, 1) !== '-') reasons = '+' + reasons;
self.addRaw(Tools.escapeHTML(p1) + '\'s rating: ' + oldacre + ' → <strong>' + acre + '</strong><br />(' + reasons + ')');
oldacre = Math.round(data.p2rating.oldacre);
acre = Math.round(data.p2rating.acre);
reasons = '' + (acre - oldacre) + ' for ' + (p1score > 0.99 ? 'losing' : (p1score < 0.01 ? 'winning' : 'tying'));
if (reasons.substr(0, 1) !== '-') reasons = '+' + reasons;
self.addRaw(Tools.escapeHTML(p2) + '\'s rating: ' + oldacre + ' → <strong>' + acre + '</strong><br />(' + reasons + ')');
Users.get(p1).cacheMMR(rated.format, data.p1rating);
Users.get(p2).cacheMMR(rated.format, data.p2rating);
self.update();
} catch(e) {
self.addRaw('There was an error calculating rating changes.');
self.update();
}
if (!Tools.getFormat(self.format).noLog) {
self.logBattle(p1score, p1rating, p2rating);
}
}
});
}
}
rooms.global.battleCount += 0 - (this.active ? 1 : 0);
this.active = false;
this.update();
};
// logNum = 0 : spectator log
// logNum = 1, 2 : player log
// logNum = 3 : replay log
BattleRoom.prototype.getLog = function (logNum) {
var log = [];
for (var i = 0; i < this.log.length; ++i) {
var line = this.log[i];
if (line === '|split') {
log.push(this.log[i + logNum + 1]);
i += 4;
} else {
log.push(line);
}
}
return log;
};
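    // Sketch of the '|split' convention consumed above (contents are
    // illustrative): a hidden-information line is stored as five entries,
    //   '|split', <spectator text>, <p1 text>, <p2 text>, <exact/replay text>
    // and getLog() keeps only the one matching the requested logNum (0-3).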
BattleRoom.prototype.getLogForUser = function (user) {
var logNum = this.battle.getSlot(user) + 1;
if (logNum < 0) logNum = 0;
return this.getLog(logNum);
};
BattleRoom.prototype.update = function (excludeUser) {
if (this.log.length <= this.lastUpdate) return;
Sockets.subchannelBroadcast(this.id, '>' + this.id + '\n\n' + this.log.slice(this.lastUpdate).join('\n'));
this.lastUpdate = this.log.length;
// empty rooms time out after ten minutes
var hasUsers = false;
for (var i in this.users) {
hasUsers = true;
break;
}
if (!hasUsers) {
if (!this.expireTimer) {
this.expireTimer = setTimeout(this.tryExpire.bind(this), TIMEOUT_EMPTY_DEALLOCATE);
}
} else {
if (this.expireTimer) clearTimeout(this.expireTimer);
this.expireTimer = setTimeout(this.tryExpire.bind(this), TIMEOUT_INACTIVE_DEALLOCATE);
}
};
BattleRoom.prototype.logBattle = function (p1score, p1rating, p2rating) {
var logData = this.battle.logData;
logData.p1rating = p1rating;
logData.p2rating = p2rating;
logData.endType = this.battle.endType;
if (!p1rating) logData.ladderError = true;
logData.log = BattleRoom.prototype.getLog.call(logData, 3); // replay log (exact damage)
var date = new Date();
var logfolder = date.format('{yyyy}-{MM}');
var logsubfolder = date.format('{yyyy}-{MM}-{dd}');
var curpath = 'logs/' + logfolder;
var self = this;
fs.mkdir(curpath, '0755', function () {
var tier = self.format.toLowerCase().replace(/[^a-z0-9]+/g, '');
curpath += '/' + tier;
fs.mkdir(curpath, '0755', function () {
curpath += '/' + logsubfolder;
fs.mkdir(curpath, '0755', function () {
fs.writeFile(curpath + '/' + self.id + '.log.json', JSON.stringify(logData));
});
});
        }); // asynchronicity
//console.log(JSON.stringify(logData));
};
BattleRoom.prototype.tryExpire = function () {
this.expire();
};
BattleRoom.prototype.getInactiveSide = function () {
if (this.battle.players[0] && !this.battle.players[1]) return 1;
if (this.battle.players[1] && !this.battle.players[0]) return 0;
return this.battle.inactiveSide;
};
BattleRoom.prototype.forfeit = function (user, message, side) {
if (!this.battle || this.battle.ended || !this.battle.started) return false;
if (!message) message = ' forfeited.';
if (side === undefined) {
if (user && user.userid === this.battle.playerids[0]) side = 0;
if (user && user.userid === this.battle.playerids[1]) side = 1;
}
if (side === undefined) return false;
var ids = ['p1', 'p2'];
var otherids = ['p2', 'p1'];
var name = 'Player ' + (side + 1);
if (user) {
name = user.name;
} else if (this.rated) {
name = this.rated[ids[side]];
}
this.add('|-message|' + name + message);
this.battle.endType = 'forfeit';
this.battle.send('win', otherids[side]);
rooms.global.battleCount += (this.battle.active ? 1 : 0) - (this.active ? 1 : 0);
this.active = this.battle.active;
this.update();
return true;
};
BattleRoom.prototype.sendPlayer = function (num, message) {
var player = this.battle.getPlayer(num);
if (!player) return false;
this.sendUser(player, message);
};
BattleRoom.prototype.kickInactive = function () {
clearTimeout(this.resetTimer);
this.resetTimer = null;
if (!this.battle || this.battle.ended || !this.battle.started) return false;
var inactiveSide = this.getInactiveSide();
var ticksLeft = [0, 0];
if (inactiveSide !== 1) {
// side 0 is inactive
this.sideTurnTicks[0]--;
this.sideTicksLeft[0]--;
}
if (inactiveSide !== 0) {
// side 1 is inactive
this.sideTurnTicks[1]--;
this.sideTicksLeft[1]--;
}
ticksLeft[0] = Math.min(this.sideTurnTicks[0], this.sideTicksLeft[0]);
ticksLeft[1] = Math.min(this.sideTurnTicks[1], this.sideTicksLeft[1]);
if (ticksLeft[0] && ticksLeft[1]) {
if (inactiveSide === 0 || inactiveSide === 1) {
// one side is inactive
var inactiveTicksLeft = ticksLeft[inactiveSide];
var inactiveUser = this.battle.getPlayer(inactiveSide);
if (inactiveTicksLeft % 3 === 0 || inactiveTicksLeft <= 4) {
this.send('|inactive|' + (inactiveUser ? inactiveUser.name : 'Player ' + (inactiveSide + 1)) + ' has ' + (inactiveTicksLeft * 10) + ' seconds left.');
}
} else {
// both sides are inactive
var inactiveUser0 = this.battle.getPlayer(0);
if (inactiveUser0 && (ticksLeft[0] % 3 === 0 || ticksLeft[0] <= 4)) {
this.sendUser(inactiveUser0, '|inactive|' + inactiveUser0.name + ' has ' + (ticksLeft[0] * 10) + ' seconds left.');
}
var inactiveUser1 = this.battle.getPlayer(1);
if (inactiveUser1 && (ticksLeft[1] % 3 === 0 || ticksLeft[1] <= 4)) {
this.sendUser(inactiveUser1, '|inactive|' + inactiveUser1.name + ' has ' + (ticksLeft[1] * 10) + ' seconds left.');
}
}
this.resetTimer = setTimeout(this.kickInactive.bind(this), 10 * 1000);
return;
}
if (inactiveSide < 0) {
if (ticksLeft[0]) inactiveSide = 1;
else if (ticksLeft[1]) inactiveSide = 0;
}
this.forfeit(this.battle.getPlayer(inactiveSide), ' lost due to inactivity.', inactiveSide);
this.resetUser = '';
};
BattleRoom.prototype.requestKickInactive = function (user, force) {
if (this.resetTimer) {
if (user) this.sendUser(user, '|inactive|The inactivity timer is already counting down.');
return false;
}
if (user) {
if (!force && this.battle.getSlot(user) < 0) return false;
this.resetUser = user.userid;
this.send('|inactive|Battle timer is now ON: inactive players will automatically lose when time\'s up. (requested by ' + user.name + ')');
} else if (user === false) {
this.resetUser = '~';
this.add('|inactive|Battle timer is ON: inactive players will automatically lose when time\'s up.');
}
// a tick is 10 seconds
var maxTicksLeft = 15; // 2 minutes 30 seconds
if (!this.battle.p1 || !this.battle.p2) {
// if a player has left, don't wait longer than 6 ticks (1 minute)
maxTicksLeft = 6;
}
if (!this.rated) maxTicksLeft = 30;
this.sideTurnTicks = [maxTicksLeft, maxTicksLeft];
var inactiveSide = this.getInactiveSide();
if (inactiveSide < 0) {
// add 10 seconds to bank if they're below 160 seconds
if (this.sideTicksLeft[0] < 16) this.sideTicksLeft[0]++;
if (this.sideTicksLeft[1] < 16) this.sideTicksLeft[1]++;
}
this.sideTicksLeft[0]++;
this.sideTicksLeft[1]++;
if (inactiveSide !== 1) {
// side 0 is inactive
var ticksLeft0 = Math.min(this.sideTicksLeft[0] + 1, maxTicksLeft);
this.sendPlayer(0, '|inactive|You have ' + (ticksLeft0 * 10) + ' seconds to make your decision.');
}
if (inactiveSide !== 0) {
// side 1 is inactive
var ticksLeft1 = Math.min(this.sideTicksLeft[1] + 1, maxTicksLeft);
this.sendPlayer(1, '|inactive|You have ' + (ticksLeft1 * 10) + ' seconds to make your decision.');
}
this.resetTimer = setTimeout(this.kickInactive.bind(this), 10 * 1000);
return true;
};
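    // Back-of-the-envelope reading of the numbers above: a "tick" is the
    // 10-second kickInactive() interval, so maxTicksLeft = 15 allows roughly
    // 150 seconds per decision in a rated battle, the 6-tick cap after a
    // disconnect is about one minute, and sideTicksLeft acts as a per-player
    // reserve that is topped up while both sides keep responding.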
BattleRoom.prototype.nextInactive = function () {
if (this.resetTimer) {
this.update();
clearTimeout(this.resetTimer);
this.resetTimer = null;
this.requestKickInactive();
}
};
BattleRoom.prototype.stopKickInactive = function (user, force) {
if (!force && user && user.userid !== this.resetUser) return false;
if (this.resetTimer) {
clearTimeout(this.resetTimer);
this.resetTimer = null;
this.send('|inactiveoff|Battle timer is now OFF.');
return true;
}
return false;
};
BattleRoom.prototype.kickInactiveUpdate = function () {
if (!this.rated) return false;
if (this.resetTimer) {
var inactiveSide = this.getInactiveSide();
var changed = false;
if ((!this.battle.p1 || !this.battle.p2) && !this.disconnectTickDiff[0] && !this.disconnectTickDiff[1]) {
if ((!this.battle.p1 && inactiveSide === 0) || (!this.battle.p2 && inactiveSide === 1)) {
var inactiveUser = this.battle.getPlayer(inactiveSide);
if (!this.battle.p1 && inactiveSide === 0 && this.sideTurnTicks[0] > 7) {
this.disconnectTickDiff[0] = this.sideTurnTicks[0] - 7;
this.sideTurnTicks[0] = 7;
changed = true;
} else if (!this.battle.p2 && inactiveSide === 1 && this.sideTurnTicks[1] > 7) {
this.disconnectTickDiff[1] = this.sideTurnTicks[1] - 7;
this.sideTurnTicks[1] = 7;
changed = true;
}
if (changed) {
this.send('|inactive|' + (inactiveUser ? inactiveUser.name : 'Player ' + (inactiveSide + 1)) + ' disconnected and has a minute to reconnect!');
return true;
}
}
} else if (this.battle.p1 && this.battle.p2) {
// Only one of the following conditions should happen, but do
// them both since you never know...
if (this.disconnectTickDiff[0]) {
this.sideTurnTicks[0] = this.sideTurnTicks[0] + this.disconnectTickDiff[0];
this.disconnectTickDiff[0] = 0;
changed = 0;
}
if (this.disconnectTickDiff[1]) {
this.sideTurnTicks[1] = this.sideTurnTicks[1] + this.disconnectTickDiff[1];
this.disconnectTickDiff[1] = 0;
changed = 1;
}
if (changed !== false) {
var user = this.battle.getPlayer(changed);
this.send('|inactive|' + (user ? user.name : 'Player ' + (changed + 1)) + ' reconnected and has ' + (this.sideTurnTicks[changed] * 10) + ' seconds left!');
return true;
}
}
}
return false;
};
BattleRoom.prototype.decision = function (user, choice, data) {
this.battle.sendFor(user, choice, data);
if (this.active !== this.battle.active) {
rooms.global.battleCount += (this.battle.active ? 1 : 0) - (this.active ? 1 : 0);
this.active = this.battle.active;
}
this.update();
};
// This function is only called when the room is not empty.
// Joining an empty room calls this.join() below instead.
BattleRoom.prototype.onJoinConnection = function (user, connection) {
this.sendUser(connection, '|init|battle\n|title|' + this.title + '\n' + this.getLogForUser(user).join('\n'));
// this handles joining a battle in which a user is a participant,
// where the user has already identified before attempting to join
// the battle
this.battle.resendRequest(user);
};
BattleRoom.prototype.onJoin = function (user, connection) {
if (!user) return false;
if (this.users[user.userid]) return user;
if (user.named) {
this.add('|join|' + user.name);
this.update();
}
this.users[user.userid] = user;
this.userCount++;
this.sendUser(connection, '|init|battle\n|title|' + this.title + '\n' + this.getLogForUser(user).join('\n'));
return user;
};
BattleRoom.prototype.onRename = function (user, oldid, joining) {
if (joining) {
this.add('|join|' + user.name);
}
var resend = joining || !this.battle.playerTable[oldid];
if (this.battle.playerTable[oldid]) {
if (this.rated) {
this.add('|message|' + user.name + ' forfeited by changing their name.');
this.battle.lose(oldid);
this.battle.leave(oldid);
resend = false;
} else {
this.battle.rename();
}
}
delete this.users[oldid];
this.users[user.userid] = user;
this.update();
if (resend) {
// this handles a named user renaming themselves into a user in the
// battle (i.e. by using /nick)
this.battle.resendRequest(user);
}
return user;
};
BattleRoom.prototype.onUpdateIdentity = function () {};
BattleRoom.prototype.onLeave = function (user) {
if (!user) return; // ...
if (user.battles[this.id]) {
this.battle.leave(user);
rooms.global.battleCount += (this.battle.active ? 1 : 0) - (this.active ? 1 : 0);
this.active = this.battle.active;
} else if (!user.named) {
delete this.users[user.userid];
return;
}
delete this.users[user.userid];
this.userCount--;
this.add('|leave|' + user.name);
if (Object.isEmpty(this.users)) {
rooms.global.battleCount += 0 - (this.active ? 1 : 0);
this.active = false;
}
this.update();
this.kickInactiveUpdate();
};
BattleRoom.prototype.joinBattle = function (user, team) {
var slot;
if (this.rated) {
if (this.rated.p1 === user.userid) {
slot = 0;
} else if (this.rated.p2 === user.userid) {
slot = 1;
} else {
user.popup("This is a rated battle; your username must be " + this.rated.p1 + " or " + this.rated.p2 + " to join.");
return false;
}
}
if (this.battle.active) {
user.popup("This battle already has two players.");
return false;
}
this.auth[user.userid] = '\u2605';
this.battle.join(user, slot, team);
rooms.global.battleCount += (this.battle.active ? 1 : 0) - (this.active ? 1 : 0);
this.active = this.battle.active;
if (this.active) {
this.title = "" + this.battle.p1 + " vs. " + this.battle.p2;
this.send('|title|' + this.title);
}
this.update();
this.kickInactiveUpdate();
};
BattleRoom.prototype.leaveBattle = function (user) {
if (!user) return false; // ...
if (user.battles[this.id]) {
this.battle.leave(user);
} else {
return false;
}
this.auth[user.userid] = '+';
rooms.global.battleCount += (this.battle.active ? 1 : 0) - (this.active ? 1 : 0);
this.active = this.battle.active;
this.update();
this.kickInactiveUpdate();
return true;
};
BattleRoom.prototype.expire = function () {
this.send('|expire|');
this.destroy();
};
BattleRoom.prototype.destroy = function () {
// deallocate ourself
// remove references to ourself
for (var i in this.users) {
this.users[i].leaveRoom(this);
delete this.users[i];
}
this.users = null;
rooms.global.removeRoom(this.id);
// deallocate children and get rid of references to them
if (this.battle) {
this.battle.destroy();
}
this.battle = null;
if (this.resetTimer) {
clearTimeout(this.resetTimer);
}
this.resetTimer = null;
// get rid of some possibly-circular references
delete rooms[this.id];
};
return BattleRoom;
})();
var ChatRoom = (function () {
function ChatRoom(roomid, title, options) {
Room.call(this, roomid, title);
if (options) {
this.chatRoomData = options;
Object.merge(this, options);
}
this.logTimes = true;
this.logFile = null;
this.logFilename = '';
this.destroyingLog = false;
if (!this.modchat) this.modchat = (Config.chatmodchat || false);
if (Config.logchat) {
this.rollLogFile(true);
this.logEntry = function (entry, date) {
var timestamp = (new Date()).format('{HH}:{mm}:{ss} ');
this.logFile.write(timestamp + entry + '\n');
};
this.logEntry('NEW CHATROOM: ' + this.id);
if (Config.loguserstats) {
setInterval(this.logUserStats.bind(this), Config.loguserstats);
}
}
if (Config.reportjoinsperiod) {
this.userList = this.getUserList();
this.reportJoinsQueue = [];
}
}
ChatRoom.prototype = Object.create(Room.prototype);
ChatRoom.prototype.type = 'chat';
ChatRoom.prototype.reportRecentJoins = function () {
delete this.reportJoinsInterval;
if (this.reportJoinsQueue.length === 0) {
// nothing to report
return;
}
if (Config.reportjoinsperiod) {
this.userList = this.getUserList();
}
this.send(this.reportJoinsQueue.join('\n'));
this.reportJoinsQueue.length = 0;
};
ChatRoom.prototype.rollLogFile = function (sync) {
var mkdir = sync ? function (path, mode, callback) {
try {
fs.mkdirSync(path, mode);
} catch (e) {} // directory already exists
callback();
} : fs.mkdir;
var date = new Date();
var basepath = 'logs/chat/' + this.id + '/';
var self = this;
mkdir(basepath, '0755', function () {
var path = date.format('{yyyy}-{MM}');
mkdir(basepath + path, '0755', function () {
if (self.destroyingLog) return;
path += '/' + date.format('{yyyy}-{MM}-{dd}') + '.txt';
if (path !== self.logFilename) {
self.logFilename = path;
if (self.logFile) self.logFile.destroySoon();
self.logFile = fs.createWriteStream(basepath + path, {flags: 'a'});
// Create a symlink to today's lobby log.
// These operations need to be synchronous, but it's okay
// because this code is only executed once every 24 hours.
var link0 = basepath + 'today.txt.0';
try {
fs.unlinkSync(link0);
} catch (e) {} // file doesn't exist
try {
fs.symlinkSync(path, link0); // `basepath` intentionally not included
try {
fs.renameSync(link0, basepath + 'today.txt');
} catch (e) {} // OS doesn't support atomic rename
} catch (e) {} // OS doesn't support symlinks
}
var timestamp = +date;
date.advance('1 hour').reset('minutes').advance('1 second');
setTimeout(self.rollLogFile.bind(self), +date - timestamp);
});
});
};
ChatRoom.prototype.destroyLog = function (initialCallback, finalCallback) {
this.destroyingLog = true;
initialCallback();
if (this.logFile) {
this.logEntry = function () { };
this.logFile.on('close', finalCallback);
this.logFile.destroySoon();
} else {
finalCallback();
}
};
ChatRoom.prototype.logUserStats = function () {
var total = 0;
var guests = 0;
var groups = {};
Config.groupsranking.forEach(function (group) {
groups[group] = 0;
});
for (var i in this.users) {
var user = this.users[i];
++total;
if (!user.named) {
++guests;
}
++groups[user.group];
}
var entry = '|userstats|total:' + total + '|guests:' + guests;
for (var i in groups) {
entry += '|' + i + ':' + groups[i];
}
this.logEntry(entry);
};
ChatRoom.prototype.getUserList = function () {
var buffer = '';
var counter = 0;
for (var i in this.users) {
if (!this.users[i].named) {
continue;
}
counter++;
buffer += ',' + this.users[i].getIdentity(this.id);
}
var msg = '|users|' + counter + buffer;
return msg;
};
ChatRoom.prototype.reportJoin = function (entry) {
if (Config.reportjoinsperiod) {
if (!this.reportJoinsInterval) {
this.reportJoinsInterval = setTimeout(
this.reportRecentJoins.bind(this), Config.reportjoinsperiod
);
}
this.reportJoinsQueue.push(entry);
} else {
this.send(entry);
}
this.logEntry(entry);
};
ChatRoom.prototype.update = function () {
if (this.log.length <= this.lastUpdate) return;
var entries = this.log.slice(this.lastUpdate);
if (this.reportJoinsQueue && this.reportJoinsQueue.length) {
clearTimeout(this.reportJoinsInterval);
delete this.reportJoinsInterval;
Array.prototype.unshift.apply(entries, this.reportJoinsQueue);
this.reportJoinsQueue.length = 0;
this.userList = this.getUserList();
}
var update = entries.join('\n');
if (this.log.length > 100) {
this.log.splice(0, this.log.length - 100);
}
this.lastUpdate = this.log.length;
this.send(update);
};
ChatRoom.prototype.getIntroMessage = function () {
var html = this.introMessage || '';
if (this.modchat) {
if (html) html += '<br /><br />';
html += '<div class="broadcast-red">';
html += 'Must be rank ' + this.modchat + ' or higher to talk right now.';
html += '</div>';
}
if (html) return '\n|raw|<div class="infobox">' + html + '</div>';
return '';
};
ChatRoom.prototype.onJoinConnection = function (user, connection) {
var userList = this.userList ? this.userList : this.getUserList();
this.sendUser(connection, '|init|chat\n|title|' + this.title + '\n' + userList + '\n' + this.getLogSlice(-25).join('\n') + this.getIntroMessage());
if (global.Tournaments && Tournaments.get(this.id)) {
Tournaments.get(this.id).updateFor(user, connection);
}
};
ChatRoom.prototype.onJoin = function (user, connection, merging) {
if (!user) return false; // ???
if (this.users[user.userid]) return user;
if (user.named && Config.reportjoins) {
this.add('|j|' + user.getIdentity(this.id));
this.update();
} else if (user.named) {
var entry = '|J|' + user.getIdentity(this.id);
this.reportJoin(entry);
}
this.users[user.userid] = user;
this.userCount++;
if (!merging) {
var userList = this.userList ? this.userList : this.getUserList();
this.sendUser(connection, '|init|chat\n|title|' + this.title + '\n' + userList + '\n' + this.getLogSlice(-100).join('\n') + this.getIntroMessage());
}
if (global.Tournaments && Tournaments.get(this.id)) {
Tournaments.get(this.id).updateFor(user, connection);
}
return user;
};
ChatRoom.prototype.onRename = function (user, oldid, joining) {
delete this.users[oldid];
if (this.bannedUsers && (user.userid in this.bannedUsers || user.autoconfirmed in this.bannedUsers)) {
this.bannedUsers[oldid] = true;
for (var ip in user.ips) this.bannedIps[ip] = true;
user.leaveRoom(this);
var alts = user.getAlts();
for (var i = 0; i < alts.length; ++i) {
this.bannedUsers[toId(alts[i])] = true;
Users.getExact(alts[i]).leaveRoom(this);
}
return;
}
this.users[user.userid] = user;
var entry;
if (joining) {
if (Config.reportjoins) {
entry = '|j|' + user.getIdentity(this.id);
} else {
entry = '|J|' + user.getIdentity(this.id);
}
} else if (!user.named) {
entry = '|L| ' + oldid;
} else {
entry = '|N|' + user.getIdentity(this.id) + '|' + oldid;
}
if (Config.reportjoins) {
this.add(entry);
} else {
this.reportJoin(entry);
}
if (global.Tournaments && Tournaments.get(this.id)) {
Tournaments.get(this.id).updateFor(user);
}
return user;
};
/**
* onRename, but without a userid change
*/
ChatRoom.prototype.onUpdateIdentity = function (user) {
if (user && user.connected && user.named) {
if (!this.users[user.userid]) return false;
var entry = '|N|' + user.getIdentity(this.id) + '|' + user.userid;
this.reportJoin(entry);
}
};
ChatRoom.prototype.onLeave = function (user) {
if (!user) return; // ...
delete this.users[user.userid];
this.userCount--;
if (user.named && Config.reportjoins) {
this.add('|l|' + user.getIdentity(this.id));
} else if (user.named) {
var entry = '|L|' + user.getIdentity(this.id);
this.reportJoin(entry);
}
};
ChatRoom.prototype.destroy = function () {
// deallocate ourself
// remove references to ourself
for (var i in this.users) {
this.users[i].leaveRoom(this);
delete this.users[i];
}
this.users = null;
rooms.global.deregisterChatRoom(this.id);
rooms.global.delistChatRoom(this.id);
// get rid of some possibly-circular references
delete rooms[this.id];
};
return ChatRoom;
})();
// to make sure you don't get null returned, pass the second argument
function getRoom(roomid, fallback) {
if (roomid && roomid.id) return roomid;
if (!roomid) roomid = 'default';
if (!rooms[roomid] && fallback) {
return rooms.global;
}
return rooms[roomid];
}
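// Usage sketch (illustrative, not part of the original file): Rooms.get('lobby')
// returns the lobby room, or undefined when no such room exists; passing the
// fallback flag, e.g. Rooms.get('unknownroom', true), returns rooms.global instead.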
Rooms.get = getRoom;
Rooms.createBattle = function (roomid, format, p1, p2, parent, rated) {
if (roomid && roomid.id) return roomid;
if (!p1 || !p2) return false;
if (!roomid) roomid = 'default';
if (!rooms[roomid]) {
// console.log("NEW BATTLE ROOM: " + roomid);
ResourceMonitor.countBattle(p1.latestIp, p1.name);
ResourceMonitor.countBattle(p2.latestIp, p2.name);
rooms[roomid] = new BattleRoom(roomid, format, p1, p2, parent, rated);
}
return rooms[roomid];
};
Rooms.createChatRoom = function (roomid, title, data) {
var room;
if ((room = rooms[roomid])) return room;
room = rooms[roomid] = new ChatRoom(roomid, title, data);
return room;
};
console.log("NEW GLOBAL: global");
rooms.global = new GlobalRoom('global');
Rooms.GlobalRoom = GlobalRoom;
Rooms.BattleRoom = BattleRoom;
Rooms.ChatRoom = ChatRoom;
Rooms.global = rooms.global;
Rooms.lobby = rooms.lobby;<|fim▁end|> | var rated = this.rated;
this.rated = false;
var p1score = 0.5;
|
<|file_name|>DBReplicationOffsetStore.java<|end_file_name|><|fim▁begin|>/*
* Copyright © 2021 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package io.cdap.delta.store;
import com.google.gson.Gson;
import io.cdap.cdap.spi.data.StructuredRow;
import io.cdap.cdap.spi.data.StructuredTable;
import io.cdap.cdap.spi.data.StructuredTableContext;
import io.cdap.cdap.spi.data.TableNotFoundException;
import io.cdap.cdap.spi.data.table.StructuredTableId;
import io.cdap.cdap.spi.data.table.StructuredTableSpecification;
import io.cdap.cdap.spi.data.table.field.Field;
import io.cdap.cdap.spi.data.table.field.FieldType;
import io.cdap.cdap.spi.data.table.field.Fields;
import io.cdap.delta.app.DeltaWorkerId;
import io.cdap.delta.app.OffsetAndSequence;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import javax.annotation.Nullable;
/**
* A StructuredTable based database table to store replication offset and sequence data.
*/
public class DBReplicationOffsetStore {
private static final StructuredTableId TABLE_ID = new StructuredTableId("delta_offset_store");
private static final String NAMESPACE_COL = "namespace";
private static final String APP_GENERATION_COL = "app_generation";
private static final String APP_NAME_COL = "app_name";
private static final String INSTANCE_ID_COL = "instance_id";
private static final String OFFSET_COL = "offset_sequence";
private static final String UPDATED_COL = "last_updated";
public static final StructuredTableSpecification TABLE_SPEC = new StructuredTableSpecification.Builder()
.withId(TABLE_ID)
.withFields(new FieldType(NAMESPACE_COL, FieldType.Type.STRING),
new FieldType(APP_GENERATION_COL, FieldType.Type.LONG),
new FieldType(APP_NAME_COL, FieldType.Type.STRING),
new FieldType(INSTANCE_ID_COL, FieldType.Type.INTEGER),
new FieldType(OFFSET_COL, FieldType.Type.STRING),<|fim▁hole|> .withPrimaryKeys(NAMESPACE_COL, APP_NAME_COL, APP_GENERATION_COL, INSTANCE_ID_COL)
.build();
private final StructuredTable table;
private static final Gson GSON = new Gson();
private DBReplicationOffsetStore(StructuredTable table) {
this.table = table;
}
static DBReplicationOffsetStore get(StructuredTableContext context) {
try {
StructuredTable table = context.getTable(TABLE_ID);
return new DBReplicationOffsetStore(table);
} catch (TableNotFoundException e) {
throw new IllegalStateException(String.format(
"System table '%s' does not exist. Please check your system environment.", TABLE_ID.getName()), e);
}
}
@Nullable
public OffsetAndSequence getOffsets(DeltaWorkerId id)
throws IOException {
List<Field<?>> keys = getKey(id);
Optional<StructuredRow> row = table.read(keys);
if (!row.isPresent() || row.get().getString(OFFSET_COL) == null) {
return null;
}
String offsetStrJson = row.get().getString(OFFSET_COL);
return GSON.fromJson(offsetStrJson, OffsetAndSequence.class);
}
public void writeOffset(DeltaWorkerId id, OffsetAndSequence data)
throws IOException {
Collection<Field<?>> fields = getKey(id);
fields.add(Fields.stringField(OFFSET_COL, GSON.toJson(data)));
fields.add(Fields.longField(UPDATED_COL, System.currentTimeMillis()));
table.upsert(fields);
}
private List<Field<?>> getKey(DeltaWorkerId id) {
List<Field<?>> keyFields = new ArrayList<>(4);
keyFields.add(Fields.stringField(NAMESPACE_COL, id.getPipelineId().getNamespace()));
keyFields.add(Fields.stringField(APP_NAME_COL, id.getPipelineId().getApp()));
keyFields.add(Fields.longField(APP_GENERATION_COL, id.getPipelineId().getGeneration()));
keyFields.add(Fields.intField(INSTANCE_ID_COL, id.getInstanceId()));
return keyFields;
}
}<|fim▁end|> | new FieldType(UPDATED_COL, FieldType.Type.LONG)) |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>import pytest
import tempfile
import os
import ConfigParser
def getConfig(optionname,thedefault,section,configfile):
"""read an option from a config file or set a default
send 'thedefault' as the data type you want returned back
i.e. 'True' will return a string<|fim▁hole|> #getConfig('something','adefaultvalue')
retvalue=thedefault
opttype=type(thedefault)
if os.path.isfile(configfile):
config = ConfigParser.ConfigParser()
config.readfp(open(configfile))
if config.has_option(section,optionname):
if opttype==bool:
retvalue=config.getboolean(section,optionname)
elif opttype==int:
retvalue=config.getint(section,optionname)
elif opttype==float:
retvalue=config.getfloat(section,optionname)
else:
retvalue=config.get(section,optionname)
return retvalue
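# Illustrative call (not in the original file), mirroring how the options()
# fixture below uses it; the type of 'thedefault' decides the returned type:
# esserver = getConfig('esserver', 'localhost:9200', 'mozdef', 'setup.cfg')
# verbose = getConfig('verbose', False, 'mozdef', 'setup.cfg')  # returns a bool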
@pytest.fixture
def options():
options=dict()
configFile='setup.cfg'
if pytest.config.inifile:
configFile=str(pytest.config.inifile)
options["esserver"]=getConfig('esserver','localhost:9200','mozdef',configFile)
options["loginput"]=getConfig('loginput','localhost:8080','mozdef',configFile)
options["webuiurl"]=getConfig('webuiurl','http://localhost/','mozdef',configFile)
options["kibanaurl"]=getConfig('kibanaurl','http://localhost:9090/','mozdef',configFile)
if pytest.config.option.verbose > 0:
options["verbose"]=True
print('Using options: \n\t%r' % options)
else:
options["verbose"]=False
return options
@pytest.fixture()
def cleandir():
newpath = tempfile.mkdtemp()
os.chdir(newpath)
def pytest_report_header(config):
if config.option.verbose > 0:
return ["reporting verbose test output"]
#def pytest_addoption(parser):
#parser.addoption("--esserver",
#action="store",
#default="localhost:9200",
#help="elastic search servers to use for testing")
#parser.addoption("--mozdefserver",
#action="store",
#default="localhost:8080",
#help="mozdef server to use for testing")<|fim▁end|> | True will return a bool
1 will return an int
""" |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>from json import JSONDecodeError
from flask import Blueprint, Flask, make_response, Response
from flask_cors import CORS
from google.appengine.api import wrap_wsgi_app
from werkzeug.routing import BaseConverter
from backend.api.handlers.district import (
district_events,
district_list_year,
district_rankings,
district_teams,
)
from backend.api.handlers.error import handle_404
from backend.api.handlers.event import (
event,
event_awards,
event_detail,
event_list_all,
event_list_year,
event_matches,
event_playoff_advancement,
event_teams,
event_teams_statuses,
)
from backend.api.handlers.helpers.profiled_jsonify import profiled_jsonify
from backend.api.handlers.match import match, zebra_motionworks
from backend.api.handlers.media import media_tags
from backend.api.handlers.status import status
from backend.api.handlers.team import (
team,
team_awards,
team_event_awards,
team_event_matches,
team_event_status,
team_events,
team_events_statuses_year,
team_history_districts,
team_history_robots,
team_list,
team_list_all,
team_matches,
team_media_tag,
team_media_year,
team_social_media,
team_years_participated,
)
from backend.api.handlers.trusted import (
add_event_media,
add_match_video,
add_match_zebra_motionworks_info,
delete_all_event_matches,
delete_event_matches,
update_event_alliances,
update_event_awards,
update_event_info,
update_event_matches,
update_event_rankings,
update_teams,
)
from backend.common.datafeed_parsers.exceptions import ParserInputException
from backend.common.flask_cache import configure_flask_cache
from backend.common.logging import configure_logging
from backend.common.middleware import install_middleware
from backend.common.url_converters import install_url_converters
class SimpleModelTypeConverter(BaseConverter):
regex = r"simple"
class ModelTypeConverter(BaseConverter):
regex = r"simple|keys"
class EventDetailTypeConverter(BaseConverter):
regex = r"alliances|district_points|insights|oprs|predictions|rankings"
configure_logging()
app = Flask(__name__)<|fim▁hole|>install_url_converters(app)
configure_flask_cache(app)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True
app.url_map.converters["simple_model_type"] = SimpleModelTypeConverter
app.url_map.converters["model_type"] = ModelTypeConverter
app.url_map.converters["event_detail_type"] = EventDetailTypeConverter
api_v3 = Blueprint("apiv3", __name__, url_prefix="/api/v3")
CORS(
api_v3,
origins="*",
methods=["OPTIONS", "GET"],
allow_headers=["X-TBA-Auth-Key", "If-None-Match", "If-Modified-Since"],
)
# Overall Status
api_v3.add_url_rule("/status", view_func=status)
# District
api_v3.add_url_rule("/district/<string:district_key>/events", view_func=district_events)
api_v3.add_url_rule(
"/district/<string:district_key>/events/<model_type:model_type>",
view_func=district_events,
)
api_v3.add_url_rule("/district/<string:district_key>/teams", view_func=district_teams)
api_v3.add_url_rule(
"/district/<string:district_key>/teams/<model_type:model_type>",
view_func=district_teams,
)
api_v3.add_url_rule(
"/district/<string:district_key>/rankings", view_func=district_rankings
)
# District List
api_v3.add_url_rule("/districts/<int:year>", view_func=district_list_year)
# Event
api_v3.add_url_rule("/event/<string:event_key>", view_func=event)
api_v3.add_url_rule(
"/event/<string:event_key>/<simple_model_type:model_type>", view_func=event
)
api_v3.add_url_rule(
"/event/<string:event_key>/<event_detail_type:detail_type>",
view_func=event_detail,
)
api_v3.add_url_rule("/event/<string:event_key>/teams", view_func=event_teams)
api_v3.add_url_rule(
"/event/<string:event_key>/teams/<model_type:model_type>",
view_func=event_teams,
)
api_v3.add_url_rule(
"/event/<string:event_key>/teams/statuses", view_func=event_teams_statuses
)
api_v3.add_url_rule("event/<string:event_key>/matches", view_func=event_matches)
# api_v3.add_url_rule("event/<string:event_key>/matches/timeseries", view_func=TODO)
api_v3.add_url_rule(
"/event/<string:event_key>/matches/<model_type:model_type>",
view_func=event_matches,
)
api_v3.add_url_rule("/event/<string:event_key>/awards", view_func=event_awards)
api_v3.add_url_rule(
"/event/<string:event_key>/playoff_advancement", view_func=event_playoff_advancement
)
# Event List
api_v3.add_url_rule("/events/all", view_func=event_list_all)
api_v3.add_url_rule("/events/all/<model_type:model_type>", view_func=event_list_all)
api_v3.add_url_rule("/events/<int:year>", view_func=event_list_year)
api_v3.add_url_rule(
"/events/<int:year>/<model_type:model_type>", view_func=event_list_year
)
# Match
api_v3.add_url_rule("/match/<string:match_key>", view_func=match)
api_v3.add_url_rule(
"/match/<string:match_key>/<simple_model_type:model_type>", view_func=match
)
# api_v3.add_url_rule("/match/<string:match_key>/timeseries", view_func=TODO)
api_v3.add_url_rule(
"/match/<string:match_key>/zebra_motionworks", view_func=zebra_motionworks
)
# Media
api_v3.add_url_rule("/media/tags", view_func=media_tags)
# Team
api_v3.add_url_rule("/team/<string:team_key>", view_func=team)
api_v3.add_url_rule(
"/team/<string:team_key>/<simple_model_type:model_type>", view_func=team
)
# Team History
api_v3.add_url_rule(
"/team/<string:team_key>/years_participated", view_func=team_years_participated
)
api_v3.add_url_rule(
"/team/<string:team_key>/districts", view_func=team_history_districts
)
api_v3.add_url_rule("/team/<string:team_key>/robots", view_func=team_history_robots)
api_v3.add_url_rule("/team/<string:team_key>/social_media", view_func=team_social_media)
# Team Events
api_v3.add_url_rule("/team/<string:team_key>/events", view_func=team_events)
api_v3.add_url_rule(
"/team/<string:team_key>/events/<model_type:model_type>", view_func=team_events
)
api_v3.add_url_rule("/team/<string:team_key>/events/<int:year>", view_func=team_events)
api_v3.add_url_rule(
"/team/<string:team_key>/events/<int:year>/<model_type:model_type>",
view_func=team_events,
)
api_v3.add_url_rule(
"/team/<string:team_key>/events/<int:year>/statuses",
view_func=team_events_statuses_year,
)
# Team @ Event
api_v3.add_url_rule(
"/team/<string:team_key>/event/<string:event_key>/matches",
view_func=team_event_matches,
)
api_v3.add_url_rule(
"/team/<string:team_key>/event/<string:event_key>/matches/<model_type:model_type>",
view_func=team_event_matches,
)
api_v3.add_url_rule(
"/team/<string:team_key>/event/<string:event_key>/awards",
view_func=team_event_awards,
)
api_v3.add_url_rule(
"/team/<string:team_key>/event/<string:event_key>/status",
view_func=team_event_status,
)
# Team Awards
api_v3.add_url_rule("/team/<string:team_key>/awards", view_func=team_awards)
api_v3.add_url_rule("/team/<string:team_key>/awards/<int:year>", view_func=team_awards)
# Team Matches
api_v3.add_url_rule(
"/team/<string:team_key>/matches/<int:year>", view_func=team_matches
)
api_v3.add_url_rule(
"/team/<string:team_key>/matches/<int:year>/<model_type:model_type>",
view_func=team_matches,
)
# Team Media
api_v3.add_url_rule(
"/team/<string:team_key>/media/<int:year>", view_func=team_media_year
)
api_v3.add_url_rule(
"/team/<string:team_key>/media/tag/<string:media_tag>", view_func=team_media_tag
)
api_v3.add_url_rule(
"/team/<string:team_key>/media/tag/<string:media_tag>/<int:year>",
view_func=team_media_tag,
)
# Team List
api_v3.add_url_rule("/teams/all", view_func=team_list_all)
api_v3.add_url_rule("/teams/all/<model_type:model_type>", view_func=team_list_all)
api_v3.add_url_rule("/teams/<int:page_num>", view_func=team_list)
api_v3.add_url_rule(
"/teams/<int:page_num>/<model_type:model_type>", view_func=team_list
)
api_v3.add_url_rule("/teams/<int:year>/<int:page_num>", view_func=team_list)
api_v3.add_url_rule(
"/teams/<int:year>/<int:page_num>/<model_type:model_type>",
view_func=team_list,
)
# Trusted API
trusted_api = Blueprint("trusted_api", __name__, url_prefix="/api/trusted/v1")
CORS(
trusted_api,
origins="*",
methods=["OPTIONS", "POST"],
allow_headers=["Content-Type", "X-TBA-Auth-Id", "X-TBA-Auth-Sig"],
)
trusted_api.add_url_rule(
"/event/<string:event_key>/alliance_selections/update",
methods=["POST"],
view_func=update_event_alliances,
),
trusted_api.add_url_rule(
"/event/<string:event_key>/awards/update",
methods=["POST"],
view_func=update_event_awards,
),
trusted_api.add_url_rule(
"/event/<string:event_key>/info/update",
methods=["POST"],
view_func=update_event_info,
),
trusted_api.add_url_rule(
"/event/<string:event_key>/matches/update",
methods=["POST"],
view_func=update_event_matches,
)
trusted_api.add_url_rule(
"/event/<string:event_key>/matches/delete",
methods=["POST"],
view_func=delete_event_matches,
)
trusted_api.add_url_rule(
"/event/<string:event_key>/matches/delete_all",
methods=["POST"],
view_func=delete_all_event_matches,
)
trusted_api.add_url_rule(
"/event/<string:event_key>/match_videos/add",
methods=["POST"],
view_func=add_match_video,
)
trusted_api.add_url_rule(
"/event/<string:event_key>/media/add",
methods=["POST"],
view_func=add_event_media,
)
trusted_api.add_url_rule(
"/event/<string:event_key>/rankings/update",
methods=["POST"],
view_func=update_event_rankings,
)
trusted_api.add_url_rule(
"/event/<string:event_key>/team_list/update",
methods=["POST"],
view_func=update_teams,
)
trusted_api.add_url_rule(
"/event/<string:event_key>/zebra_motionworks/add",
methods=["POST"],
view_func=add_match_zebra_motionworks_info,
)
@trusted_api.errorhandler(JSONDecodeError)
@trusted_api.errorhandler(ParserInputException)
def handle_bad_input(e: Exception) -> Response:
return make_response(profiled_jsonify({"Error": f"{e}"}), 400)
app.register_blueprint(api_v3)
app.register_blueprint(trusted_api)
app.register_error_handler(404, handle_404)<|fim▁end|> | app.wsgi_app = wrap_wsgi_app(app.wsgi_app)
install_middleware(app) |
<|file_name|>test.spec.js<|end_file_name|><|fim▁begin|>'use strict';
var proxy = require('proxyquire');
var stubs = {
googlemaps: jasmine.createSpyObj('googlemaps', ['staticMap']),
request: jasmine.createSpy('request'),
'@noCallThru': true
};
describe('google-static-map', function() {
var uut;
describe('without auto-setting a key', function() {
beforeEach(function() {
uut = proxy('./index', stubs );
});
it('should not work without an api key', function() {
expect( uut ).toThrow(new Error('You must provide a google api console key'));
});
it('should provide a method to set a global api key', function() {
expect( uut.set ).toBeDefined();
uut = uut.set('some-key');
expect( uut ).not.toThrow( jasmine.any( Error ));
});
});
describe('with auto-setting a key', function() {
beforeEach(function() {
uut = proxy('./index', stubs ).set('some-key');
});
it('should get/set config options', function() {
var map = uut();
var set;
['zoom', 'resolution', 'mapType', 'markers', 'style'].forEach(function( key ) {
set = key + ' ' + key;
expect( map.config[key] ).toEqual( map[key]() );
var chain = map[key]( set );
expect( map.config[key] ).toEqual( set );<|fim▁hole|>
it('should relay staticMap call to googlemaps module', function() {
var map = uut();
var testAddress = 'Some Address, Some Country';
var staticMapReturn = 'http://some.where';
var requestReturn = 'request-return-value';
stubs.googlemaps.staticMap.andReturn( staticMapReturn );
stubs.request.andReturn( requestReturn );
var stream = map.address( testAddress ).staticMap().done();
expect( stream ).toEqual('request-return-value');
expect( stubs.googlemaps.staticMap ).toHaveBeenCalledWith(
testAddress,
map.config.zoom,
map.config.resolution,
false,
false,
map.config.mapType,
map.config.markers,
map.config.style,
map.config.paths
);
expect( stubs.request ).toHaveBeenCalledWith( staticMapReturn );
});
});
});<|fim▁end|> |
expect( chain ).toEqual( map );
});
}); |
<|file_name|>sparkread.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
'''
Input: file
Output: stdout
Tidies the output of a Spark stderr file into CSV format
Jeremy Schaub
$ ./sparkread.py [time_output_file]
'''
import sys
class Measurement:
'''
Data structure for /usr/bin/time measurement
'''
def __init__(self):
self.stage_times = ['0']
self.spill_count = -1
self._expected_length = 2
def fields(self):
'''
Returns a list of fields in the data structure
'''
fields = ['spill_count']
num_stages = len(self.stage_times) - 1
stage_header = ['stage %d [sec]' % i for i in range(num_stages)]
stage_header.append('total time [sec]')
fields.extend(stage_header)
return fields<|fim▁hole|> Returns a csv string with all header fields
'''
return ','.join(self.fields())
def rowcsv(self):
'''
Returns a csv string with all data fields
'''
values = [self.spill_count]
values.extend(self.stage_times)
return ','.join(values)
def headerhtml(self, fields=None):
'''
Returns an HTML string all header fields
'''
if not fields:
fields=self.fields()
row = '<tr>\n<th>%s</th>\n</tr>\n' % ('</th>\n<th>'.join(fields))
return row
def addfield(self, name=None, value=None):
if name not in self.fields():
self._expected_length += 1
setattr(self, name, value)
def htmlclass(self):
return "warning" if int(self.spill_count) != 0 else ""
def rowhtml(self, fields=None, rowclass=None):
''' Returns an html formatted string with all td cells in row '''
if not fields:
fields = self.fields()
if not rowclass:
rowclass = self.htmlclass()
values = [self.spill_count]
values.extend(self.stage_times)
html_row = '<tr class="%s">\n<td>' % (rowclass)
html_row += '</td>\n<td>'.join(values)
html_row += '</td>\n</tr>\n'
return html_row
def is_valid(self):
return len(self.fields()) == self._expected_length
def parse(self, spark_fn):
'''
This parses the output of the spark stderr file
'''
try:
with open(spark_fn, 'r') as f:
blob = f.read()
num_stages = len(blob.split('finished in ')[1:])
stage_times = ['' for i in range(num_stages)]
i = 0
total_time = 0
for a in blob.split('finished in ')[1:]:
stage_times[i] = a.split(' s\n')[0]
total_time += float(stage_times[i])
i += 1
stage_times.append(str(total_time))
self.stage_times = stage_times
self.spill_count = str(blob.lower().count('spill'))
if not self.is_valid():
sys.stderr.write('Not a valid spark file %s\n' % spark_fn)
assert False
except Exception as err:
sys.stderr.write('Problem parsing time file %s\n' % spark_fn)
sys.stderr.write(str(err) + '\n')
def main(spark_fn):
# Wrapper to print to stdout
m = Measurement()
m.parse(spark_fn)
sys.stdout.write('%s\n%s\n' % (m.headercsv(), m.rowcsv()))
if __name__ == '__main__':
main(sys.argv[1])<|fim▁end|> |
def headercsv(self):
''' |
<|file_name|>LifeCycle.java<|end_file_name|><|fim▁begin|>/*
* The MIT License
* Copyright © 2014 Cube Island
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package de.cubeisland.engine.modularity.core;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.TreeMap;
import javax.inject.Provider;
import de.cubeisland.engine.modularity.core.graph.Dependency;
import de.cubeisland.engine.modularity.core.graph.DependencyInformation;
import de.cubeisland.engine.modularity.core.graph.meta.ModuleMetadata;
import de.cubeisland.engine.modularity.core.graph.meta.ServiceDefinitionMetadata;
import de.cubeisland.engine.modularity.core.graph.meta.ServiceImplementationMetadata;
import de.cubeisland.engine.modularity.core.graph.meta.ServiceProviderMetadata;
import de.cubeisland.engine.modularity.core.marker.Disable;
import de.cubeisland.engine.modularity.core.marker.Enable;
import de.cubeisland.engine.modularity.core.marker.Setup;
import de.cubeisland.engine.modularity.core.service.ServiceProvider;
import static de.cubeisland.engine.modularity.core.LifeCycle.State.*;
public class LifeCycle
{
private static final Field MODULE_META_FIELD;
private static final Field MODULE_MODULARITY_FIELD;
private static final Field MODULE_LIFECYCLE;
static
{
try
{
MODULE_META_FIELD = Module.class.getDeclaredField("metadata");
MODULE_META_FIELD.setAccessible(true);
MODULE_MODULARITY_FIELD = Module.class.getDeclaredField("modularity");
MODULE_MODULARITY_FIELD.setAccessible(true);
MODULE_LIFECYCLE = Module.class.getDeclaredField("lifeCycle");
MODULE_LIFECYCLE.setAccessible(true);
}
catch (NoSuchFieldException e)
{
throw new IllegalStateException();
}
}
private Modularity modularity;
private DependencyInformation info;
private State current = NONE;
private Object instance;
private Method enable;
private Method disable;
private Map<Integer, Method> setup = new TreeMap<Integer, Method>();
private Map<Dependency, SettableMaybe> maybes = new HashMap<Dependency, SettableMaybe>();
private Queue<LifeCycle> impls = new LinkedList<LifeCycle>();
public LifeCycle(Modularity modularity)
{
this.modularity = modularity;
}
public LifeCycle load(DependencyInformation info)
{
this.info = info;
this.current = LOADED;
return this;
}
public LifeCycle provide(ValueProvider provider)
{
this.instance = provider;
this.current = PROVIDED;
return this;
}
public LifeCycle initProvided(Object object)
{
this.instance = object;
this.current = PROVIDED;
return this;
}
public boolean isIn(State state)
{
return current == state;
}
public LifeCycle instantiate()
{
if (isIn(NONE))
{
throw new IllegalStateException("Cannot instantiate when not loaded");
}
if (isIn(LOADED))
{
try
{
if (info instanceof ServiceDefinitionMetadata)
{
ClassLoader classLoader = info.getClassLoader();
if (classLoader == null) // may happen when loading from classpath
{
classLoader = modularity.getClass().getClassLoader(); // get parent classloader then
}
Class<?> instanceClass = Class.forName(info.getClassName(), true, classLoader);
instance = new ServiceProvider(instanceClass, impls);
// TODO find impls in modularity and link them to this
// TODO transition all impls to INSTANTIATED?
}
else
{
this.instance = info.injectionPoints().get(INSTANTIATED.name(0)).inject(modularity, this);
if (instance instanceof Module)
{
MODULE_META_FIELD.set(instance, info);
MODULE_MODULARITY_FIELD.set(instance, modularity);
MODULE_LIFECYCLE.set(instance, this);
}
info.injectionPoints().get(INSTANTIATED.name(1)).inject(modularity, this);
findMethods();
}
}
catch (ClassNotFoundException e)
{
throw new IllegalStateException(e);
}
catch (IllegalAccessException e)
{
throw new IllegalStateException(e);
}
current = INSTANTIATED;
}
// else State already reached or provided
return this;
}
public LifeCycle setup()
{
if (isIn(NONE))
{
throw new IllegalStateException("Cannot instantiate when not loaded");
}
if (isIn(LOADED))
{
this.instantiate();
}
if (isIn(INSTANTIATED))
{
// TODO abstract those methods away
for (Method method : setup.values())
{
invoke(method);
}
for (LifeCycle impl : impls)
{
impl.setup();
}
current = SETUP;
}
// else reached or provided
return this;
}
public LifeCycle enable()
{
if (isIn(NONE))
{
throw new IllegalStateException("Cannot instantiate when not loaded");
}
if (isIn(LOADED))
{
this.instantiate();
}
if (isIn(INSTANTIATED))
{
this.setup();
}
if (isIn(SETUP))
{
this.modularity.log("Enable " + info.getIdentifier().name());
modularity.runEnableHandlers(getInstance());
invoke(enable);
for (SettableMaybe maybe : maybes.values())
{
maybe.provide(getProvided(this));
}
for (LifeCycle impl : impls)
{
impl.enable();
}
current = ENABLED;
}
return this;
}
public LifeCycle disable()
{
if (isIn(ENABLED))
{
modularity.runDisableHandlers(getInstance());
invoke(disable);
for (SettableMaybe maybe : maybes.values())
{
maybe.remove();
}
// TODO if active impl replace in service with inactive OR disable service too
// TODO if service disable all impls too
modularity.getGraph().getNode(info.getIdentifier()).getPredecessors(); // TODO somehow implement reload too
// TODO disable predecessors
for (LifeCycle impl : impls)
{
impl.disable();
}
current = DISABLED;
}
return this;
}
private void invoke(Method method)
{
if (method != null)
{
if (method.isAnnotationPresent(Setup.class))
{
info.injectionPoints().get(SETUP.name(method.getAnnotation(Setup.class).value()))
.inject(modularity, this);
}
else if (method.isAnnotationPresent(Enable.class))
{
info.injectionPoints().get(ENABLED.name()).inject(modularity, this);
}
else
{
try
{
method.invoke(instance);
}
catch (IllegalAccessException e)
{
throw new IllegalStateException(e);<|fim▁hole|> {
throw new IllegalStateException(e);
}
catch (InvocationTargetException e)
{
throw new IllegalStateException(e);
}
}
}
}
public boolean isInstantiated()
{
return instance != null;
}
private void findMethods()
{
// find enable and disable methods
Class<?> clazz = instance.getClass();
for (Method method : clazz.getMethods())
{
if (method.isAnnotationPresent(Enable.class))
{
enable = method;
}
if (method.isAnnotationPresent(Disable.class))
{
disable = method;
}
if (method.isAnnotationPresent(Setup.class))
{
int value = method.getAnnotation(Setup.class).value();
setup.put(value, method);
}
}
}
public Object getInstance()
{
return instance;
}
@SuppressWarnings("unchecked")
public Maybe getMaybe(LifeCycle other)
{
Dependency identifier = other == null ? null : other.getInformation().getIdentifier();
SettableMaybe maybe = maybes.get(identifier);
if (maybe == null)
{
maybe = new SettableMaybe(getProvided(other));
maybes.put(identifier, maybe);
}
return maybe;
}
public Object getProvided(LifeCycle lifeCycle)
{
boolean enable = true;
if (info instanceof ModuleMetadata)
{
enable = false;
}
if (instance == null)
{
this.instantiate();
}
if (enable)
{
this.enable(); // Instantiate Setup and enable dependency before providing it to someone else
}
Object toSet = instance;
if (toSet instanceof Provider)
{
toSet = ((Provider)toSet).get();
}
if (toSet instanceof ValueProvider)
{
toSet = ((ValueProvider)toSet).get(lifeCycle, modularity);
}
return toSet;
}
public void addImpl(LifeCycle impl)
{
this.impls.add(impl);
}
public DependencyInformation getInformation()
{
return info;
}
public enum State
{
NONE,
LOADED,
INSTANTIATED,
SETUP,
ENABLED,
DISABLED,
SHUTDOWN,
PROVIDED // TODO prevent changing / except shutdown?
;
public String name(Integer value)
{
return value == null ? name() : name() + ":" + value;
}
}
@Override
public String toString() {
return info.getIdentifier().name() + " " + super.toString();
}
}<|fim▁end|> | }
catch (IllegalArgumentException e) |
<|file_name|>_1710_database_panel_group.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the<|fim▁hole|>
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'database'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Database')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'<|fim▁end|> | # License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ |
<|file_name|>table.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An example script that demonstrates converting a proprietary format to a
# Google Transit Feed Specification file.
#
# You can load table.txt, the example input, in Excel. It contains three
# sections:
# 1) A list of global options, starting with a line containing the word
# 'options'. Each option has a name in the first column and most options
# have a value in the second column.
# 2) A table of stops, starting with a line containing the word 'stops'. Each
# row of the table has 3 columns: name, latitude, longitude
# 3) A list of routes. There is an empty row between each route. The first row
# for a route lists the short_name and long_name. After the first row the
# left-most column lists the stop names visited by the route. Each column
# contains the times a single trip visits the stops.
#
# This is a very simple example which you could use as a base for your own
# transit feed builder.
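# A hypothetical table.txt sketch matching the three sections above (columns are
# tab-separated in the real file; every value here is made up for illustration):
#
#   options
#   agency_name     Example Transit Agency
#   agency_url      http://example.com
#   agency_timezone America/Los_Angeles
#   weekday
#   start_date      20070101
#   end_date        20071231
#
#   stops
#   First and Main  36.90   -116.76
#   Second and Oak  36.91   -116.75
#
#   10      Main St Express
#   First and Main  06:00:00   07:00:00
#   Second and Oak  06:10:00   07:10:00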
from __future__ import print_function
import transitfeed
from optparse import OptionParser
import re
stops = {}
# table is a list of lists in this form
# [ ['Short Name', 'Long Name'],
# ['Stop 1', 'Stop 2', ...]
# [time_at_1, time_at_2, ...] # times for trip 1
# [time_at_1, time_at_2, ...] # times for trip 2
# ... ]
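# A concrete (illustrative) value in that shape, with two stops and two trips:
# [ ['10', 'Main St Express'],
#   ['First and Main', 'Second and Oak'],
#   ['06:00:00', '06:10:00'],
#   ['07:00:00', '07:10:00'] ]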
def AddRouteToSchedule(schedule, table):
if len(table) >= 2:
r = schedule.AddRoute(short_name=table[0][0], long_name=table[0][1], route_type='Bus')
for trip in table[2:]:
if len(trip) > len(table[1]):
print("ignoring %s" % trip[len(table[1]):])
trip = trip[0:len(table[1])]
t = r.AddTrip(schedule, headsign='My headsign')
trip_stops = [] # Build a list of (time, stopname) tuples
for i in range(0, len(trip)):
if re.search(r'\S', trip[i]):
trip_stops.append( (transitfeed.TimeToSecondsSinceMidnight(trip[i]), table[1][i]) )
trip_stops.sort() # Sort by time
for (time, stopname) in trip_stops:
t.AddStopTime(stop=stops[stopname.lower()], arrival_secs=time,
departure_secs=time)
def TransposeTable(table):
"""Transpose a list of lists, using None to extend all input lists to the
same length.
For example:
>>> TransposeTable(
[ [11, 12, 13],
[21, 22],
[31, 32, 33, 34]])
[ [11, 21, 31],
[12, 22, 32],
[13, None, 33],
[None, None, 34]]
"""
transposed = []
rows = len(table)
cols = max(len(row) for row in table)
for x in range(cols):
transposed.append([])
for y in range(rows):
if x < len(table[y]):
transposed[x].append(table[y][x])
else:
transposed[x].append(None)
return transposed
def ProcessOptions(schedule, table):
service_period = schedule.GetDefaultServicePeriod()
agency_name, agency_url, agency_timezone = (None, None, None)
for row in table[1:]:
command = row[0].lower()
if command == 'weekday':
service_period.SetWeekdayService()
elif command == 'start_date':
service_period.SetStartDate(row[1])
elif command == 'end_date':
service_period.SetEndDate(row[1])
elif command == 'add_date':
service_period.SetDateHasService(date=row[1])
elif command == 'remove_date':
service_period.SetDateHasService(date=row[1], has_service=False)
elif command == 'agency_name':
agency_name = row[1]
elif command == 'agency_url':<|fim▁hole|> agency_url = row[1]
elif command == 'agency_timezone':
agency_timezone = row[1]
if not (agency_name and agency_url and agency_timezone):
print("You must provide agency information")
schedule.NewDefaultAgency(agency_name=agency_name, agency_url=agency_url,
agency_timezone=agency_timezone)
def AddStops(schedule, table):
for name, lat_str, lng_str in table[1:]:
stop = schedule.AddStop(lat=float(lat_str), lng=float(lng_str), name=name)
stops[name.lower()] = stop
def ProcessTable(schedule, table):
if table[0][0].lower() == 'options':
ProcessOptions(schedule, table)
elif table[0][0].lower() == 'stops':
AddStops(schedule, table)
else:
transposed = [table[0]] # Keep route_short_name and route_long_name on first row
# Transpose rest of table. Input contains the stop names in table[x][0], x
# >= 1 with trips found in columns, so we need to transpose table[1:].
# As a diagram Transpose from
# [['stop 1', '10:00', '11:00', '12:00'],
# ['stop 2', '10:10', '11:10', '12:10'],
# ['stop 3', '10:20', '11:20', '12:20']]
# to
# [['stop 1', 'stop 2', 'stop 3'],
# ['10:00', '10:10', '10:20'],
# ['11:00', '11:11', '11:20'],
# ['12:00', '12:12', '12:20']]
transposed.extend(TransposeTable(table[1:]))
AddRouteToSchedule(schedule, transposed)
def main():
parser = OptionParser()
parser.add_option('--input', dest='input',
help='Path of input file')
parser.add_option('--output', dest='output',
help='Path of output file, should end in .zip')
parser.set_defaults(output='feed.zip')
(options, args) = parser.parse_args()
schedule = transitfeed.Schedule()
table = []
for line in open(options.input):
line = line.rstrip()
if not line:
ProcessTable(schedule, table)
table = []
else:
table.append(line.split('\t'))
ProcessTable(schedule, table)
schedule.WriteGoogleTransitFeed(options.output)
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>cliente.py<|end_file_name|><|fim▁begin|>import xml.etree.ElementTree as ET
import requests
from flask import Flask
import batalha
import pokemon
import ataque
class Cliente:
def __init__(self, execute = False, ip = '127.0.0.1', port = 5000, npc = False):
self.ip = ip
self.port = port
self.npc = npc
if (execute):
self.iniciaBatalha()
def writeXML(self, pkmn):
# Writes the battle_state XML from a pokemon
root = ET.Element('battle_state')
ET.SubElement(root, "pokemon")
poke = root.find('pokemon')
ET.SubElement(poke, "name")
poke.find('name').text = pkmn.getNome()
ET.SubElement(poke, "level")
poke.find('level').text = str(pkmn.getLvl())
ET.SubElement(poke, "attributes")
poke_att = poke.find('attributes')
ET.SubElement(poke_att, "health")
poke_att.find('health').text = str(pkmn.getHp())
ET.SubElement(poke_att, "attack")
poke_att.find('attack').text = str(pkmn.getAtk())
ET.SubElement(poke_att, "defense")
poke_att.find('defense').text = str(pkmn.getDefe())
ET.SubElement(poke_att, "speed")
poke_att.find('speed').text = str(pkmn.getSpd())
ET.SubElement(poke_att, "special")
poke_att.find('special').text = str(pkmn.getSpc())
ET.SubElement(poke, "type")
ET.SubElement(poke, "type")
tipos = poke.findall('type')
tipos[0].text = str(pkmn.getTyp1())
tipos[1].text = str(pkmn.getTyp2())
for i in range(0, 4):
atk = pkmn.getAtks(i)<|fim▁hole|> poke_atk = poke.findall('attacks')
ET.SubElement(poke_atk[-1], "id")
poke_atk[-1].find('id').text = str(i + 1)
ET.SubElement(poke_atk[-1], "name")
poke_atk[-1].find('name').text = atk.getNome()
ET.SubElement(poke_atk[-1], "type")
poke_atk[-1].find('type').text = str(atk.getTyp())
ET.SubElement(poke_atk[-1], "power")
poke_atk[-1].find('power').text = str(atk.getPwr())
ET.SubElement(poke_atk[-1], "accuracy")
poke_atk[-1].find('accuracy').text = str(atk.getAcu())
ET.SubElement(poke_atk[-1], "power_points")
poke_atk[-1].find('power_points').text = str(atk.getPpAtual())
s = ET.tostring(root)
return s
def iniciaBatalha(self):
pkmn = pokemon.Pokemon()
xml = self.writeXML(pkmn)
try:
self.battle_state = requests.post('http://{}:{}/battle/'.format(self.ip, self.port), data = xml).text
except requests.exceptions.ConnectionError:
print("Não foi possível conectar ao servidor.")
return None
pkmn2 = pokemon.lePokemonXML(1, self.battle_state)
self.batalha = batalha.Batalha([pkmn, pkmn2])
if (self.npc):
self.batalha.pkmn[0].npc = True
print("Eu sou um NPC")
self.batalha.turno = 0
self.batalha.display.showPokemon(self.batalha.pkmn[0])
self.batalha.display.showPokemon(self.batalha.pkmn[1])
return self.atualizaBatalha()
def atualizaBatalha(self):
self.batalha.AlternaTurno()
root = ET.fromstring(self.battle_state)
for i in range(0,2):
pkmnXML = root[i]
atksXML = root[i].findall('attacks')
pkmn = self.batalha.pkmn[i]
pkmn.setHpAtual(int(pkmnXML.find('attributes').find('health').text))
self.batalha.showStatus()
if (not self.batalha.isOver()):
self.batalha.AlternaTurno()
if (self.batalha.pkmn[self.batalha.turno].npc):
id = self.batalha.EscolheAtaqueInteligente()
else:
id = self.batalha.EscolheAtaque()
self.batalha.pkmn[0].getAtks(id).decreasePp()
if (id == 4):
self.battle_state = requests.post('http://{}:{}/battle/attack/{}'.format(self.ip, self.port, 0)).text
else:
self.battle_state = requests.post('http://{}:{}/battle/attack/{}'.format(self.ip, self.port, id + 1)).text
self.simulaAtaque(id)
self.atualizaBatalha()
else:
self.batalha.showResults()
return 'FIM'
def sendShutdownSignal(self):
requests.post('http://{}:{}/shutdown'.format(self.ip, self.port))
def simulaAtaque(self, idCliente):
disp = self.batalha.display
root = ET.fromstring(self.battle_state)
pkmnCXML = root[0]
pkmnC = self.batalha.pkmn[0]
pkmnSXML = root[1]
pkmnS = self.batalha.pkmn[1]
atksXML = pkmnSXML.findall('attacks')
idServidor = self.descobreAtaqueUsado(atksXML, pkmnS)
if (int(pkmnSXML.find('attributes').find('health').text) > 0):
if (idCliente != 4):
if (idServidor != 4):
dmg = pkmnS.getHpAtual() - int(pkmnSXML.find('attributes').find('health').text)
if (dmg == 0):
disp.miss(pkmnC, pkmnS, pkmnC.getAtks(idCliente))
else:
disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idCliente), dmg)
dmg = pkmnC.getHpAtual() - int(pkmnCXML.find('attributes').find('health').text)
if (dmg == 0):
disp.miss(pkmnS, pkmnC, pkmnS.getAtks(idServidor))
else:
disp.hit(pkmnS, pkmnC, pkmnS.getAtks(idServidor), dmg)
else:
dmgStruggle = pkmnC.getHpAtual() - int(pkmnCXML.find('attributes').find('health').text)
dmg = pkmnS.getHpAtual() - int(pkmnSXML.find('attributes').find('health').text) + round(dmgStruggle / 2, 0)
if (dmg == 0):
disp.miss(pkmnC, pkmnS, pkmnC.getAtks(idCliente))
else:
disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idCliente), dmg)
disp.hit(pkmnS, pkmnC, pkmnS.getAtks(idServidor), dmgStruggle)
disp.hitSelf(pkmnS, round(dmgStruggle / 2, 0))
else:
if (idServidor != 4):
dmgStruggle = pkmnS.getHpAtual() - int(pkmnSXML.find('attributes').find('health').text)
disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idCliente), dmgStruggle)
disp.hitSelf(pkmnC, round(dmgStruggle / 2, 0))
dmg = pkmnC.getHpAtual() - int(pkmnCXML.find('attributes').find('health').text) + round(dmgStruggle / 2, 0)
if (dmg == 0):
disp.miss(pkmnS, pkmnC, pkmnS.getAtks(idServidor))
else:
disp.hit(pkmnS, pkmnC, pkmnS.getAtks(idServidor), dmg)
else:
print('Ambos usam e se machucam com Struggle!')
else:
if (idCliente != 4):
dmg = pkmnS.getHpAtual() - int(pkmnSXML.find('attributes').find('health').text)
if (dmg == 0):
disp.miss(pkmnC, pkmnS, pkmnC.getAtks(idCliente))
else:
disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idCliente), dmg)
else:
dmgStruggle = pkmnC.getHpAtual() - int(pkmnCXML.find('attributes').find('health').text)
disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idServidor), dmgStruggle * 2)
disp.hitSelf(pkmnC, round(dmgStruggle, 0))
def descobreAtaqueUsado(self, atksXML, pkmn):
for i in range(0, len(atksXML)):
id = int(atksXML[i].find('id').text) - 1
ppXML = int(atksXML[i].find('power_points').text)
pp = pkmn.getAtks(id).getPpAtual()
if (pp != ppXML):
pkmn.getAtks(id).decreasePp()
return id
return id<|fim▁end|> | if (atk is not None):
ET.SubElement(poke, "attacks") |
<|file_name|>TestIntegration.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2010-2011 Ning, Inc.
*
* Ning licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/<|fim▁hole|>import com.google.inject.Guice;
import com.google.inject.Injector;
import com.ning.metrics.serialization.event.ThriftToThriftEnvelopeEvent;
import com.ning.metrics.serialization.writer.SyncType;
import org.joda.time.DateTime;
import org.skife.config.ConfigurationObjectFactory;
import org.testng.Assert;
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import java.io.File;
import java.util.UUID;
@Test(enabled = false)
public class TestIntegration
{
private final File tmpDir = new File(System.getProperty("java.io.tmpdir"), "collector");
@SuppressWarnings("unused")
@BeforeTest(alwaysRun = true)
private void setupTmpDir()
{
if (!tmpDir.exists() && !tmpDir.mkdirs()) {
throw new RuntimeException("Failed to create: " + tmpDir);
}
if (!tmpDir.isDirectory()) {
throw new RuntimeException("Path points to something that's not a directory: " + tmpDir);
}
}
@SuppressWarnings("unused")
@AfterTest(alwaysRun = true)
private void cleanupTmpDir()
{
tmpDir.delete();
}
@Test(groups = "slow", enabled = false)
public void testGuiceThrift() throws Exception
{
System.setProperty("eventtracker.type", "SCRIBE");
System.setProperty("eventtracker.directory", tmpDir.getAbsolutePath());
System.setProperty("eventtracker.scribe.host", "127.0.0.1");
System.setProperty("eventtracker.scribe.port", "7911");
final Injector injector = Guice.createInjector(new CollectorControllerModule());
final CollectorController controller = injector.getInstance(CollectorController.class);
final ScribeSender sender = (ScribeSender) injector.getInstance(EventSender.class);
sender.createConnection();
fireThriftEvents(controller);
sender.close();
}
@Test(groups = "slow", enabled = false)
public void testScribeFactory() throws Exception
{
System.setProperty("eventtracker.type", "COLLECTOR");
System.setProperty("eventtracker.directory", tmpDir.getAbsolutePath());
System.setProperty("eventtracker.collector.host", "127.0.0.1");
System.setProperty("eventtracker.collector.port", "8080");
final EventTrackerConfig config = new ConfigurationObjectFactory(System.getProperties()).build(EventTrackerConfig.class);
final CollectorController controller = ScribeCollectorFactory.createScribeController(
config.getScribeHost(),
config.getScribePort(),
config.getScribeRefreshRate(),
config.getScribeMaxIdleTimeInMinutes(),
config.getSpoolDirectoryName(),
config.isFlushEnabled(),
config.getFlushIntervalInSeconds(),
SyncType.valueOf(config.getSyncType()),
config.getSyncBatchSize(),
config.getMaxUncommittedWriteCount(),
config.getMaxUncommittedPeriodInSeconds()
);
fireThriftEvents(controller);
}
private void fireThriftEvents(final CollectorController controller) throws Exception
{
controller.offerEvent(ThriftToThriftEnvelopeEvent.extractEvent("thrift", new DateTime(), new Click(UUID.randomUUID().toString(), new DateTime().getMillis(), "user agent")));
Assert.assertEquals(controller.getEventsReceived().get(), 1);
Assert.assertEquals(controller.getEventsLost().get(), 0);
controller.commit();
controller.flush();
Thread.sleep(5000);
}
}<|fim▁end|> |
package com.ning.metrics.eventtracker;
|
<|file_name|>err.rs<|end_file_name|><|fim▁begin|>use std::error::Error;
use std::fmt;
pub type Result<T> = ::std::result::Result<T, DrawError>;
/// The enum `DrawError` defines the possible errors
/// returned by the `Position` constructor.
#[derive(Clone, Debug)]
pub enum DrawError {
OutOfSize(String),
}
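// Illustrative only (not part of the original crate): a drawing routine handed
// coordinates outside the drawable area could report it as
// Err(DrawError::OutOfSize("position exceeds the frame size".to_string())).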
impl fmt::Display for DrawError {
/// The function `fmt` formats the value using
/// the given formatter.
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {<|fim▁hole|>
impl Error for DrawError {
/// The function `description` returns a short description of
/// the error.
fn description(&self) -> &str {
match *self {
DrawError::OutOfSize(ref size) => size,
}
}
/// The function `cause` returns the lower-level cause of
/// this error if any.
fn cause(&self) -> Option<&Error> {
None
}
}<|fim▁end|> | Ok(())
}
} |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
from __future__ import unicode_literals
import logging
import warnings
from admin_scripts.tests import AdminScriptTestCase
from django.core import mail
from django.core.files.temp import NamedTemporaryFile
from django.test import RequestFactory, TestCase, override_settings
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.encoding import force_text
from django.utils.log import (
AdminEmailHandler, CallbackFilter, RequireDebugFalse, RequireDebugTrue,
)
from django.utils.six import StringIO
from .logconfig import MyEmailBackend
# logging config prior to using filter with mail_admins
OLD_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
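# With this config every ERROR logged under 'django.request' is mailed to the
# admins unconditionally, since no RequireDebugFalse filter is attached to the
# mail_admins handler above.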
class LoggingFiltersTest(TestCase):
def test_require_debug_false_filter(self):
"""
Test the RequireDebugFalse filter class.
"""
filter_ = RequireDebugFalse()
with self.settings(DEBUG=True):
self.assertEqual(filter_.filter("record is not used"), False)
with self.settings(DEBUG=False):
self.assertEqual(filter_.filter("record is not used"), True)
def test_require_debug_true_filter(self):
"""
Test the RequireDebugTrue filter class.
"""
filter_ = RequireDebugTrue()
with self.settings(DEBUG=True):
self.assertEqual(filter_.filter("record is not used"), True)
with self.settings(DEBUG=False):
self.assertEqual(filter_.filter("record is not used"), False)
class DefaultLoggingTest(TestCase):
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
def test_django_logger(self):
"""
The 'django' base logger only outputs anything when DEBUG=True.
"""
output = StringIO()
self.logger.handlers[0].stream = output
self.logger.error("Hey, this is an error.")
self.assertEqual(output.getvalue(), '')
with self.settings(DEBUG=True):
self.logger.error("Hey, this is an error.")
self.assertEqual(output.getvalue(), 'Hey, this is an error.\n')
class WarningLoggerTests(TestCase):
"""
Tests that warnings output for RemovedInDjangoXXWarning (XX being the next
Django version) is enabled and captured to the logging system
"""
def setUp(self):
# If tests are invoke with "-Wall" (or any -W flag actually) then
# warning logging gets disabled (see configure_logging in django/utils/log.py).
# However, these tests expect warnings to be logged, so manually force warnings
# to the logs. Use getattr() here because the logging capture state is
# undocumented and (I assume) brittle.
self._old_capture_state = bool(getattr(logging, '_warnings_showwarning', False))
logging.captureWarnings(True)
# this convoluted setup is to avoid printing this deprecation to
# stderr during test running - as the test runner forces deprecations
# to be displayed at the global py.warnings level
self.logger = logging.getLogger('py.warnings')
self.outputs = []
self.old_streams = []
for handler in self.logger.handlers:
self.old_streams.append(handler.stream)
self.outputs.append(StringIO())
handler.stream = self.outputs[-1]
def tearDown(self):
for i, handler in enumerate(self.logger.handlers):
self.logger.handlers[i].stream = self.old_streams[i]
# Reset warnings state.
logging.captureWarnings(self._old_capture_state)
@override_settings(DEBUG=True)
def test_warnings_capture(self):
with warnings.catch_warnings():
warnings.filterwarnings('always')
warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
output = force_text(self.outputs[0].getvalue())
self.assertIn('Foo Deprecated', output)
def test_warnings_capture_debug_false(self):
with warnings.catch_warnings():
warnings.filterwarnings('always')
warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
output = force_text(self.outputs[0].getvalue())
self.assertNotIn('Foo Deprecated', output)
@override_settings(DEBUG=True)
def test_error_filter_still_raises(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'error',
category=RemovedInNextVersionWarning
)
with self.assertRaises(RemovedInNextVersionWarning):
warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
class CallbackFilterTest(TestCase):
def test_sense(self):
f_false = CallbackFilter(lambda r: False)
f_true = CallbackFilter(lambda r: True)
self.assertEqual(f_false.filter("record"), False)
self.assertEqual(f_true.filter("record"), True)<|fim▁hole|>
def _callback(record):
collector.append(record)
return True
f = CallbackFilter(_callback)
f.filter("a record")
self.assertEqual(collector, ["a record"])
class AdminEmailHandlerTest(TestCase):
logger = logging.getLogger('django.request')
def get_admin_email_handler(self, logger):
# Inspired from views/views.py: send_log()
# ensuring the AdminEmailHandler does not get filtered out
# even with DEBUG=True.
admin_email_handler = [
h for h in logger.handlers
if h.__class__.__name__ == "AdminEmailHandler"
][0]
return admin_email_handler
def test_fail_silently(self):
admin_email_handler = self.get_admin_email_handler(self.logger)
self.assertTrue(admin_email_handler.connection().fail_silently)
@override_settings(
ADMINS=(('whatever admin', '[email protected]'),),
EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
)
def test_accepts_args(self):
"""
Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
setting are used to compose the email subject.
Refs #16736.
"""
message = "Custom message that says '%s' and '%s'"
token1 = 'ping'
token2 = 'pong'
admin_email_handler = self.get_admin_email_handler(self.logger)
# Backup then override original filters
orig_filters = admin_email_handler.filters
try:
admin_email_handler.filters = []
self.logger.error(message, token1, token2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
self.assertEqual(mail.outbox[0].subject,
"-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
finally:
# Restore original filters
admin_email_handler.filters = orig_filters
@override_settings(
ADMINS=(('whatever admin', '[email protected]'),),
EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
INTERNAL_IPS=('127.0.0.1',),
)
def test_accepts_args_and_request(self):
"""
Ensure that the subject is also handled if being
passed a request object.
"""
message = "Custom message that says '%s' and '%s'"
token1 = 'ping'
token2 = 'pong'
admin_email_handler = self.get_admin_email_handler(self.logger)
# Backup then override original filters
orig_filters = admin_email_handler.filters
try:
admin_email_handler.filters = []
rf = RequestFactory()
request = rf.get('/')
self.logger.error(message, token1, token2,
extra={
'status_code': 403,
'request': request,
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
self.assertEqual(mail.outbox[0].subject,
"-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
finally:
# Restore original filters
admin_email_handler.filters = orig_filters
@override_settings(
ADMINS=(('admin', '[email protected]'),),
EMAIL_SUBJECT_PREFIX='',
DEBUG=False,
)
def test_subject_accepts_newlines(self):
"""
Ensure that newlines in email reports' subjects are escaped so that
AdminEmailHandler does not fail.
Refs #17281.
"""
message = 'Message \r\n with newlines'
expected_subject = 'ERROR: Message \\r\\n with newlines'
self.assertEqual(len(mail.outbox), 0)
self.logger.error(message)
self.assertEqual(len(mail.outbox), 1)
self.assertNotIn('\n', mail.outbox[0].subject)
self.assertNotIn('\r', mail.outbox[0].subject)
self.assertEqual(mail.outbox[0].subject, expected_subject)
@override_settings(
ADMINS=(('admin', '[email protected]'),),
EMAIL_SUBJECT_PREFIX='',
DEBUG=False,
)
def test_truncate_subject(self):
"""
RFC 2822's hard limit is 998 characters per line.
So, minus "Subject: ", the actual subject must be no longer than 989
characters.
Refs #17281.
"""
message = 'a' * 1000
expected_subject = 'ERROR: aa' + 'a' * 980
self.assertEqual(len(mail.outbox), 0)
self.logger.error(message)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, expected_subject)
@override_settings(
ADMINS=(('admin', '[email protected]'),),
DEBUG=False,
)
def test_uses_custom_email_backend(self):
"""
Refs #19325
"""
message = 'All work and no play makes Jack a dull boy'
admin_email_handler = self.get_admin_email_handler(self.logger)
mail_admins_called = {'called': False}
def my_mail_admins(*args, **kwargs):
connection = kwargs['connection']
self.assertIsInstance(connection, MyEmailBackend)
mail_admins_called['called'] = True
# Monkeypatches
orig_mail_admins = mail.mail_admins
orig_email_backend = admin_email_handler.email_backend
mail.mail_admins = my_mail_admins
admin_email_handler.email_backend = (
'logging_tests.logconfig.MyEmailBackend')
try:
self.logger.error(message)
self.assertTrue(mail_admins_called['called'])
finally:
# Revert Monkeypatches
mail.mail_admins = orig_mail_admins
admin_email_handler.email_backend = orig_email_backend
@override_settings(
ADMINS=(('whatever admin', '[email protected]'),),
)
def test_emit_non_ascii(self):
"""
#23593 - AdminEmailHandler should allow Unicode characters in the
request.
"""
handler = self.get_admin_email_handler(self.logger)
record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
rf = RequestFactory()
url_path = '/º'
record.request = rf.get(url_path)
handler.emit(record)
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.to, ['[email protected]'])
self.assertEqual(msg.subject, "[Django] ERROR (EXTERNAL IP): message")
self.assertIn("path:%s" % url_path, msg.body)
@override_settings(
MANAGERS=(('manager', '[email protected]'),),
DEBUG=False,
)
def test_customize_send_mail_method(self):
class ManagerEmailHandler(AdminEmailHandler):
def send_mail(self, subject, message, *args, **kwargs):
mail.mail_managers(subject, message, *args, connection=self.connection(), **kwargs)
handler = ManagerEmailHandler()
record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
self.assertEqual(len(mail.outbox), 0)
handler.emit(record)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
class SettingsConfigTest(AdminScriptTestCase):
"""
Test that accessing settings in a custom logging handler does not trigger
a circular import error.
"""
def setUp(self):
log_config = """{
'version': 1,
'handlers': {
'custom_handler': {
'level': 'INFO',
'class': 'logging_tests.logconfig.MyHandler',
}
}
}"""
self.write_settings('settings.py', sdict={'LOGGING': log_config})
def tearDown(self):
self.remove_settings('settings.py')
def test_circular_dependency(self):
# validate is just an example command to trigger settings configuration
out, err = self.run_manage(['validate'])
self.assertNoOutput(err)
self.assertOutput(out, "System check identified no issues (0 silenced).")
def dictConfig(config):
dictConfig.called = True
dictConfig.called = False
class SetupConfigureLogging(TestCase):
"""
Test that calling django.setup() initializes the logging configuration.
"""
@override_settings(LOGGING_CONFIG='logging_tests.tests.dictConfig',
LOGGING=OLD_LOGGING)
def test_configure_initializes_logging(self):
from django import setup
setup()
self.assertTrue(dictConfig.called)
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class SecurityLoggerTest(TestCase):
def test_suspicious_operation_creates_log_message(self):
with patch_logger('django.security.SuspiciousOperation', 'error') as calls:
self.client.get('/suspicious/')
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0], 'dubious')
def test_suspicious_operation_uses_sublogger(self):
with patch_logger('django.security.DisallowedHost', 'error') as calls:
self.client.get('/suspicious_spec/')
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0], 'dubious')
@override_settings(
ADMINS=(('admin', '[email protected]'),),
DEBUG=False,
)
def test_suspicious_email_admins(self):
self.client.get('/suspicious/')
self.assertEqual(len(mail.outbox), 1)
self.assertIn('path:/suspicious/,', mail.outbox[0].body)
class SettingsCustomLoggingTest(AdminScriptTestCase):
"""
Test that logging defaults are still applied when using a custom
callable in LOGGING_CONFIG (i.e., logging.config.fileConfig).
"""
def setUp(self):
logging_conf = """
[loggers]
keys=root
[handlers]
keys=stream
[formatters]
keys=simple
[logger_root]
handlers=stream
[handler_stream]
class=StreamHandler
formatter=simple
args=(sys.stdout,)
[formatter_simple]
format=%(message)s
"""
self.temp_file = NamedTemporaryFile()
self.temp_file.write(logging_conf.encode('utf-8'))
self.temp_file.flush()
sdict = {'LOGGING_CONFIG': '"logging.config.fileConfig"',
'LOGGING': 'r"%s"' % self.temp_file.name}
self.write_settings('settings.py', sdict=sdict)
def tearDown(self):
self.temp_file.close()
self.remove_settings('settings.py')
def test_custom_logging(self):
out, err = self.run_manage(['validate'])
self.assertNoOutput(err)
self.assertOutput(out, "System check identified no issues (0 silenced).")<|fim▁end|> |
def test_passes_on_record(self):
collector = [] |
<|file_name|>types.rs<|end_file_name|><|fim▁begin|>use std::sync::Arc;<|fim▁hole|>use std::str::SplitWhitespace;
pub type Params<'a> = SplitWhitespace<'a>;
pub type Flag = Arc<AtomicBool>;
pub const BK_CASTLE: u8 = 1;
pub const WK_CASTLE: u8 = BK_CASTLE << WHITE;
pub const BQ_CASTLE: u8 = 1 << 2;
pub const WQ_CASTLE: u8 = BQ_CASTLE << WHITE;
pub const KING_CASTLE: u8 = WK_CASTLE | BK_CASTLE;
pub const QUEEN_CASTLE: u8 = WQ_CASTLE | BQ_CASTLE;
pub const PAWN: u8 = 0;
pub const KNIGHT: u8 = 1 << 1;
pub const BISHOP: u8 = 2 << 1;
pub const ROOK: u8 = 3 << 1;
pub const QUEEN: u8 = 4 << 1;
pub const KING: u8 = 5 << 1;
pub const ALL: u8 = 6 << 1;
pub const EMPTY: u8 = 255;
pub const COLOR: u8 = 1;
pub const WHITE: u8 = COLOR;
pub const BLACK: u8 = 0;
pub const PIECE: u8 = 0b1110;
pub const I_WHITE: usize = WHITE as usize;
pub const I_BLACK: usize = BLACK as usize;
pub fn flip(c: u8) -> u8 {
c ^ WHITE
}
pub const PVALS: [u32; 12] = [1000, 1000,
4126, 4126,
4222, 4222,
6414, 6414,
12730, 12730,
300000, 300000];
pub fn p_val(piece: u8) -> u32 {
match piece {
EMPTY => 0,
_ => PVALS[piece as usize]
}
}
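// Illustrative example (added, not in the original source): a piece byte packs the
// piece kind in bits 1..3 and the colour in bit 0, so a white knight is
// KNIGHT | WHITE == 0b0011, p_val(KNIGHT | WHITE) == 4126, and flip(WHITE) == BLACK.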
pub const KNIGHT_PROM: u32 = 1;
pub const BISHOP_PROM: u32 = 2;
pub const ROOK_PROM: u32 = 3;
pub const QUEEN_PROM: u32 = 4;
pub const CASTLES_KING: u32 = 1 << 3;
pub const CASTLES_QUEEN: u32 = 1 << 4;
pub const IS_CAPTURE: u32 = 1 << 5;
pub const DOUBLE_PAWN_PUSH: u32 = 1 << 6;
pub const EN_PASSANT: u32 = 1 << 7;<|fim▁end|> | use std::sync::atomic::AtomicBool; |
<|file_name|>user.service.spec.ts<|end_file_name|><|fim▁begin|>import {UserService} from "./user.service";
import {UserMongooseRepository} from "../repository/user-mongoose.repository";
import {UserRepository} from "../repository/user.repository";
import * as mongoose from "mongoose";
import {DummyUsers} from "../repository/user-mongoose.repository.spec";
import {sign} from "jsonwebtoken";
import {UserModel} from "../model/user.model";
import {UserError} from "../errors/user.errors";
import {PassportService} from "../../passport/service/passport.service";
import {config} from "../../../../config";
import {PassportError} from "../../passport/errors/passport.errors.enum";
/**
* Created by cturner on 19/09/2016.
*/
describe("UserMongooseModel Service", () => {
let userRepo: UserRepository = new UserMongooseRepository();
let userService: UserService = new UserService(new PassportService(userRepo), userRepo);
beforeAll( done => {
(mongoose as any).Promise = global.Promise;
mongoose.connect(config.mongo.testUri + "-user-service", config.mongo.options);
mongoose.connection.once("connected", done);
});
afterAll(done => {
mongoose.connection.close();
mongoose.connection.once("disconnected", done);
});
beforeEach(done => {
DummyUsers.create()
.then(done);
});
afterEach(done => {
mongoose.connection.db.dropDatabase(done);
});
describe("getting users", () => {
it("should fail if requesting user is invalid", done => {
userService.get("rubbishToken")
.catch( (err: PassportError) => {
expect(err).toBe(PassportError.InvalidToken);
done();
});
});
it("should succeed if requesting user id valid", done => {
userService.get(sign(DummyUsers.normalUserOne.key, config.secrets.sessionToken))
.then( (users: UserModel[]) => {
expect(users.length).toBe(DummyUsers.users.length);
done();
});
});
});
<|fim▁hole|> it("should fail if requesting user is invalid", done => {
userService.add("rubbishToken", DummyUsers.newUserOne)
.catch( (err: PassportError) => {
expect(err).toBe(PassportError.InvalidToken);
done();
});
});
it(`should fail if the requesting user doesn't have the right roles`, done => {
userService.add(sign(DummyUsers.normalUserOne.key, config.secrets.sessionToken), DummyUsers.newUserOne)
.catch((err: UserError[]) => {
expect(err).toEqual([UserError.AddUserOnlyMembershipUsers]);
done();
});
});
it(`should fail if user is a duplicate and no name provided`, done => {
let user = DummyUsers.normalUserOne;
user.key = "";
user.name = "";
user.password = null;
userService.add(sign(DummyUsers.membershipUser.key, config.secrets.sessionToken), user)
.catch((err: UserError[]) => {
expect(err.length).toBe(2);
expect(err).toContain(UserError.NameRequired);
expect(err).toContain(UserError.EmailNotUnique);
done();
});
});
it(`should succeed if all is ok`, done => {
userService.add(sign(DummyUsers.membershipUser.key, config.secrets.sessionToken), DummyUsers.newUserOne)
.then((model: UserModel) => {
expect(model.name).toEqual(DummyUsers.newUserOne.name);
done();
})
.catch(err => {
console.log(err);
done();
});
});
});
describe("updating a user", () => {
it("should fail if requesting user is invalid", done => {
userService.update("rubbishToken", DummyUsers.normalUserOne)
.catch( (err: PassportError) => {
expect(err).toBe(PassportError.InvalidToken);
done();
});
});
it(`should fail if the requesting user doesn't have the right roles`, done => {
let user = DummyUsers.normalUserTwo;
user.password = null;
user.name += "Updated";
userService.update(sign(DummyUsers.normalUserOne.key, config.secrets.sessionToken), user)
.catch((err: UserError[]) => {
expect(err).toEqual([UserError.NameEditNotPermitted]);
done();
});
});
it(`should fail if user is a duplicate and no name provided`, done => {
let user = DummyUsers.normalUserTwo;
user.name = "";
user.email = DummyUsers.normalUserOne.email;
userService.update(sign(DummyUsers.normalUserTwo.key, config.secrets.sessionToken), user)
.catch((err: UserError[]) => {
expect(err.length).toBe(2);
expect(err).toContain(UserError.NameRequired);
expect(err).toContain(UserError.EmailNotUnique);
done();
});
});
it(`expect modified date to be updated`, done => {
let user = DummyUsers.normalUserTwo;
user.name += "Updated";
user.email += "Updated";
userService.update(sign(DummyUsers.normalUserTwo.key, config.secrets.sessionToken), user)
.then((updated: UserModel) => {
expect(DummyUsers.normalUserOne.dateLastModified.isBefore(updated.dateLastModified)).toBe(true);
expect(user.key).toEqual(updated.key);
done();
});
});
});
});<|fim▁end|> | describe("adding a user", () => { |
<|file_name|>aqicn.js<|end_file_name|><|fim▁begin|>/**
* @fileOverview
* @name aqicn.js
* @author ctgnauh <[email protected]>
* @license MIT
*/
var request = require('request');
var cheerio = require('cheerio');
var info = require('./info.json');
/**
 * Fetch air quality information from aqicn.org
* @module aqicn
*/
module.exports = {
// Some extra information
info: info,
/**
 * The callback for fetchWebPage
 * @callback module:aqicn~fetchWebPageCallback
 * @param {object} error - request error
 * @param {object} result - page body text
*/
/**
 * Fetch the mobile version of the aqicn.org page.
 * The desktop version of aqicn.org is over 300 KB while the mobile version is under 70 KB, so the mobile version is used by appending /m/ to the URL.
 * @param {string} city - city or area code, see [all locations](http://aqicn.org/city/all/)
* @param {module:aqicn~fetchWebPageCallback} callback
*/
fetchWebPage: function (city, callback) {
'use strict';
var options = {
url: 'http://aqicn.org/city/' + city + '/m/',
headers: {
'User-Agent': 'wget'
}
};
request.get(options, function (err, res, body) {
if (err) {
callback(err, '');
} else {
callback(null, body);
}
});
},
/**
 * Parse the HTML document and return the requested AQI value
 * @param {string} body - page body text
 * @param {string} name - pollutant code: pm25, pm10, o3, no2, so2, co
 * @returns {number} AQI value
*/
selectAQIText: function (body, name) {
'use strict';
var self = this;
var $ = cheerio.load(body);
var json;
var value;
try {
json = JSON.parse($('#table script').text().slice(12, -2)); // "genAqiTable({...})"
value = self.info.species.indexOf(name);
} catch (err) {
return NaN;
}
return json.d[value].iaqi;
},
/**
 * Parse the HTML document and return the update time
 * @param {string} body - page body text
 * @returns {string} time in ISO format
*/
selectUpdateTime: function (body) {
'use strict';
var $ = cheerio.load(body);
var json;
try {
json = JSON.parse($('#table script').text().slice(12, -2)); // "genAqiTable({...})"
} catch (err) {
return new Date(0).toISOString();
}
return json.t;
},
/**
 * Pollution level and related information
 * @param {number} level - AQI level
 * @param {string} lang - language: cn, en, jp, es, kr, ru, hk, fr, pl (currently only cn and en are available)
 * @returns {object} an object with the AQI level, pollution grade, health implications, and recommended precautions
*/
selectInfoText: function (level, lang) {
'use strict';
var self = this;
if (level > 6 || level < 0) {
level = 0;
}
return {
value: level,
name: self.info.level[level].name[lang],
implication: self.info.level[level].implication[lang],
statement: self.info.level[level].statement[lang]
};
},
/**
 * Compute the overall AQI using the algorithm adopted by aqicn.org: take the largest of the individual AQI values
 * @param {array} aqis - array containing all of the AQI values
 * @returns {number} the maximum AQI
*/
calculateAQI: function (aqis) {
'use strict';
return Math.max.apply(null, aqis);
},
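  // Example (added for illustration): calculateAQI([12, 55, 40]) === 55;
  // the dominant pollutant determines the overall AQI.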
/**
 * Compute the air pollution level; see [About air quality and the air pollution index](http://aqicn.org/?city=&size=xlarge&aboutaqi) for the grading standard
 * @param {number} aqi - maximum AQI
 * @returns {number} AQI level
*/
calculateLevel: function (aqi) {
'use strict';
var level = 0;
if (aqi >= 0 && aqi <= 50) {
level = 1;
} else if (aqi >= 51 && aqi <= 100) {
level = 2;
} else if (aqi >= 101 && aqi <= 150) {
level = 3;
} else if (aqi >= 151 && aqi <= 200) {
level = 4;
} else if (aqi >= 201 && aqi <= 300) {
level = 5;
} else if (aqi > 300) {
level = 6;
}
return level;
},
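  // Example (added for illustration): calculateLevel(42) === 1, calculateLevel(175) === 4,
  // calculateLevel(350) === 6, matching the bands used on aqicn.org.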
/**
 * The callback for getAQIs
 * @callback module:aqicn~getAQIsCallback
 * @param {object} error - request error
 * @param {object} result - object containing information for every pollutant
 */
/**
 * Get all AQI values for the given city
 * @param {string} city - city or area code, see [all locations](http://aqicn.org/city/all/)
 * @param {string} lang - language: cn, en, jp, es, kr, ru, hk, fr, pl (currently only cn and en are available)
* @param {module:aqicn~getAQIsCallback} callback
*/
getAQIs: function (city, lang, callback) {
'use strict';
var self = this;
self.fetchWebPage(city, function (err, body) {
if (err) {
callback(err);
}
var result = {};
var aqis = [];
// City code
result.city = city;
// Time the data was published
result.time = self.selectUpdateTime(body);
// All AQI values
self.info.species.forEach(function (name) {
var aqi = self.selectAQIText(body, name);
aqis.push(aqi);
result[name] = aqi;
});
// Primary AQI value
result.aqi = self.calculateAQI(aqis);
// AQI level and related info
var level = self.calculateLevel(result.aqi);
var levelInfo = self.selectInfoText(level, lang);
result.level = levelInfo;
callback(null, result);
});
},
/**
 * The callback for getAQIByName
 * @callback module:aqicn~getAQIByNameCallback
 * @param {object} error - request error
 * @param {object} result - the city or area code together with the requested AQI value
 */
/**
 * Get the value of a specific pollutant for the given city
 * @param {string} city - city or area code
 * @param {string} name - pollutant code: pm25, pm10, o3, no2, so2, co
* @param {module:aqicn~getAQIByNameCallback} callback
*/
getAQIByName: function (city, name, callback) {
'use strict';
var self = this;
self.getAQIs(city, 'cn', function (err, res) {
if (err) {
callback(err);
}<|fim▁hole|> time: res.time
});
});
}
};<|fim▁end|> | callback(null, {
city: city,
value: res[name], |
<|file_name|>qureg.rs<|end_file_name|><|fim▁begin|>/* qureg.rs: Quantum register
Copyright (C) 2017 Michael Anthony Knyszek
This file is part of rust-libquantum
rust-libquantum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
rust-libquantum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use quantum_sys::{self, quantum_reg};
use std::fmt::{self, Write};
/// A quantum register.
///
/// This structure is a wrapper around `quantum_reg` from the libquantum
/// library. Represents the most basic quantum data structure for which
/// elementary gate operations are implemented as methods.
pub struct QuReg {
reg: quantum_reg,<|fim▁hole|> /// Allocates a new `QuReg`.
///
/// The quantum register is allocated by libquantum given a specified
/// width (number of qubits) and an initialization value. Only the first
/// `width` bits of the `init` value will be used in initialization.
pub fn new(width: usize, init: u64) -> QuReg {
QuReg {
reg: unsafe { quantum_sys::quantum_new_qureg(init, width as i32) },
scratch: 0
}
}
/// Returns the current width of the quantum register, not including scratch space.
pub fn width(&self) -> usize {
debug_assert!(self.reg.width >= 0);
(self.reg.width as usize) - self.scratch
}
pub fn scratch(&self) -> usize {
self.scratch
}
/// Adds a `bits` scratch qubits to the quantum register.
///
/// Scratch qubits are added to the least-significant bit of the register
/// and initialized to zero.
///
/// Scratch qubits should not be preallocated to avoid registering them in
/// the underlying hashtable; however, creating new basis states in the
/// scratch space could overfill the underlying data structure.
pub fn add_scratch(&mut self, bits: usize) {
self.scratch += bits;
unsafe { quantum_sys::quantum_addscratch(bits as i32, self.reg_ptr()) }
}
/// Compute the Kronecker (tensor) product of two registers.
///
/// Consumes the two registers to produce a new register which will contain
/// the tensor product of the two (loosely maps to concatenation).
pub fn tensor(mut self, mut other: QuReg) -> QuReg {
assert_eq!(self.scratch, 0);
assert_eq!(other.scratch, 0);
QuReg {
reg: unsafe { quantum_sys::quantum_kronecker(self.reg_ptr(), other.reg_ptr()) },
scratch: 0
}
}
/// Applies a controlled-NOT gate between two qubits in the quantum register.
pub fn cnot(&mut self, control: usize, target: usize) {
debug_assert!(control < (self.reg.width as usize));
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_cnot(control as i32, target as i32, self.reg_ptr()) }
}
/// Applies a Toffoli gate between three qubits in the quantum register.
pub fn toffoli(&mut self, control1: usize, control2: usize, target: usize) {
debug_assert!(control1 < (self.reg.width as usize));
debug_assert!(control2 < (self.reg.width as usize));
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_toffoli(control1 as i32, control2 as i32, target as i32, self.reg_ptr()) }
}
/// Applies a Pauli X (NOT) gate to a qubit in the quantum register.
pub fn sigma_x(&mut self, target: usize) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_sigma_x(target as i32, self.reg_ptr()) }
}
/// Applies a Pauli Y (phase flip) gate to a qubit in the quantum register.
pub fn sigma_y(&mut self, target: usize) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_sigma_y(target as i32, self.reg_ptr()) }
}
/// Applies a Pauli Z gate to a qubit in the quantum register.
pub fn sigma_z(&mut self, target: usize) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_sigma_z(target as i32, self.reg_ptr()) }
}
/// Rotates a qubit around the x-axis in the Bloch sphere in the quantum register.
pub fn rotate_x(&mut self, target: usize, gamma: f32) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_r_x(target as i32, gamma, self.reg_ptr()) }
}
/// Rotates a qubit around the y-axis in the Bloch sphere in the quantum register.
pub fn rotate_y(&mut self, target: usize, gamma: f32) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_r_y(target as i32, gamma, self.reg_ptr()) }
}
/// Rotates a qubit around the z-axis in the Bloch sphere in the quantum register.
pub fn rotate_z(&mut self, target: usize, gamma: f32) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_r_z(target as i32, gamma, self.reg_ptr()) }
}
/// Applies a global phase to a qubit in the quantum register.
pub fn phase(&mut self, target: usize, gamma: f32) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_phase_scale(target as i32, gamma, self.reg_ptr()) }
}
/// Applies a phase shift to a qubit in the quantum register.
pub fn phaseby(&mut self, target: usize, gamma: f32) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_phase_kick(target as i32, gamma, self.reg_ptr()) }
}
/// Applies the Hadamard gate to a qubit in the quantum register.
pub fn hadamard(&mut self, target: usize) {
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_hadamard(target as i32, self.reg_ptr()) }
}
/// Applies the Hadamard transform to qubits in the quantum register.
///
/// More specifically, this method applies a Hadamard gate to the first
/// `width` qubits in the quantum register.
pub fn walsh(&mut self, width: usize) {
debug_assert!(width <= (self.reg.width as usize));
unsafe { quantum_sys::quantum_walsh(width as i32, self.reg_ptr()) }
}
/// Applies a controlled phase shift to a qubit in the quantum register.
///
/// The applied phase shift is by `pi/2**k` where `k = control - target`
pub fn cond_phase(&mut self, control: usize, target: usize) {
debug_assert!(control < (self.reg.width as usize));
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_cond_phase(control as i32, target as i32, self.reg_ptr()) }
}
/// Applies a controlled arbitrary phase shift to a qubit in the quantum register.
///
/// The applied phase shift is by gamma.
pub fn cond_phaseby(&mut self, control: usize, target: usize, gamma: f32) {
debug_assert!(control < (self.reg.width as usize));
debug_assert!(target < (self.reg.width as usize));
unsafe { quantum_sys::quantum_cond_phase_kick(control as i32, target as i32, gamma, self.reg_ptr()) }
}
/// Applies the quantum Fourier transform to the quantum register.
///
/// More specifically, this method applies a QFT to the first
/// `width` qubits in the quantum register.
pub fn qft(&mut self, width: usize) {
debug_assert!(width <= (self.reg.width as usize));
unsafe { quantum_sys::quantum_qft(width as i32, self.reg_ptr()) }
}
/// Applies the inverse of the quantum Fourier transform to the quantum
/// register.
///
/// More specifically, this method applies an inverse QFT to the first
/// `width` qubits in the quantum register.
pub fn qft_inv(&mut self, width: usize) {
debug_assert!(width <= (self.reg.width as usize));
unsafe { quantum_sys::quantum_qft_inv(width as i32, self.reg_ptr()) }
}
/// Measures the entire quantum register and discards it.
///
/// Returns the result as the first `width` bits in an unsigned integer.
pub fn measure(self) -> usize {
unsafe { quantum_sys::quantum_measure(self.reg) as usize }
}
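    // Illustrative usage sketch (added; not from the original crate): preparing and
    // measuring a Bell pair with this API could look like
    //     let mut q = QuReg::new(2, 0);
    //     q.hadamard(0);
    //     q.cnot(0, 1);
    //     let outcome = q.measure(); // 0b00 or 0b11 with equal probability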
/// Measures a qubit in the quantum register and discards it.
///
/// Returns the result as a Boolean value.
pub fn measure_bit(&mut self, pos: usize) -> bool {
debug_assert!(pos < (self.reg.width as usize));
if pos < self.scratch {
self.scratch -= 1;
}
unsafe { quantum_sys::quantum_bmeasure(pos as i32, self.reg_ptr()) != 0 }
}
/// Measures a qubit in the quantum register without discarding it.
pub fn measure_bit_preserve(&mut self, pos: usize) -> bool {
debug_assert!(pos < (self.reg.width as usize));
unsafe { quantum_sys::quantum_bmeasure_bitpreserve(pos as i32, self.reg_ptr()) as usize != 0 }
}
/// Measures the `width` least significant bits of the register, discarding them.
pub fn measure_width(&mut self, width: usize) -> usize {
debug_assert!(width <= (self.reg.width as usize));
let mut result = 0;
for i in 0..width {
result |= (self.measure_bit(0) as usize) << i;
}
result
}
/// Measures the bit indicies specified in an iterator.
///
/// This method does not discard the qubits.
pub fn measure_partial<I>(&mut self, iter: I) -> usize
where I: IntoIterator<Item=usize> {
let mut result = 0;
for i in iter {
debug_assert!(i < (self.reg.width as usize));
result |= (self.measure_bit_preserve(i) as usize) << i;
}
result
}
/// Peeks at the quantum state, generating an informative string.
pub fn to_string(&self) -> Result<String, fmt::Error> {
let mut s = String::new();
let width = self.width() + self.scratch;
unsafe {
write!(&mut s, "({0}{1:+}i)|{2:03$b}>",
(*self.reg.amplitude).re,
(*self.reg.amplitude).im,
*self.reg.state, width)?;
for i in 1..(self.reg.size as isize) {
write!(&mut s, " + ({0}{1:+}i)|{2:03$b}>",
(*self.reg.amplitude.offset(i)).re,
(*self.reg.amplitude.offset(i)).im,
*self.reg.state.offset(i), width)?;
}
}
Ok(s)
}
#[inline]
unsafe fn reg_ptr(&mut self) -> *mut quantum_reg {
&mut self.reg as *mut quantum_reg
}
}
impl fmt::Debug for QuReg {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "QuReg({}, {})", self.width(), self.scratch)
}
}
impl Drop for QuReg {
/// Reclaims memory from the quantum_reg when a QuReg value is dropped.
fn drop(&mut self) {
unsafe { quantum_sys::quantum_delete_qureg(self.reg_ptr()); }
}
}<|fim▁end|> | scratch: usize,
}
impl QuReg { |
<|file_name|>menu.py<|end_file_name|><|fim▁begin|>"""
Menu Model [DiamondQuest]
Defines a menu.
Author(s): Wilfrantz Dede, Jason C. McDonald, Stanislav Schmidt
"""
# LICENSE (BSD-3-Clause)
# Copyright (c) 2020 MousePaw Media.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# CONTRIBUTING
# See https://www.mousepawmedia.com/developers for information
# on how to contribute to our projects.
import abc
import collections
from enum import Enum
import pygame
from diamondquest.common import FontAttributes, FontAttributeDefaults
from diamondquest.model.game import GameModel
class MenuItem(abc.ABC):
"""An abstract base class for menu items.
Attributes
----------
key_down_listeners : dict
A dictionary storing key listeners.
"""
def __init__(self):
self.key_down_listeners = collections.defaultdict(list)
@property
@abc.abstractmethod
def text(self):
"""The text of the menu item."""
@property
@abc.abstractmethod
def text_attributes(self):
"""The text attributes of the menu item."""
def add_key_down_listener(self, key, listener):
"""Add a key down listener.
Parameters
----------
key : int
The key press that should be handled.
listener : function
The handler for the given key press. It should
take no parameters and not return anything.
"""
if listener not in self.key_down_listeners[key]:
self.key_down_listeners[key].append(listener)
def remove_key_down_listener(self, key, listener):
"""Remove a given key listener.
Parameters
----------
key : int
The key press that was handled by the listener.
listener : function
The listener to remove.
Returns
-------
status : bool
If the listener was found and removed then True is
returned, otherwise False.
"""
if listener in self.key_down_listeners[key]:
self.key_down_listeners[key].remove(listener)
return True
else:
return False
def handle_key_press(self, key):
"""Handle key presses when this item is focused.
Parameters
----------
key : int
The key that was pressed.
"""
for listener in self.key_down_listeners[key]:
listener()
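    # Illustrative flow (added; on_activate is a hypothetical handler name): the
    # controller forwards a pygame key event to the focused item, e.g.
    #   item.add_key_down_listener(pygame.K_RETURN, on_activate)
    #   item.handle_key_press(pygame.K_RETURN)  # invokes on_activate()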
class TextItem(MenuItem):
"""A menu item that is only static text."""
def __init__(
self, text, attributes=FontAttributeDefaults.MENU,
):
super().__init__()
self.raw_text = text
self.attributes = attributes
# icon
@property
def text(self):
return self.raw_text
@property
def text_attributes(self):
return self.attributes
class ButtonType(Enum):
STATIC = 0 # text never changes
SCROLL = 1 # left/right arrows scroll through options
INPUT = 2 # user can type into button text
class ButtonItem(MenuItem):
"""An interactive menu item."""
def __init__(
self,
text,
attributes=FontAttributeDefaults.MENU,
button_type=ButtonType.STATIC,
):
super().__init__()
self.text_item = TextItem(text, attributes)
self.button_type = button_type
@property
def text(self):
return self.text_item.text
@property
def text_attributes(self):
return self.text_item.text_attributes
class MenuType(Enum):
GAME = 0
DEV = 1
class MenuModel:
"""The model for the menu."""
menu_items = {} # a dictionary storing button instances
menus = {} # a dictionary storing menu instances
menu_in_use = MenuType.GAME # which menu the game is currently using
@classmethod
def initialize(cls):
cls.menu_items["text_existing_miner"] = TextItem(text="Existing Miner")
cls.menu_items["scroll_existing_miner"] = ButtonItem(
text="<none>", button_type=ButtonType.SCROLL
)
cls.menu_items["text_new_miner"] = TextItem(text="New Miner")
cls.menu_items["input_new_miner"] = ButtonItem(
text="Enter Name", button_type=ButtonType.INPUT
)
cls.menu_items["scroll_music_volume"] = ButtonItem(
text="Music: 10", button_type=ButtonType.SCROLL
)
cls.menu_items["scroll_sound_volume"] = ButtonItem(
text="Sound: 10", button_type=ButtonType.SCROLL
)
cls.menu_items["button_quit"] = ButtonItem(text="QUIT")
cls.menu_items["button_quit"].add_key_down_listener(<|fim▁hole|> pygame.K_RETURN, lambda: GameModel.stop_game()
)
cls.menus[MenuType.GAME] = MenuModel(
title="DiamondQuest",
items=[
cls.menu_items["text_existing_miner"],
cls.menu_items["scroll_existing_miner"],
cls.menu_items["text_new_miner"],
cls.menu_items["input_new_miner"],
cls.menu_items["scroll_music_volume"],
cls.menu_items["scroll_sound_volume"],
cls.menu_items["button_quit"],
],
)
cls.menus[MenuType.DEV] = MenuModel(title="DevMenu", items=[])
@classmethod
def get_menu(cls, menu_type=None):
"""Called by the View to get the contents of the menu."""
# If no specific menu is requested, get the default.
if menu_type is None:
menu_type = cls.menu_in_use
if menu_type not in cls.menus:
raise ValueError(f"No such menu type {menu_type}")
return cls.menus[menu_type]
@classmethod
def use_menu(cls, menu_type):
"""Select which menu to use by default."""
cls.menu_in_use = menu_type
def __init__(self, title, items):
self.title = TextItem(title)
self.items = items
self.selectable_items = [
i for i, item in enumerate(items) if isinstance(item, ButtonItem)
]
self.which_selected = 0 if len(self.selectable_items) > 0 else -1
@property
def selected_item_idx(self):
if self.which_selected == -1:
return -1
return self.selectable_items[self.which_selected]
def __iter__(self):
iter(self.items)
@classmethod
def select_next_item(cls):
menu = cls.get_menu()
n_items = len(menu.selectable_items)
menu.which_selected = (menu.which_selected + 1) % n_items
@classmethod
def select_prev_item(cls):
menu = cls.get_menu()
n_items = len(menu.selectable_items)
menu.which_selected = (menu.which_selected - 1 + n_items) % n_items
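    # Example (added for illustration): with 5 selectable items and which_selected == 0,
    # select_prev_item() wraps around to 4; select_next_item() from 4 wraps back to 0.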
@classmethod
def get_selected_item(cls):
menu = cls.get_menu()
idx = menu.selected_item_idx
if idx >= 0:
return menu.items[idx]
else:
return None<|fim▁end|> | |
<|file_name|>filter_bodies.py<|end_file_name|><|fim▁begin|>from __future__ import division
import json
import os
import copy
import collections
import argparse
import csv
import neuroglancer
import neuroglancer.cli
import numpy as np
class State(object):
def __init__(self, path):
self.path = path
self.body_labels = collections.OrderedDict()
def load(self):
if os.path.exists(self.path):
with open(self.path, 'r') as f:
self.body_labels = collections.OrderedDict(json.load(f))
def save(self):
tmp_path = self.path + '.tmp'
with open(tmp_path, 'w') as f:
f.write(json.dumps(self.body_labels.items()))
os.rename(tmp_path, self.path)
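    # Note (added): writing to a temporary file and renaming it keeps the previous
    # state file intact if the process dies mid-write; os.rename replaces the
    # destination atomically on POSIX when both paths are on the same filesystem.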
Body = collections.namedtuple('Body', ['segment_id', 'num_voxels', 'bbox_start', 'bbox_size'])
class Tool(object):
def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
self.state = State(state_path)
self.num_to_prefetch = num_to_prefetch
self.viewer = neuroglancer.Viewer()
self.bodies = bodies
self.state.load()
self.total_voxels = sum(x.num_voxels for x in bodies)
self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])
with self.viewer.txn() as s:
s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
s.show_slices = False
s.concurrent_downloads = 256
s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
s.layout = '3d'
key_bindings = [
['bracketleft', 'prev-index'],
['bracketright', 'next-index'],
['home', 'first-index'],
['end', 'last-index'],
['control+keys', 'save'],
]
label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
for label, label_key in zip(labels, label_keys):
key_bindings.append([label_key, 'label-%s' % label])
def label_func(s, label=label):
self.set_label(s, label)
self.viewer.actions.add('label-%s' % label, label_func)
self.viewer.actions.add('prev-index', self._prev_index)
self.viewer.actions.add('next-index', self._next_index)
self.viewer.actions.add('first-index', self._first_index)
self.viewer.actions.add('last-index', self._last_index)
self.viewer.actions.add('save', self.save)
with self.viewer.config_state.txn() as s:
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
for key, command in key_bindings))
self.index = -1
self.set_index(self._find_one_after_last_labeled_index())
def _find_one_after_last_labeled_index(self):
body_index = 0
while self.bodies[body_index].segment_id in self.state.body_labels:
body_index += 1
return body_index
def set_index(self, index):
if index == self.index:
return
body = self.bodies[index]
self.index = index
def modify_state_for_body(s, body):
s.layers['segmentation'].segments = frozenset([body.segment_id])
s.voxel_coordinates = body.bbox_start + body.bbox_size // 2
with self.viewer.txn() as s:
modify_state_for_body(s, body)
prefetch_states = []<|fim▁hole|> for i in range(self.num_to_prefetch):
prefetch_index = self.index + i + 1
if prefetch_index >= len(self.bodies):
break
prefetch_state = copy.deepcopy(self.viewer.state)
prefetch_state.layout = '3d'
modify_state_for_body(prefetch_state, self.bodies[prefetch_index])
prefetch_states.append(prefetch_state)
with self.viewer.config_state.txn() as s:
s.prefetch = [
neuroglancer.PrefetchState(state=prefetch_state, priority=-i)
for i, prefetch_state in enumerate(prefetch_states)
]
label = self.state.body_labels.get(body.segment_id, '')
with self.viewer.config_state.txn() as s:
s.status_messages['status'] = (
'[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s' %
(index, len(self.bodies), self.cumulative_voxels[index], self.total_voxels,
self.cumulative_voxels[index] / self.total_voxels, label))
def save(self, s):
self.state.save()
def set_label(self, s, label):
self.state.body_labels[self.bodies[self.index].segment_id] = label
self.set_index(self.index + 1)
def _first_index(self, s):
self.set_index(0)
def _last_index(self, s):
self.set_index(max(0, self._find_one_after_last_labeled_index() - 1))
def _next_index(self, s):
self.set_index(self.index + 1)
def _prev_index(self, s):
self.set_index(max(0, self.index - 1))
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
ap.add_argument('--image-url', required=True, help='Neuroglancer data source URL for image')
ap.add_argument('--segmentation-url',
required=True,
help='Neuroglancer data source URL for segmentation')
ap.add_argument('--state', required=True, help='Path to proofreading state file')
ap.add_argument('--bodies', required=True, help='Path to list of bodies to proofread')
ap.add_argument('--labels', nargs='+', help='Labels to use')
ap.add_argument('--prefetch', type=int, default=10, help='Number of bodies to prefetch')
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
bodies = []
with open(args.bodies, 'r') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
bodies.append(
Body(
segment_id=int(row['id']),
num_voxels=int(row['num_voxels']),
bbox_start=np.array([
int(row['bbox.start.x']),
int(row['bbox.start.y']),
int(row['bbox.start.z'])
],
dtype=np.int64),
bbox_size=np.array(
[int(row['bbox.size.x']),
int(row['bbox.size.y']),
int(row['bbox.size.z'])],
dtype=np.int64),
))
tool = Tool(
state_path=args.state,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
labels=args.labels,
bodies=bodies,
num_to_prefetch=args.prefetch,
)
print(tool.viewer)<|fim▁end|> | |
<|file_name|>issue-11958.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![forbid(warnings)]
// We shouldn't need to rebind a moved upvar as mut if it's already
// marked as mut
pub fn main() {
let mut x = 1;<|fim▁hole|><|fim▁end|> | let _thunk = Box::new(move|| { x = 2; });
} |
<|file_name|>progress_bar.rs<|end_file_name|><|fim▁begin|>// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use Buildable;
use Orientable;
use Widget;
use ffi;
use glib::GString;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect_raw;
use glib::translate::*;
use glib_ffi;
use pango;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib_wrapper! {
pub struct ProgressBar(Object<ffi::GtkProgressBar, ffi::GtkProgressBarClass, ProgressBarClass>) @extends Widget, @implements Buildable, Orientable;
match fn {
get_type => || ffi::gtk_progress_bar_get_type(),
}
}
impl ProgressBar {
pub fn new() -> ProgressBar {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_progress_bar_new()).unsafe_cast()
}
}
}
impl Default for ProgressBar {
fn default() -> Self {
Self::new()
}
}
pub const NONE_PROGRESS_BAR: Option<&ProgressBar> = None;
pub trait ProgressBarExt: 'static {
fn get_ellipsize(&self) -> pango::EllipsizeMode;
fn get_fraction(&self) -> f64;
fn get_inverted(&self) -> bool;
fn get_pulse_step(&self) -> f64;
fn get_show_text(&self) -> bool;
fn get_text(&self) -> Option<GString>;
fn pulse(&self);
fn set_ellipsize(&self, mode: pango::EllipsizeMode);
fn set_fraction(&self, fraction: f64);
fn set_inverted(&self, inverted: bool);
fn set_pulse_step(&self, fraction: f64);
fn set_show_text(&self, show_text: bool);
fn set_text<'a, P: Into<Option<&'a str>>>(&self, text: P);
fn connect_property_ellipsize_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_fraction_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_inverted_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_pulse_step_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_show_text_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_text_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<ProgressBar>> ProgressBarExt for O {
fn get_ellipsize(&self) -> pango::EllipsizeMode {
unsafe {
from_glib(ffi::gtk_progress_bar_get_ellipsize(self.as_ref().to_glib_none().0))
}
}
fn get_fraction(&self) -> f64 {
unsafe {
ffi::gtk_progress_bar_get_fraction(self.as_ref().to_glib_none().0)
}
}
fn get_inverted(&self) -> bool {
unsafe {
from_glib(ffi::gtk_progress_bar_get_inverted(self.as_ref().to_glib_none().0))
}
}
fn get_pulse_step(&self) -> f64 {
unsafe {
ffi::gtk_progress_bar_get_pulse_step(self.as_ref().to_glib_none().0)
}
}
fn get_show_text(&self) -> bool {
unsafe {
from_glib(ffi::gtk_progress_bar_get_show_text(self.as_ref().to_glib_none().0))
}
}
fn get_text(&self) -> Option<GString> {
unsafe {
from_glib_none(ffi::gtk_progress_bar_get_text(self.as_ref().to_glib_none().0))
}
}
fn pulse(&self) {
unsafe {
ffi::gtk_progress_bar_pulse(self.as_ref().to_glib_none().0);
}
}
fn set_ellipsize(&self, mode: pango::EllipsizeMode) {
unsafe {
ffi::gtk_progress_bar_set_ellipsize(self.as_ref().to_glib_none().0, mode.to_glib());
}
}
fn set_fraction(&self, fraction: f64) {
unsafe {
ffi::gtk_progress_bar_set_fraction(self.as_ref().to_glib_none().0, fraction);
}
}
fn set_inverted(&self, inverted: bool) {
unsafe {
ffi::gtk_progress_bar_set_inverted(self.as_ref().to_glib_none().0, inverted.to_glib());
}
}
fn set_pulse_step(&self, fraction: f64) {
unsafe {
ffi::gtk_progress_bar_set_pulse_step(self.as_ref().to_glib_none().0, fraction);
}
}
fn set_show_text(&self, show_text: bool) {
unsafe {
ffi::gtk_progress_bar_set_show_text(self.as_ref().to_glib_none().0, show_text.to_glib());
}
}
fn set_text<'a, P: Into<Option<&'a str>>>(&self, text: P) {
let text = text.into();
unsafe {
ffi::gtk_progress_bar_set_text(self.as_ref().to_glib_none().0, text.to_glib_none().0);
}
}
fn connect_property_ellipsize_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::ellipsize\0".as_ptr() as *const _,
Some(transmute(notify_ellipsize_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_fraction_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::fraction\0".as_ptr() as *const _,
Some(transmute(notify_fraction_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_inverted_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::inverted\0".as_ptr() as *const _,
Some(transmute(notify_inverted_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_pulse_step_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::pulse-step\0".as_ptr() as *const _,
Some(transmute(notify_pulse_step_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_show_text_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::show-text\0".as_ptr() as *const _,
Some(transmute(notify_show_text_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
fn connect_property_text_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::text\0".as_ptr() as *const _,
Some(transmute(notify_text_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
}
unsafe extern "C" fn notify_ellipsize_trampoline<P, F: Fn(&P) + 'static>(this: *mut ffi::GtkProgressBar, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<ProgressBar> {
let f: &F = transmute(f);
f(&ProgressBar::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_fraction_trampoline<P, F: Fn(&P) + 'static>(this: *mut ffi::GtkProgressBar, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<ProgressBar> {
let f: &F = transmute(f);
f(&ProgressBar::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_inverted_trampoline<P, F: Fn(&P) + 'static>(this: *mut ffi::GtkProgressBar, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<ProgressBar> {
let f: &F = transmute(f);
f(&ProgressBar::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_pulse_step_trampoline<P, F: Fn(&P) + 'static>(this: *mut ffi::GtkProgressBar, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<ProgressBar> {
let f: &F = transmute(f);
f(&ProgressBar::from_glib_borrow(this).unsafe_cast())
}
unsafe extern "C" fn notify_show_text_trampoline<P, F: Fn(&P) + 'static>(this: *mut ffi::GtkProgressBar, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<ProgressBar> {
let f: &F = transmute(f);<|fim▁hole|>
unsafe extern "C" fn notify_text_trampoline<P, F: Fn(&P) + 'static>(this: *mut ffi::GtkProgressBar, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<ProgressBar> {
let f: &F = transmute(f);
f(&ProgressBar::from_glib_borrow(this).unsafe_cast())
}
impl fmt::Display for ProgressBar {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ProgressBar")
}
}<|fim▁end|> | f(&ProgressBar::from_glib_borrow(this).unsafe_cast())
} |
<|file_name|>toxicity.js<|end_file_name|><|fim▁begin|>const Command = require('../../structures/Command');
const request = require('node-superfetch');<|fim▁hole|> constructor(client) {
super(client, {
name: 'toxicity',
aliases: ['perspective', 'comment-toxicity'],
group: 'analyze',
memberName: 'toxicity',
description: 'Determines the toxicity of text.',
credit: [
{
name: 'Perspective API',
url: 'https://www.perspectiveapi.com/#/'
}
],
args: [
{
key: 'text',
prompt: 'What text do you want to test the toxicity of?',
type: 'string'
}
]
});
}
async run(msg, { text }) {
try {
const { body } = await request
.post('https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze')
.query({ key: GOOGLE_KEY })
.send({
comment: { text },
languages: ['en'],
requestedAttributes: { TOXICITY: {} }
});
const toxicity = Math.round(body.attributeScores.TOXICITY.summaryScore.value * 100);
if (toxicity >= 70) return msg.reply(`Likely to be perceived as toxic. (${toxicity}%)`);
if (toxicity >= 40) return msg.reply(`Unsure if this will be perceived as toxic. (${toxicity}%)`);
return msg.reply(`Unlikely to be perceived as toxic. (${toxicity}%)`);
} catch (err) {
return msg.reply(`Oh no, an error occurred: \`${err.message}\`. Try again later!`);
}
}
};<|fim▁end|> | const { GOOGLE_KEY } = process.env;
module.exports = class ToxicityCommand extends Command { |
<|file_name|>fragments.spec.js<|end_file_name|><|fim▁begin|>describe("Fragments", function () {
it('replace node with fragment', function () {
var node = render(
d('div', null,
'Hello',
d('div', null, 'World')
), document.body);
compare(node.dom, div(text('Hello'), div(text('World'))));
node = update(node,
d('div', null,
d('@', null, 1, 2, 3),
'Boom'
));
compare(node.dom, udiv(text(1), text(2), text(3), text('Boom')));
});
it('replace fragment with fragment', function () {
var node = render(
d('div', null,
'Hello',
d('@', null, 1, 2, 3),
'World'
), document.body);
compare(node.dom, div(text('Hello'), text(1), text(2), text(3), text('World')));
node = update(node,
d('div', null,
'Hello',
d('@', null, 4, 5, 6),
'World'
));
compare(node.dom, udiv(utext('Hello'), utext(4), utext(5), utext(6), utext('World')));
});
it('replace deep fragment with deep fragment', function () {
var node = render(
d('div', null,
'Hello',
0,
d('@', null,
1,
d('@', null,
4,
d('@', null, 7, 8),
5),
3),
'World'), document.body);
compare(node.dom, div(text('Hello'), text(0), text(1), text(4), text(7), text(8), text(5), text(3), text('World')));
node = update(node,
d('div', null,
'Hello',
d('@', null, 3, 4),
d('@', null,
1,
d('@', null,
d('@', null, 7, 8),
4,
5),
3),
'World'));
compare(node.dom, udiv(utext('Hello'), text(3), text(4), utext(1), text(7), text(8), text(4), utext(5), utext(3), utext('World')));
});<|fim▁hole|>
it("replace fragment with node", function () {
var node = render(
d('div', null,
d('@', null,
1,
d('@', null, 4, 5, 6),
3
)), document.body);
compare(node.dom, div(text(1), text(4), text(5), text(6), text(3)));
node = update(node,
d('div', null,
d('@', null,
1, 2, 3)));
compare(node.dom, udiv(utext(1), text(2), utext(3)));
});
it("set attrs", function () {
var node = render(
d('div', null,
d('@', {class: 'cls'}, 1, 2)), document.body);
compare(node.dom, div(text(1), text(2)));
node = update(node, d('div', null,
d('@', {class: 'cls'}, 1, 2)));
compare(node.dom, udiv(utext(1), utext(2)));
});
});<|fim▁end|> | |
<|file_name|>settings_db_init.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import sqlite3
import os
def init():
"""
Creates and initializes settings database.
Doesn't do anything if the file already exists. Remove the local copy to recreate the database.
"""
if not os.path.isfile("settings.sqlite"):
app_db_connection = sqlite3.connect('settings.sqlite')
app_db = app_db_connection.cursor()
app_db.execute("CREATE TABLE oauth (site, rate_remaining, rate_reset)")
app_db.execute("INSERT INTO oauth VALUES ('reddit', 30, 60)")
app_db_connection.commit()
app_db_connection.close()
if __name__ == "__main__":
init()<|fim▁end|> | |
<|file_name|>service_plan_info.py<|end_file_name|><|fim▁begin|>from office365.runtime.client_value import ClientValue
class ServicePlanInfo(ClientValue):
"""Contains information about a service plan associated with a subscribed SKU. The servicePlans property of
the subscribedSku entity is a collection of servicePlanInfo."""
def __init__(self, _id=None, name=None, provisioning_status=None, applies_to=None):
"""
:param str applies_to: The object the service plan can be assigned to. Possible values:
"User" - service plan can be assigned to individual users.
"Company" - service plan can be assigned to the entire tenant.
:param str provisioning_status: The provisioning status of the service plan. Possible values:
"Success" - Service is fully provisioned.
"Disabled" - Service has been disabled.<|fim▁hole|> (for example, Intune_O365 service plan)
"PendingProvisioning" - Microsoft has added a new service to the product SKU and it has not been
activated in the tenant, yet.
:param str name: The name of the service plan.
:param str _id: The unique identifier of the service plan.
"""
super(ServicePlanInfo, self).__init__()
self.servicePlanId = _id
self.servicePlanName = name
self.provisioningStatus = provisioning_status
self.appliesTo = applies_to<|fim▁end|> | "PendingInput" - Service is not yet provisioned; awaiting service confirmation.
"PendingActivation" - Service is provisioned but requires explicit activation by administrator |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2017, Igor Shaula
// Licensed under the MIT License <LICENSE or
// http://opensource.org/licenses/MIT>. This file
// may not be copied, modified, or distributed
// except according to those terms.
use super::enums::*;
use super::RegKey;
use std::error::Error;
use std::fmt;
use std::io;
use winapi::shared::minwindef::DWORD;
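// Helper macros used by the decoder below (and by the feature-gated
// `serialization_serde` module): `read_value!` reads the registry value for
// the field name that was just consumed, `parse_string!` additionally parses
// that string into the target type, and `no_impl!` reports unsupported types.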
macro_rules! read_value {
($s:ident) => {
match mem::replace(&mut $s.f_name, None) {
Some(ref s) => $s.key.get_value(s).map_err(DecoderError::IoError),
None => Err(DecoderError::NoFieldName),
}
};
}
macro_rules! parse_string {
($s:ident) => {{
let s: String = read_value!($s)?;
s.parse()
.map_err(|e| DecoderError::ParseError(format!("{:?}", e)))
}};
}
macro_rules! no_impl {
($e:expr) => {
Err(DecoderError::DecodeNotImplemented($e.to_owned()))
};
}
#[cfg(feature = "serialization-serde")]
mod serialization_serde;
#[derive(Debug)]
pub enum DecoderError {
DecodeNotImplemented(String),
DeserializerError(String),
IoError(io::Error),
ParseError(String),
NoFieldName,
}
impl fmt::Display for DecoderError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Error for DecoderError {}
impl From<io::Error> for DecoderError {<|fim▁hole|>}
pub type DecodeResult<T> = Result<T, DecoderError>;
#[derive(Debug)]
enum DecoderReadingState {
WaitingForKey,
WaitingForValue,
}
#[derive(Debug)]
enum DecoderEnumerationState {
EnumeratingKeys(DWORD),
EnumeratingValues(DWORD),
}
#[derive(Debug)]
pub struct Decoder {
key: RegKey,
f_name: Option<String>,
reading_state: DecoderReadingState,
enumeration_state: DecoderEnumerationState,
}
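// Access rights requested when opening a key for decoding: query values and
// enumerate subkeys.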
const DECODER_SAM: DWORD = KEY_QUERY_VALUE | KEY_ENUMERATE_SUB_KEYS;
impl Decoder {
pub fn from_key(key: &RegKey) -> DecodeResult<Decoder> {
key.open_subkey_with_flags("", DECODER_SAM)
.map(Decoder::new)
.map_err(DecoderError::IoError)
}
fn new(key: RegKey) -> Decoder {
Decoder {
key,
f_name: None,
reading_state: DecoderReadingState::WaitingForKey,
enumeration_state: DecoderEnumerationState::EnumeratingKeys(0),
}
}
}<|fim▁end|> | fn from(err: io::Error) -> DecoderError {
DecoderError::IoError(err)
} |
<|file_name|>PageControl.cpp<|end_file_name|><|fim▁begin|>/*
GWEN
Copyright (c) 2012 Facepunch Studios
See license in Gwen.h
*/
#include <rose/Rose.hpp>
#include <rose/Utility.hpp>
#include <rose/Skin.hpp>
#include <rose/ctl/PageControl.hpp>
#include <rose/ctl/Controls.hpp>
using namespace rose;
using namespace rose::ctl;
GWEN_CONTROL_CONSTRUCTOR(PageControl)
{
m_iPages = 0;
m_iCurrentPage = 0;
SetUseFinishButton(true);
for(int i = 0; i < MaxPages; i++)
{
m_pPages[i] = NULL;
}
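    // Bottom strip that holds the Back/Next/Finish buttons and the page counter label.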
Widget* pControls = new Widget(this);
pControls->Dock(EWP_BOTTOM);
pControls->SetSize(24, 24);
pControls->SetMargin(Margin(10, 10, 10, 10));
m_Finish = new ctl::Button(pControls);
m_Finish->SetText("Finish");
m_Finish->Dock(EWP_RIGHT);
m_Finish->onPress.Add(this, &ThisClass::Finish);
m_Finish->SetSize(70);
m_Finish->SetMargin(Margin(4, 0, 0, 0));
m_Finish->Hide();
m_Next = new ctl::Button(pControls);
m_Next->SetText("Next >");
m_Next->Dock(EWP_RIGHT);
m_Next->onPress.Add(this, &ThisClass::NextPage);
m_Next->SetSize(70);
m_Next->SetMargin(Margin(4, 0, 0, 0));
m_Back = new ctl::Button(pControls);
m_Back->SetText("< Back");
m_Back->Dock(EWP_RIGHT);
m_Back->onPress.Add(this, &ThisClass::PreviousPage);
m_Back->SetSize(70);
m_Label = new ctl::Label(pControls);
m_Label->Dock(EWP_FILL);
m_Label->SetAlignment(EWP_LEFT | EWP_CENTERV);
    m_Label->SetText("Page 1 of 2");
}
void PageControl::SetPageCount(unsigned int iNum)
{
if(iNum >= MaxPages)
{
iNum = MaxPages;
}
for(unsigned int i = 0; i < iNum; i++)
{
if(!m_pPages[i])
{
m_pPages[i] = new ctl::Widget(this);
m_pPages[i]->Dock(EWP_FILL);
}
}
m_iPages = iNum;
// Setting to -1 to force the page to change
m_iCurrentPage = -1;
HideAll();
ShowPage(0);
}
void PageControl::HideAll()
{
for(int i = 0; i < MaxPages; i++)
{
if(!m_pPages[i])
{
continue;
}
m_pPages[i]->Hide();
}
}
void PageControl::ShowPage(unsigned int i)
{
if(m_iCurrentPage == i)
{
return;
}
if(m_pPages[i])
{
m_pPages[i]->Show();
m_pPages[i]->Dock(EWP_FILL);
}
m_iCurrentPage = i;
m_Back->SetDisabled(m_iCurrentPage == 0);
m_Next->SetDisabled(m_iCurrentPage >= m_iPages);
m_Label->SetText(Utility::Format("Page %i of %i", m_iCurrentPage + 1, m_iPages));
if(GetUseFinishButton())
{
bool bFinished = m_iCurrentPage >= m_iPages - 1;
m_Next->SetHidden(bFinished);
m_Finish->SetHidden(!bFinished);
}
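    // Notify listeners that the visible page has changed.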
{
EventInfo info;
info.Integer = i;
info.Control = m_pPages[i];
onPageChanged.Call(this, info);
}
}
ctl::Widget* PageControl::GetPage(unsigned int i)
{
return m_pPages[i];
}
ctl::Widget * PageControl::GetCurrentPage()
{
return GetPage(GetPageNumber());
}
void PageControl::NextPage()
{
if(m_iCurrentPage >= m_iPages - 1)
{
return;
}
if(m_pPages[m_iCurrentPage])
{
m_pPages[m_iCurrentPage]->Dock(EWP_NONE);
//Anim::Add(m_pPages[m_iCurrentPage], new Anim::Pos::X(m_pPages[m_iCurrentPage]->X(), Width() * -1, 0.2f, true, 0.0f, -1));
}
ShowPage(m_iCurrentPage + 1);
if(m_pPages[m_iCurrentPage])
{
m_pPages[m_iCurrentPage]->Dock(EWP_NONE);
//Anim::Add(m_pPages[m_iCurrentPage], new Anim::Pos::X(Width(), 0, 0.2f, false, 0.0f, -1));
}
}
void PageControl::PreviousPage()
{
if(m_iCurrentPage == 0)
{
return;
}
if(m_pPages[m_iCurrentPage])
{
m_pPages[m_iCurrentPage]->Dock(EWP_NONE);
//Anim::Add(m_pPages[m_iCurrentPage], new Anim::Pos::X(m_pPages[m_iCurrentPage]->X(), Width(), 0.3f, true, 0.0f, -1));<|fim▁hole|>
if(m_pPages[m_iCurrentPage])
{
m_pPages[m_iCurrentPage]->Dock(EWP_NONE);
//Anim::Add(m_pPages[m_iCurrentPage], new Anim::Pos::X(Width() * -1, 0, 0.3f, false, 0.0f, -1));
}
}
void PageControl::Finish()
{
onFinish.Call(this);
}<|fim▁end|> | }
ShowPage(m_iCurrentPage - 1); |
<|file_name|>library.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015 Alex Meade
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Navneet Singh
# Copyright (c) 2015 Yogesh Kshirsagar
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import socket
import time
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder import utils as cinder_utils
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import exception as eseries_exc
from cinder.volume.drivers.netapp.eseries import host_mapper
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(na_opts.netapp_basicauth_opts)
CONF.register_opts(na_opts.netapp_connection_opts)
CONF.register_opts(na_opts.netapp_eseries_opts)
CONF.register_opts(na_opts.netapp_transport_opts)
CONF.register_opts(na_opts.netapp_san_opts)
class NetAppESeriesLibrary(object):
"""Executes commands relating to Volumes."""
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_server_hostname', 'netapp_controller_ips',
'netapp_login', 'netapp_password',
'netapp_storage_pools']
SLEEP_SECS = 5
HOST_TYPES = {'aix': 'AIX MPIO',
'avt': 'AVT_4M',
'factoryDefault': 'FactoryDefault',
'hpux': 'HP-UX TPGS',
'linux_atto': 'LnxTPGSALUA',
'linux_dm_mp': 'LnxALUA',
'linux_mpp_rdac': 'Linux',
'linux_pathmanager': 'LnxTPGSALUA_PM',
'macos': 'MacTPGSALUA',
'ontap': 'ONTAP',
'svc': 'SVC',
'solaris_v11': 'SolTPGSALUA',
'solaris_v10': 'Solaris',
'vmware': 'VmwTPGSALUA',
'windows':
'Windows 2000/Server 2003/Server 2008 Non-Clustered',
'windows_atto': 'WinTPGSALUA',
'windows_clustered':
'Windows 2000/Server 2003/Server 2008 Clustered'
}
# NOTE(ameade): This maps what is reported by the e-series api to a
# consistent set of values that are reported by all NetApp drivers
# to the cinder scheduler.
SSC_DISK_TYPE_MAPPING = {
'scsi': 'SCSI',
'fibre': 'FCAL',
'sas': 'SAS',
'sata': 'SATA',
}
SSC_UPDATE_INTERVAL = 60 # seconds
WORLDWIDENAME = 'worldWideName'
DEFAULT_HOST_TYPE = 'linux_dm_mp'
def __init__(self, driver_name, driver_protocol="iSCSI",
configuration=None, **kwargs):
self.configuration = configuration
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(
na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_eseries_opts)
self.configuration.append_config_values(na_opts.netapp_san_opts)
self.lookup_service = fczm_utils.create_lookup_service()
self._backend_name = self.configuration.safe_get(
"volume_backend_name") or "NetApp_ESeries"
self.driver_name = driver_name
self.driver_protocol = driver_protocol
self._stats = {}
self._ssc_stats = {}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self._client = self._create_rest_client(self.configuration)
self._check_mode_get_or_register_storage_system()
if self.configuration.netapp_enable_multiattach:
self._ensure_multi_attach_host_group_exists()
def _create_rest_client(self, configuration):
port = configuration.netapp_server_port
scheme = configuration.netapp_transport_type.lower()
if port is None:
if scheme == 'http':
port = 8080
elif scheme == 'https':
port = 8443
return client.RestClient(
scheme=scheme,
host=configuration.netapp_server_hostname,
port=port,
service_path=configuration.netapp_webservice_path,
username=configuration.netapp_login,
password=configuration.netapp_password)
def _start_periodic_tasks(self):
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc_info)
ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL)
def check_for_setup_error(self):
self._check_host_type()
self._check_multipath()
self._check_storage_system()
self._start_periodic_tasks()
def _check_host_type(self):
host_type = (self.configuration.netapp_host_type
or self.DEFAULT_HOST_TYPE)
self.host_type = self.HOST_TYPES.get(host_type)
if not self.host_type:
raise exception.NetAppDriverException(
_('Configured host type is not supported.'))
def _check_multipath(self):
if not self.configuration.use_multipath_for_image_xfer:
LOG.warning(_LW('Production use of "%(backend)s" backend requires '
'the Cinder controller to have multipathing '
'properly set up and the configuration option '
'"%(mpflag)s" to be set to "True".'),
{'backend': self._backend_name,
'mpflag': 'use_multipath_for_image_xfer'})
def _ensure_multi_attach_host_group_exists(self):
try:
host_group = self._client.get_host_group_by_name(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
LOG.info(_LI("The multi-attach E-Series host group '%(label)s' "
"already exists with clusterRef %(clusterRef)s"),
host_group)
except exception.NotFound:
host_group = self._client.create_host_group(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
LOG.info(_LI("Created multi-attach E-Series host group %(label)s "
"with clusterRef %(clusterRef)s"), host_group)
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
def _resolve_host(host):
try:
ip = na_utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'),
{'host': host, 'e': e})
raise exception.NoValidHost(
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
% {'host': host, 'e': e})
ips = self.configuration.netapp_controller_ips
ips = [i.strip() for i in ips.split(",")]
ips = [x for x in ips if _resolve_host(x)]
host = na_utils.resolve_hostname(
self.configuration.netapp_server_hostname)
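        # If the configured web service host is one of the controller IPs, the
        # web service runs embedded on the array; otherwise it is an external
        # proxy and the controllers must be registered with it.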
if host in ips:
LOG.info(_LI('Embedded mode detected.'))
system = self._client.list_storage_systems()[0]
else:
LOG.info(_LI('Proxy mode detected.'))
system = self._client.register_storage_system(
ips, password=self.configuration.netapp_sa_password)
self._client.set_system_id(system.get('id'))
def _check_storage_system(self):
"""Checks whether system is registered and has good status."""
try:
system = self._client.list_storage_system()
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
LOG.info(_LI("System with controller addresses [%s] is not "
"registered with web service."),
self.configuration.netapp_controller_ips)
password_not_in_sync = False
if system.get('status', '').lower() == 'passwordoutofsync':
password_not_in_sync = True
new_pwd = self.configuration.netapp_sa_password
self._client.update_stored_system_password(new_pwd)
time.sleep(self.SLEEP_SECS)
sa_comm_timeout = 60
comm_time = 0
while True:
system = self._client.list_storage_system()
status = system.get('status', '').lower()
# wait if array not contacted or
# password was not in sync previously.
if ((status == 'nevercontacted') or
(password_not_in_sync and status == 'passwordoutofsync')):
LOG.info(_LI('Waiting for web service array communication.'))
time.sleep(self.SLEEP_SECS)
comm_time = comm_time + self.SLEEP_SECS
if comm_time >= sa_comm_timeout:
msg = _("Failure in communication between web service and"
" array. Waited %s seconds. Verify array"
" configuration parameters.")
raise exception.NetAppDriverException(msg %
sa_comm_timeout)
else:
break
msg_dict = {'id': system.get('id'), 'status': status}
if (status == 'passwordoutofsync' or status == 'notsupported' or
status == 'offline'):
raise exception.NetAppDriverException(
_("System %(id)s found with bad status - "
"%(status)s.") % msg_dict)
LOG.info(_LI("System %(id)s has %(status)s status."), msg_dict)
return True
def _get_volume(self, uid):
label = utils.convert_uuid_to_es_fmt(uid)
return self._get_volume_with_label_wwn(label)
def _get_volume_with_label_wwn(self, label=None, wwn=None):
"""Searches volume with label or wwn or both."""
if not (label or wwn):
raise exception.InvalidInput(_('Either volume label or wwn'
' is required as input.'))
wwn = wwn.replace(':', '').upper() if wwn else None
eseries_volume = None
for vol in self._client.list_volumes():
if label and vol.get('label') != label:
continue
if wwn and vol.get(self.WORLDWIDENAME).upper() != wwn:
continue
eseries_volume = vol
break
if not eseries_volume:
raise KeyError()
return eseries_volume
def _get_snapshot_group_for_snapshot(self, snapshot_id):
label = utils.convert_uuid_to_es_fmt(snapshot_id)
for group in self._client.list_snapshot_groups():
if group['label'] == label:
return group
msg = _("Specified snapshot group with label %s could not be found.")
raise exception.NotFound(msg % label)
def _get_latest_image_in_snapshot_group(self, snapshot_id):
group = self._get_snapshot_group_for_snapshot(snapshot_id)
images = self._client.list_snapshot_images()
if images:
filtered_images = filter(lambda img: (img['pitGroupRef'] ==
group['pitGroupRef']),
images)
sorted_imgs = sorted(filtered_images, key=lambda x: x[
'pitTimestamp'])
return sorted_imgs[0]
msg = _("No snapshot image found in snapshot group %s.")
raise exception.NotFound(msg % group['label'])
def _is_volume_containing_snaps(self, label):
"""Checks if volume contains snapshot groups."""
vol_id = utils.convert_es_fmt_to_uuid(label)
for snap in self._client.list_snapshot_groups():
if snap['baseVolume'] == vol_id:
return True
return False
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
eseries_volume = self._get_volume(volume['name_id'])
storage_pool = self._client.get_storage_pool(
eseries_volume['volumeGroupRef'])
if storage_pool:
return storage_pool.get('label')
def create_volume(self, volume):
"""Creates a volume."""
LOG.debug('create_volume on %s', volume['host'])
# get E-series pool label as pool name
eseries_pool_label = volume_utils.extract_host(volume['host'],
level='pool')
if eseries_pool_label is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['name_id'])
# get size of the requested volume creation
size_gb = int(volume['size'])
self._create_volume(eseries_pool_label,
eseries_volume_label,
size_gb)
def _create_volume(self, eseries_pool_label, eseries_volume_label,
size_gb):
"""Creates volume with given label and size."""
if self.configuration.netapp_enable_multiattach:
volumes = self._client.list_volumes()
# NOTE(ameade): Ensure we do not create more volumes than we could
# map to the multi attach ESeries host group.
if len(volumes) > utils.MAX_LUNS_PER_HOST_GROUP:
msg = (_("Cannot create more than %(req)s volumes on the "
"ESeries array when 'netapp_enable_multiattach' is "
"set to true.") %
{'req': utils.MAX_LUNS_PER_HOST_GROUP})
raise exception.NetAppDriverException(msg)
target_pool = None
pools = self._get_storage_pools()
for pool in pools:
if pool["label"] == eseries_pool_label:
target_pool = pool
break
if not target_pool:
msg = _("Pools %s does not exist")
raise exception.NetAppDriverException(msg % eseries_pool_label)
try:
vol = self._client.create_volume(target_pool['volumeGroupRef'],
eseries_volume_label, size_gb)
LOG.info(_LI("Created volume with "
"label %s."), eseries_volume_label)
except exception.NetAppDriverException as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error creating volume. Msg - %s."), e)
return vol
def _schedule_and_create_volume(self, label, size_gb):
"""Creates volume with given label and size."""
avl_pools = self._get_sorted_available_storage_pools(size_gb)
for pool in avl_pools:
try:
vol = self._client.create_volume(pool['volumeGroupRef'],
label, size_gb)
LOG.info(_LI("Created volume with label %s."), label)
return vol
except exception.NetAppDriverException as e:
LOG.error(_LE("Error creating volume. Msg - %s."), e)
msg = _("Failure creating volume %s.")
raise exception.NetAppDriverException(msg % label)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
label = utils.convert_uuid_to_es_fmt(volume['id'])
size = volume['size']
dst_vol = self._schedule_and_create_volume(label, size)
try:
src_vol = None
src_vol = self._create_snapshot_volume(snapshot['id'])
self._copy_volume_high_prior_readonly(src_vol, dst_vol)
LOG.info(_LI("Created volume with label %s."), label)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
self._client.delete_volume(dst_vol['volumeRef'])
finally:
if src_vol:
try:
self._client.delete_snapshot_volume(src_vol['id'])
except exception.NetAppDriverException as e:
LOG.error(_LE("Failure deleting snap vol. Error: %s."), e)
else:
LOG.warning(_LW("Snapshot volume not found."))
def _create_snapshot_volume(self, snapshot_id):
"""Creates snapshot volume for given group with snapshot_id."""
group = self._get_snapshot_group_for_snapshot(snapshot_id)
LOG.debug("Creating snap vol for group %s", group['label'])
image = self._get_latest_image_in_snapshot_group(snapshot_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
capacity = int(image['pitCapacity']) / units.Gi
storage_pools = self._get_sorted_available_storage_pools(capacity)
s_id = storage_pools[0]['volumeGroupRef']
return self._client.create_snapshot_volume(image['pitRef'], label,
group['baseVolume'], s_id)
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s."),
{'src': src_vol['label'], 'dst': dst_vol['label']})
try:
job = None
job = self._client.create_volume_copy_job(src_vol['id'],
dst_vol['volumeRef'])
while True:
j_st = self._client.list_vol_copy_job(job['volcopyRef'])
if (j_st['status'] == 'inProgress' or j_st['status'] ==
'pending' or j_st['status'] == 'unknown'):
time.sleep(self.SLEEP_SECS)
continue
if j_st['status'] == 'failed' or j_st['status'] == 'halted':
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
raise exception.NetAppDriverException(
_("Vol copy job for dest %s failed.") %
dst_vol['label'])
LOG.info(_LI("Vol copy job completed for dest %s."),
dst_vol['label'])
break
finally:
if job:
try:
self._client.delete_vol_copy_job(job['volcopyRef'])
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting "
"job %s."), job['volcopyRef'])
else:
LOG.warning(_LW('Volume copy job for src vol %s not found.'),
src_vol['id'])
LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
snapshot = {'id': uuid.uuid4(), 'volume_id': src_vref['id'],
'volume': src_vref}
self.create_snapshot(snapshot)
try:
self.create_volume_from_snapshot(volume, snapshot)
finally:
try:
self.delete_snapshot(snapshot)
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting temp snapshot %s."),
snapshot['id'])
def delete_volume(self, volume):
"""Deletes a volume."""
try:
vol = self._get_volume(volume['name_id'])
self._client.delete_volume(vol['volumeRef'])
except exception.NetAppDriverException:
            LOG.warning(_LW("Volume %s already deleted."), volume['id'])
return
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snap_grp, snap_image = None, None
snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
os_vol = snapshot['volume']
vol = self._get_volume(os_vol['name_id'])
vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
pools = self._get_sorted_available_storage_pools(vol_size_gb)
try:
snap_grp = self._client.create_snapshot_group(
snapshot_name, vol['volumeRef'], pools[0]['volumeGroupRef'])
snap_image = self._client.create_snapshot_image(
snap_grp['pitGroupRef'])
LOG.info(_LI("Created snap grp with label %s."), snapshot_name)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
if snap_image is None and snap_grp:
self.delete_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
snap_grp = self._get_snapshot_group_for_snapshot(snapshot['id'])
except exception.NotFound:
LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id'])
return
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
def map_volume_to_host(self, volume, eseries_volume, initiators):
"""Ensures the specified initiator has access to the volume."""
existing_maps = self._client.get_volume_mappings_for_volume(
eseries_volume)
host = self._get_or_create_host(initiators, self.host_type)
# There can only be one or zero mappings on a volume in E-Series
current_map = existing_maps[0] if existing_maps else None
if self.configuration.netapp_enable_multiattach and current_map:
self._ensure_multi_attach_host_group_exists()
mapping = host_mapper.map_volume_to_multiple_hosts(self._client,
volume,
eseries_volume,
host,
current_map)
else:
mapping = host_mapper.map_volume_to_single_host(
self._client, volume, eseries_volume, host, current_map,
self.configuration.netapp_enable_multiattach)
return mapping
def initialize_connection_fc(self, volume, connector):
"""Initializes the connection and returns connection info.
Assigns the specified volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '500a098280feeba5',
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5'],
'21000024ff406cc2': ['500a098280feeba5']
}
}
}<|fim▁hole|> 'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['500a098280feeba5', '500a098290feeba5',
'500a098190feeba5', '500a098180feeba5'],
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5',
'500a098290feeba5'],
'21000024ff406cc2': ['500a098190feeba5',
'500a098180feeba5']
}
}
}
"""
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
eseries_vol = self._get_volume(volume['name_id'])
mapping = self.map_volume_to_host(volume, eseries_vol,
initiators)
lun_id = mapping['lun']
initiator_info = self._build_initiator_target_map_fc(connector)
target_wwpns, initiator_target_map, num_paths = initiator_info
if target_wwpns:
msg = ("Successfully fetched target details for LUN %(id)s "
"and initiator(s) %(initiators)s.")
msg_fmt = {'id': volume['id'], 'initiators': initiators}
LOG.debug(msg, msg_fmt)
else:
msg = _('Failed to get LUN target details for the LUN %s.')
raise exception.VolumeBackendAPIException(data=msg % volume['id'])
target_info = {'driver_volume_type': 'fibre_channel',
'data': {'target_discovered': True,
'target_lun': int(lun_id),
'target_wwn': target_wwpns,
'access_mode': 'rw',
'initiator_target_map': initiator_target_map}}
return target_info
def terminate_connection_fc(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:returns: data - the target_wwns and initiator_target_map if the
zone is to be removed, otherwise the same map with
an empty dict for the 'data' key
"""
eseries_vol = self._get_volume(volume['name_id'])
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
host = self._get_host_with_matching_port(initiators)
mappings = eseries_vol.get('listOfMappings', [])
# There can only be one or zero mappings on a volume in E-Series
mapping = mappings[0] if mappings else None
if not mapping:
raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
host=host['label'])
host_mapper.unmap_volume_from_host(self._client, volume, host, mapping)
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if len(self._client.get_volume_mappings_for_host(
host['hostRef'])) == 0:
# No more exports for this host, so tear down zone.
LOG.info(_LI("Need to remove FC Zone, building initiator "
"target map."))
initiator_info = self._build_initiator_target_map_fc(connector)
target_wwpns, initiator_target_map, num_paths = initiator_info
info['data'] = {'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map}
return info
def _build_initiator_target_map_fc(self, connector):
"""Build the target_wwns and the initiator target map."""
# get WWPNs from controller and strip colons
all_target_wwpns = self._client.list_target_wwpns()
all_target_wwpns = [six.text_type(wwpn).replace(':', '')
for wwpn in all_target_wwpns]
target_wwpns = []
init_targ_map = {}
num_paths = 0
if self.lookup_service:
# Use FC SAN lookup to determine which ports are visible.
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'],
all_target_wwpns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwpns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
for target in init_targ_map[initiator]:
num_paths += 1
target_wwpns = list(set(target_wwpns))
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwpns
return target_wwpns, init_targ_map, num_paths
def initialize_connection_iscsi(self, volume, connector):
"""Allow connection to connector and return connection info."""
initiator_name = connector['initiator']
eseries_vol = self._get_volume(volume['name_id'])
mapping = self.map_volume_to_host(volume, eseries_vol,
[initiator_name])
lun_id = mapping['lun']
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
LOG.debug("Mapped volume %(id)s to the initiator %(initiator_name)s.",
msg_fmt)
iscsi_details = self._get_iscsi_service_details()
iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
iscsi_details)
LOG.debug("Successfully fetched target details for volume %(id)s and "
"initiator %(initiator_name)s.", msg_fmt)
iqn = iscsi_portal['iqn']
address = iscsi_portal['ip']
port = iscsi_portal['tcp_port']
properties = na_utils.get_iscsi_connection_properties(lun_id, volume,
iqn, address,
port)
return properties
def _get_iscsi_service_details(self):
"""Gets iscsi iqn, ip and port information."""
ports = []
hw_inventory = self._client.list_hardware_inventory()
iscsi_ports = hw_inventory.get('iscsiPorts')
if iscsi_ports:
for port in iscsi_ports:
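                # Only use ports that have IPv4 enabled, an IQN, and a fully
                # configured IPv4 address.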
if (port.get('ipv4Enabled') and port.get('iqn') and
port.get('ipv4Data') and
port['ipv4Data'].get('ipv4AddressData') and
port['ipv4Data']['ipv4AddressData']
.get('ipv4Address') and port['ipv4Data']
['ipv4AddressData'].get('configState')
== 'configured'):
iscsi_det = {}
iscsi_det['ip'] =\
port['ipv4Data']['ipv4AddressData']['ipv4Address']
iscsi_det['iqn'] = port['iqn']
iscsi_det['tcp_port'] = port.get('tcpListenPort')
iscsi_det['controller'] = port.get('controllerId')
ports.append(iscsi_det)
if not ports:
msg = _('No good iscsi portals found for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
return ports
def _get_iscsi_portal_for_vol(self, volume, portals, anyController=True):
"""Get the iscsi portal info relevant to volume."""
for portal in portals:
if portal.get('controller') == volume.get('currentManager'):
return portal
if anyController and portals:
return portals[0]
msg = _('No good iscsi portal found in supplied list for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
def _get_or_create_host(self, port_ids, host_type):
"""Fetch or create a host by given port."""
try:
host = self._get_host_with_matching_port(port_ids)
ht_def = self._get_host_type_definition(host_type)
if host.get('hostTypeIndex') != ht_def.get('index'):
try:
host = self._client.update_host_type(
host['hostRef'], ht_def)
except exception.NetAppDriverException as e:
LOG.warning(_LW("Unable to update host type for host with "
"label %(l)s. %(e)s"),
{'l': host['label'], 'e': e.msg})
return host
except exception.NotFound as e:
LOG.warning(_LW("Message - %s."), e.msg)
return self._create_host(port_ids, host_type)
def _get_host_with_matching_port(self, port_ids):
"""Gets or creates a host with given port id."""
# Remove any extra colons
port_ids = [six.text_type(wwpn).replace(':', '')
for wwpn in port_ids]
hosts = self._client.list_hosts()
for port_id in port_ids:
for host in hosts:
if host.get('hostSidePorts'):
ports = host.get('hostSidePorts')
for port in ports:
address = port.get('address').upper().replace(':', '')
if address == port_id.upper():
return host
msg = _("Host with ports %(ports)s not found.")
raise exception.NotFound(msg % {'ports': port_ids})
def _create_host(self, port_ids, host_type, host_group=None):
"""Creates host on system with given initiator as port_id."""
LOG.info(_LI("Creating host with ports %s."), port_ids)
host_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
host_type = self._get_host_type_definition(host_type)
port_type = self.driver_protocol.lower()
return self._client.create_host_with_ports(host_label,
host_type,
port_ids,
group_id=host_group,
port_type=port_type)
def _get_host_type_definition(self, host_type):
"""Gets supported host type if available on storage system."""
host_types = self._client.list_host_types()
for ht in host_types:
if ht.get('name', 'unknown').lower() == host_type.lower():
return ht
raise exception.NotFound(_("Host type %s not supported.") % host_type)
def terminate_connection_iscsi(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
eseries_vol = self._get_volume(volume['name_id'])
initiator = connector['initiator']
host = self._get_host_with_matching_port([initiator])
mappings = eseries_vol.get('listOfMappings', [])
# There can only be one or zero mappings on a volume in E-Series
mapping = mappings[0] if mappings else None
if not mapping:
raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
host=host['label'])
host_mapper.unmap_volume_from_host(self._client, volume, host, mapping)
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service."""
if refresh:
if not self._ssc_stats:
self._update_ssc_info()
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Update volume statistics."""
LOG.debug("Updating volume stats.")
data = dict()
data["volume_backend_name"] = self._backend_name
data["vendor_name"] = "NetApp"
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.driver_protocol
data["pools"] = []
for storage_pool in self._get_storage_pools():
cinder_pool = {}
cinder_pool["pool_name"] = storage_pool.get("label")
cinder_pool["QoS_support"] = False
cinder_pool["reserved_percentage"] = 0
tot_bytes = int(storage_pool.get("totalRaidedSpace", 0))
used_bytes = int(storage_pool.get("usedSpace", 0))
cinder_pool["free_capacity_gb"] = ((tot_bytes - used_bytes) /
units.Gi)
cinder_pool["total_capacity_gb"] = tot_bytes / units.Gi
pool_ssc_stats = self._ssc_stats.get(
storage_pool["volumeGroupRef"])
if pool_ssc_stats:
cinder_pool.update(pool_ssc_stats)
data["pools"].append(cinder_pool)
self._stats = data
self._garbage_collect_tmp_vols()
@cinder_utils.synchronized("netapp_update_ssc_info", external=False)
def _update_ssc_info(self):
"""Periodically runs to update ssc information from the backend.
The self._ssc_stats attribute is updated with the following format.
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
"""
LOG.info(_LI("Updating storage service catalog information for "
"backend '%s'"), self._backend_name)
self._ssc_stats = \
self._update_ssc_disk_encryption(self._get_storage_pools())
self._ssc_stats = \
self._update_ssc_disk_types(self._get_storage_pools())
def _update_ssc_disk_types(self, volume_groups):
"""Updates the given ssc dictionary with new disk type information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
all_disks = self._client.list_drives()
relevant_disks = filter(lambda x: x.get('currentVolumeGroupRef') in
volume_groups, all_disks)
for drive in relevant_disks:
current_vol_group = drive.get('currentVolumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
if drive.get("driveMediaType") == 'ssd':
ssc_stats[current_vol_group]['netapp_disk_type'] = 'SSD'
else:
disk_type = drive.get('interfaceType').get('driveType')
ssc_stats[current_vol_group]['netapp_disk_type'] = \
self.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')
return ssc_stats
def _update_ssc_disk_encryption(self, volume_groups):
"""Updates the given ssc dictionary with new disk encryption information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
all_pools = self._client.list_storage_pools()
relevant_pools = filter(lambda x: x.get('volumeGroupRef') in
volume_groups, all_pools)
for pool in relevant_pools:
current_vol_group = pool.get('volumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
ssc_stats[current_vol_group]['netapp_disk_encryption'] = 'true' \
if pool['securityType'] == 'enabled' else 'false'
return ssc_stats
def _get_storage_pools(self):
conf_enabled_pools = []
for value in self.configuration.netapp_storage_pools.split(','):
if value:
conf_enabled_pools.append(value.strip().lower())
filtered_pools = []
storage_pools = self._client.list_storage_pools()
for storage_pool in storage_pools:
# Check if pool can be used
if (storage_pool.get('raidLevel') == 'raidDiskPool'
and storage_pool['label'].lower() in conf_enabled_pools):
filtered_pools.append(storage_pool)
return filtered_pools
def _get_sorted_available_storage_pools(self, size_gb):
"""Returns storage pools sorted on available capacity."""
size = size_gb * units.Gi
sorted_pools = sorted(self._get_storage_pools(), key=lambda x:
(int(x.get('totalRaidedSpace', 0))
- int(x.get('usedSpace', 0))), reverse=True)
avl_pools = filter(lambda x: ((int(x.get('totalRaidedSpace', 0)) -
int(x.get('usedSpace', 0)) >= size)),
sorted_pools)
if not avl_pools:
LOG.warning(_LW("No storage pool found with available capacity "
"%s."), size_gb)
return avl_pools
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
stage_1, stage_2 = 0, 0
src_vol = self._get_volume(volume['name_id'])
src_label = src_vol['label']
stage_label = 'tmp-%s' % utils.convert_uuid_to_es_fmt(uuid.uuid4())
extend_vol = {'id': uuid.uuid4(), 'size': new_size}
self.create_cloned_volume(extend_vol, volume)
new_vol = self._get_volume(extend_vol['id'])
try:
stage_1 = self._client.update_volume(src_vol['id'], stage_label)
stage_2 = self._client.update_volume(new_vol['id'], src_label)
new_vol = stage_2
LOG.info(_LI('Extended volume with label %s.'), src_label)
except exception.NetAppDriverException:
if stage_1 == 0:
with excutils.save_and_reraise_exception():
self._client.delete_volume(new_vol['id'])
if stage_2 == 0:
with excutils.save_and_reraise_exception():
self._client.update_volume(src_vol['id'], src_label)
self._client.delete_volume(new_vol['id'])
def _garbage_collect_tmp_vols(self):
"""Removes tmp vols with no snapshots."""
try:
if not na_utils.set_safe_attr(self, 'clean_job_running', True):
LOG.warning(_LW('Returning as clean tmp '
'vol job already running.'))
return
for vol in self._client.list_volumes():
label = vol['label']
if (label.startswith('tmp-') and
not self._is_volume_containing_snaps(label)):
try:
self._client.delete_volume(vol['volumeRef'])
except exception.NetAppDriverException as e:
LOG.debug("Error deleting vol with label %s: %s",
(label, e))
finally:
na_utils.set_safe_attr(self, 'clean_job_running', False)
@cinder_utils.synchronized('manage_existing')
def manage_existing(self, volume, existing_ref):
"""Brings an existing storage object under Cinder management."""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
label = utils.convert_uuid_to_es_fmt(volume['id'])
if label == vol['label']:
LOG.info(_LI("Volume with given ref %s need not be renamed during"
" manage operation."), existing_ref)
managed_vol = vol
else:
managed_vol = self._client.update_volume(vol['id'], label)
LOG.info(_LI("Manage operation completed for volume with new label"
" %(label)s and wwn %(wwn)s."),
{'label': label, 'wwn': managed_vol[self.WORLDWIDENAME]})
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
return int(math.ceil(float(vol['capacity']) / units.Gi))
def _get_existing_vol_with_manage_ref(self, volume, existing_ref):
try:
return self._get_volume_with_label_wwn(
existing_ref.get('source-name'), existing_ref.get('source-id'))
except exception.InvalidInput:
reason = _('Reference must contain either source-name'
' or source-id element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
except KeyError:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_('Volume not found on configured storage pools.'))
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. Logs a
message to indicate the volume is no longer under Cinder's control.
"""
managed_vol = self._get_volume(volume['id'])
LOG.info(_LI("Unmanaged volume with current label %(label)s and wwn "
"%(wwn)s."), {'label': managed_vol['label'],
'wwn': managed_vol[self.WORLDWIDENAME]})<|fim▁end|> |
or
{ |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017, 2018 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import io
from sys import exit
import uuid
import shutil
import inspect
import json
from random import random
from string import ascii_lowercase
import time
import traceback
from datetime import datetime, timedelta
import urllib3
import certifi
import hashlib
from threading import Thread
from minio import Minio, PostPolicy, CopyConditions
from minio.error import (APINotImplemented, NoSuchBucketPolicy, ResponseError,
PreconditionFailed, BucketAlreadyOwnedByYou,
BucketAlreadyExists, InvalidBucketError)
class LimitedRandomReader(object):
"""
LimitedRandomReader returns a Reader that upon read
returns random data, but stops with EOF after *limit*
bytes.
LimitedRandomReader is compatible with BufferedIOBase.
returns a class:`LimitedRandomReader` that upon read
provides random data and stops with EOF after *limit*
bytes
:param limit: Trigger EOF after limit bytes.
"""
def __init__(self, limit):
self._limit = limit
self._offset_location = 0
def read(self, amt=64*1024):
"""
Similar to :meth:`io.read`, with amt option.
:param amt:
How much of the content to read.
"""
# If offset is bigger than size. Treat it as EOF return here.
if self._offset_location == self._limit:
# return empty bytes to indicate EOF.
return b''
        # pre-encode the lowercase ASCII letters used to build the random data
bal = [c.encode('ascii') for c in ascii_lowercase]
amt = min(amt, self._limit - self._offset_location)
data = b''.join([bal[int(random() * 26)] for _ in range(amt)])
self._offset_location += len(data)
return data
class LogOutput(object):
"""
    LogOutput is the class for log output. It is the required standard for all
SDK tests controlled by mint.
Here are its attributes:
'name': name of the SDK under test, e.g. 'minio-py'
'function': name of the method/api under test with its signature
The following python code can be used to
pull args information of a <method> and to
put together with the method name:
<method>.__name__+'('+', '.join(args_list)+')'
e.g. 'remove_object(bucket_name, object_name)'
'args': method/api arguments with their values, in
dictionary form: {'arg1': val1, 'arg2': val2, ...}
'duration': duration of the whole test in milliseconds,
defaults to 0
'alert': any extra information user is needed to be alerted about,
like whether this is a Blocker/Gateway/Server related
issue, etc., defaults to None
'message': descriptive error message, defaults to None
'error': stack-trace/exception message(only in case of failure),
actual low level exception/error thrown by the program,
defaults to None
'status': exit status, possible values are 'PASS', 'FAIL', 'NA',
defaults to 'PASS'
"""
PASS = 'PASS'
FAIL = 'FAIL'
NA = 'NA'
def __init__(self, meth, test_name):
self.__args_list = inspect.getargspec(meth).args[1:]
self.__name = 'minio-py:'+test_name
self.__function = meth.__name__+'('+', '.join(self.__args_list)+')'
self.__args = {}
self.__duration = 0
self.__alert = ''
self.__message = None
self.__error = None
self.__status = self.PASS
self.__start_time = time.time()
@property
def name(self): return self.__name
@property
def function(self): return self.__function
@property
def args(self): return self.__args
@name.setter
def name(self, val): self.__name = val
@function.setter
def function(self, val): self.__function = val
@args.setter
def args(self, val): self.__args = val
def json_report(self, err_msg='', alert='', status=''):
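        # Drop empty values and assemble the mint-standard JSON report entry.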
self.__args = {k: v for k, v in self.__args.items() if v and v != ''}
entry = {'name': self.__name,
'function': self.__function,
'args': self.__args,
'duration': int(round((time.time() - self.__start_time)*1000)),
'alert': str(alert),
'message': str(err_msg),
'error': traceback.format_exc() if err_msg and err_msg != '' else '',
'status': status if status and status != '' else \
self.FAIL if err_msg and err_msg != '' else self.PASS
}
return json.dumps({k: v for k, v in entry.items() if v and v != ''})
def generate_bucket_name():
return "minio-py-test-" + uuid.uuid4().__str__()
def is_s3(client):
return "s3.amazonaws" in client._endpoint_url
def test_make_bucket_default_region(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "make_bucket(bucket_name, location)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
# Default location
log_output.args['location'] = "default value ('us-east-1')"
try:
# Create a bucket with default bucket location
client.make_bucket(bucket_name)
# Check if bucket was created properly
log_output.function = 'bucket_exists(bucket_name)'
client.bucket_exists(bucket_name)
# Remove bucket
log_output.function = 'remove_bucket(bucket_name)'
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
log_output.function = 'make_bucket(bucket_name, location)'
print(log_output.json_report())
def test_make_bucket_with_region(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "make_bucket(bucket_name, location)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
# A non-default location
log_output.args['location'] = location = 'us-west-1'
try:
# Create a bucket with default bucket location
client.make_bucket(bucket_name, location)
# Check if bucket was created properly
log_output.function = 'bucket_exists(bucket_name)'
client.bucket_exists(bucket_name)
# Remove bucket
log_output.function = 'remove_bucket(bucket_name)'
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
log_output.function = 'make_bucket(bucket_name, location)'
print(log_output.json_report())
def test_negative_make_bucket_invalid_name(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "make_bucket(bucket_name, location)"
# Get a unique bucket_name
bucket_name = generate_bucket_name()
# Default location
log_output.args['location'] = "default value ('us-east-1')"
# Create an array of invalid bucket names to test
invalid_bucket_name_list = [bucket_name+'.', '.'+bucket_name, bucket_name+'...'+'abcd']
for name in invalid_bucket_name_list:
log_output.args['bucket_name'] = name
try:
# Create a bucket
client.make_bucket(name)
# Check if bucket was created properly
log_output.function = 'bucket_exists(bucket_name)'
client.bucket_exists(name)
# Remove bucket
log_output.function = 'remove_bucket(bucket_name)'
client.remove_bucket(name)
except InvalidBucketError as err:
pass
except Exception as err:
raise Exception(err)
# Test passes
log_output.function = 'make_bucket(bucket_name, location)'
log_output.args['bucket_name'] = invalid_bucket_name_list
print(log_output.json_report())
def test_make_bucket_recreate(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "make_bucket(bucket_name, location)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
# s3 amazon has a bug and can let a bucket to be recreated for
# 'us-east-1' region, as opposed to the expected failure behavior.
# Until this issue is fixed by amazon, the following
# location manipulation will be used in our testing.
location = 'us-west-1' if is_s3(client) else 'us-east-1'
failed_as_expected = False
try:
client.make_bucket(bucket_name, location)
client.make_bucket(bucket_name, location)
except BucketAlreadyOwnedByYou as err:
# Expected this exception. Test passes
failed_as_expected = True
print(log_output.json_report())
except BucketAlreadyExists as err:
# Expected this exception. Test passes
failed_as_expected = True
print(log_output.json_report())
except Exception as err:
raise Exception(err)
if not failed_as_expected:
print(log_output.json_report("Recreating the same bucket SHOULD have failed!"))
exit()
def test_list_buckets(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_buckets( )"
# Get a unique bucket_name
bucket_name = generate_bucket_name()
try:
client.make_bucket(bucket_name)
# List all buckets.
buckets = client.list_buckets()
for bucket in buckets:
# bucket object should be of a valid value.
if bucket.name and bucket.creation_date:
continue
raise ValueError('list_bucket api failure')
except Exception as err:
raise Exception(err)
finally:
client.remove_bucket(bucket_name)
# Test passes
print(log_output.json_report())
def test_fput_object_small_file(client, testfile, log_output):
# default value for log_output.function attribute is;
# log_output.function = "fput_object(bucket_name, object_name, file_path, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
log_output.args['file_path'] = testfile
log_output.args['metadata'] = metadata = {'x-amz-storage-class': 'STANDARD_IA'}
try:
client.make_bucket(bucket_name)
# upload local small file.
if is_s3(client):
            client.fput_object(bucket_name, object_name+'-f', testfile,
                               metadata=metadata)
else:
client.fput_object(bucket_name, object_name+'-f', testfile)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+'-f')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_fput_object_large_file(client, largefile, log_output):
# default value for log_output.function attribute is;
# log_output.function = "fput_object(bucket_name, object_name, file_path, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
log_output.args['file_path'] = largefile
log_output.args['metadata'] = metadata = {'x-amz-storage-class': 'STANDARD_IA'}
# upload local large file through multipart.
try:
client.make_bucket(bucket_name)
if is_s3(client):
            client.fput_object(bucket_name, object_name+'-large', largefile,
                               metadata=metadata)
else:
client.fput_object(bucket_name, object_name+'-large', largefile)
client.stat_object(bucket_name, object_name+'-large')
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+'-large')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_fput_object_with_content_type(client, testfile, log_output):
# default value for log_output.function attribute is;
# log_output.function = "fput_object(bucket_name, object_name, file_path, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
log_output.args['file_path'] = testfile
log_output.args['content_type'] = content_type = 'application/octet-stream'
log_output.args['metadata'] = metadata = {'x-amz-storage-class': 'STANDARD_IA'}
try:
client.make_bucket(bucket_name)
# upload local small file with content_type defined.
if is_s3(client):
client.fput_object(bucket_name, object_name+'-f', testfile,
content_type, metadata)
else:
client.fput_object(bucket_name, object_name+'-f', testfile,
content_type)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+'-f')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_no_copy_condition(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
# Upload a streaming object of 1MiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
# Perform a server side copy of an object
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source)
st_obj = client.stat_object(bucket_name, object_copy)
validate_stat_data(st_obj, KB_1, {})
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_etag_match(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
# Upload a streaming object of 1MiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
# Perform a server side copy of an object
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source)
# Verification
source_etag = client.stat_object(bucket_name, object_source).etag
copy_conditions = CopyConditions()
copy_conditions.set_match_etag(source_etag)
log_output.args['conditions'] = {'set_match_etag': source_etag}
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source,
copy_conditions)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_negative_etag_match(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
# Upload a streaming object of 1MiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
try:
# Perform a server side copy of an object
# with incorrect pre-conditions and fail
etag = 'test-etag'
copy_conditions = CopyConditions()
copy_conditions.set_match_etag(etag)
log_output.args['conditions'] = {'set_match_etag': etag}
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source,
copy_conditions)
except PreconditionFailed as err:
if err.message != 'At least one of the preconditions you specified did not hold.':
raise Exception(err)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_modified_since(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
        # Upload a streaming object of 1KiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
# Set up the 'modified_since' copy condition
copy_conditions = CopyConditions()
t = (2014, 4, 1, 0, 0, 0, 0, 0, 0)
mod_since = datetime.utcfromtimestamp(time.mktime(t))
copy_conditions.set_modified_since(mod_since)
date_pretty = mod_since.strftime('%c')
log_output.args['conditions'] = {'set_modified_since':date_pretty}
# Perform a server side copy of an object
# and expect the copy to complete successfully
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source,
copy_conditions)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_unmodified_since(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
        # Upload a streaming object of 1KiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
        # Set up the 'unmodified_since' copy condition
copy_conditions = CopyConditions()
t = (2014, 4, 1, 0, 0, 0, 0, 0, 0)
unmod_since = datetime.utcfromtimestamp(time.mktime(t))
copy_conditions.set_unmodified_since(unmod_since)
date_pretty = unmod_since.strftime('%c')
log_output.args['conditions'] = {'set_unmodified_since': date_pretty}
try:
# Perform a server side copy of an object and expect
# the copy to fail since the creation/modification
# time is now, way later than unmodification time, April 1st, 2014
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source,
copy_conditions)
except PreconditionFailed as err:
if err.message != 'At least one of the preconditions you specified did not hold.':
raise Exception(err)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_put_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "put_object(bucket_name, object_name, data, length, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
# Put/Upload a streaming object of 1MiB
log_output.args['length'] = MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
log_output.args['data'] = 'LimitedRandomReader(MB_1)'
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
client.stat_object(bucket_name, object_name)
# Put/Upload a streaming object of 11MiB
log_output.args['length'] = MB_11 = 11*1024*1024 # 11MiB.
MB_11_reader = LimitedRandomReader(MB_11)
log_output.args['data'] = 'LimitedRandomReader(MB_11)'
log_output.args['metadata'] = metadata = {'x-amz-meta-testing': 'value','test-key':'value2'}
log_output.args['content_type'] = content_type='application/octet-stream'
client.put_object(bucket_name,
object_name+'-metadata',
MB_11_reader,
MB_11,
content_type,
metadata)
# Stat on the uploaded object to check if it exists
# Fetch saved stat metadata on a previously uploaded object with metadata.
st_obj = client.stat_object(bucket_name, object_name+'-metadata')
if 'X-Amz-Meta-Testing' not in st_obj.metadata:
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
value = st_obj.metadata['X-Amz-Meta-Testing']
if value != 'value':
raise ValueError('Metadata key has unexpected'
' value {0}'.format(value))
if 'X-Amz-Meta-Test-Key' not in st_obj.metadata:
raise ValueError("Metadata key 'x-amz-meta-test-key' not found")
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_object(bucket_name, object_name+'-metadata')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_negative_put_object_with_path_segment(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "put_object(bucket_name, object_name, data, length, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = "/a/b/c/" + uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
log_output.args['length'] = 0 # Keep 0 bytes body to check for error.
log_output.args['data'] = ''
client.put_object(bucket_name,
object_name,
io.BytesIO(b''), 0)
except ResponseError as err:
pass
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def validate_stat_data(st_obj, expected_size, expected_meta):
received_modification_time = st_obj.last_modified
received_etag = st_obj.etag
received_metadata = st_obj.metadata
received_content_type = st_obj.content_type
received_size = st_obj.size
received_is_dir = st_obj.is_dir
if not isinstance(received_modification_time, time.struct_time):
raise ValueError('Incorrect last_modified time type'
', received type: ', type(received_modification_time))
if not received_etag or received_etag == '':
raise ValueError('No Etag value is returned.')
if received_content_type != 'application/octet-stream':
raise ValueError('Incorrect content type. Expected: ',
"'application/octet-stream', received: ",
received_content_type)
if received_size != expected_size:
        raise ValueError('Incorrect file size. Expected: ', expected_size,
                         ', received: ', received_size)
if received_is_dir != False:
raise ValueError('Incorrect file type. Expected: is_dir=False',
', received: is_dir=', received_is_dir)
if not all(i in expected_meta.items() for i in received_metadata.items()):
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
def test_stat_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "stat_object(bucket_name, object_name)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
# Put/Upload a streaming object of 1MiB
log_output.args['length'] = MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
log_output.args['data'] = 'LimitedRandomReader(MB_1)'
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
client.stat_object(bucket_name, object_name)
# Put/Upload a streaming object of 11MiB
log_output.args['length'] = MB_11 = 11*1024*1024 # 11MiB.
MB_11_reader = LimitedRandomReader(MB_11)
log_output.args['data'] = 'LimitedRandomReader(MB_11)'
log_output.args['metadata'] = metadata = {'X-Amz-Meta-Testing': 'value'}
log_output.args['content_type'] = content_type='application/octet-stream'
client.put_object(bucket_name,
object_name+'-metadata',
MB_11_reader,
MB_11,
content_type,
metadata)
# Get the stat on the uploaded object
st_obj = client.stat_object(bucket_name, object_name+'-metadata')
# Verify the collected stat data.
validate_stat_data(st_obj, MB_11, metadata)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_object(bucket_name, object_name+'-metadata')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_remove_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "remove_object(bucket_name, object_name)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_name, KB_1_reader, KB_1)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_get_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "get_object(bucket_name, object_name, request_headers)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
newfile = 'newfile جديد'
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.make_bucket(bucket_name)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
# Get/Download a full object, iterate on response to save to disk
object_data = client.get_object(bucket_name, object_name)
with open(newfile, 'wb') as file_data:
shutil.copyfileobj(object_data, file_data)
except Exception as err:
raise Exception(err)
finally:
try:
os.remove(newfile)
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_fget_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "fget_object(bucket_name, object_name, file_path, request_headers)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
log_output.args['file_path'] = newfile_f = 'newfile-f 新'
try:
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.make_bucket(bucket_name)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
# Get/Download a full object and save locally at path
client.fget_object(bucket_name, object_name, newfile_f)
except Exception as err:
raise Exception(err)
finally:
try:
os.remove(newfile_f)
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_get_partial_object_with_default_length(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "get_partial_object(bucket_name, object_name, offset, length, request_headers)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
newfile = 'newfile'
MB_1 = 1024*1024 # 1MiB.
length = 1000
log_output.args['offset'] = offset = MB_1 - length
MB_1_reader = LimitedRandomReader(MB_1)
client.make_bucket(bucket_name)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
        # Get the last 'length' bytes of the object, from 'offset' to the end
object_data = client.get_partial_object(bucket_name, object_name, offset)
with open(newfile, 'wb') as file_data:
for d in object_data:
file_data.write(d)
        # Check that the downloaded file has the expected size
        new_file_size = os.path.getsize('./newfile')
        if new_file_size != length:
            raise ValueError('Unexpected file size after partial object download')
except Exception as err:
raise Exception(err)
finally:
try:
# os.remove(newfile)
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_get_partial_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "get_partial_object(bucket_name, object_name, offset, length, request_headers)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
newfile = 'newfile'
MB_1 = 1024*1024 # 1MiB.
log_output.args['offset'] = offset = int(MB_1/2)
log_output.args['length'] = length = int(MB_1/2)-1000
MB_1_reader = LimitedRandomReader(MB_1)
client.make_bucket(bucket_name)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
        # Get 'length' bytes of the object starting at 'offset'
object_data = client.get_partial_object(bucket_name, object_name, offset, length)
with open(newfile, 'wb') as file_data:
for d in object_data:
file_data.write(d)
        # Check that the downloaded file has the expected size
        new_file_size = os.path.getsize('./newfile')
        if new_file_size != length:
            raise ValueError('Unexpected file size after partial object download')
except Exception as err:
raise Exception(err)
finally:
try:
# os.remove(newfile)
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_list_objects(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_objects(bucket_name, prefix, recursive)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name+"-1", MB_1_reader, MB_1)<|fim▁hole|> log_output.args['recursive'] = is_recursive = True
objects = client.list_objects(bucket_name, None, is_recursive)
for obj in objects:
_, _, _, _, _, _ = obj.bucket_name,\
obj.object_name,\
obj.last_modified,\
obj.etag, obj.size,\
obj.content_type
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+"-1")
client.remove_object(bucket_name, object_name+"-2")
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def count_objects(objects):
no_of_files = 0
for obj in objects:
_, _, _, _, _, _ = obj.bucket_name,\
obj.object_name,\
obj.last_modified,\
obj.etag, obj.size,\
obj.content_type
no_of_files += 1
return no_of_files
def list_objects_api_test(client, bucket_name, expected_no, *argv):
# argv is composed of prefix and recursive arguments of
# list_objects api. They are both supposed to be passed as strings.
no_of_files = count_objects(client.list_objects(bucket_name, *argv) ) # expect all objects to be listed
if expected_no != no_of_files:
raise ValueError("Listed no of objects ({}), does not match the expected no of objects ({})".format(no_of_files, expected_no))
def test_list_objects_with_prefix(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_objects(bucket_name, prefix, recursive)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
MB_1 = 1024*1024 # 1MiB.
no_of_created_files = 4
path_prefix = ''
# Create files and directories
for i in range(no_of_created_files):
str_i = str(i)
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, path_prefix + str_i + '_' + object_name, MB_1_reader, MB_1)
path_prefix += str_i + '/'
# Created files and directory structure
# ._<bucket_name>/
# |___0_<object_name>
# |___0/
# |___1_<object_name>
# |___1/
# |___2_<object_name>
# |___2/
# |___3_<object_name>
#
# Test and verify list_objects api outputs
# List objects recursively with NO prefix
log_output.args['recursive'] = recursive = 'True'
log_output.args['prefix'] = prefix = '' # no prefix
list_objects_api_test(client, bucket_name,
no_of_created_files,
prefix, recursive)
# List objects at the top level with no prefix and no recursive option
# Expect only the top 2 objects to be listed
log_output.args['recursive'] = recursive = ''
log_output.args['prefix'] = prefix = ''
list_objects_api_test(client, bucket_name, 2)
# List objects for '0' directory/prefix without recursive option
        # Expect 2 objects (the '0/' directory and the '0_' object) to be listed
log_output.args['prefix'] = prefix = '0'
list_objects_api_test(client, bucket_name, 2, prefix)
# List objects for '0/' directory/prefix without recursive option
# Expect only 2 objects under directory '0/' to be listed, non-recursive
log_output.args['prefix'] = prefix = '0/'
list_objects_api_test(client, bucket_name, 2, prefix)
# List objects for '0/' directory/prefix, recursively
        # Expect 3 objects to be listed
log_output.args['prefix'] = prefix = '0/'
log_output.args['recursive'] = recursive = 'True'
list_objects_api_test(client, bucket_name, 3, prefix, recursive)
# List object with '0/1/2/' directory/prefix, non-recursive
# Expect the single object under directory '0/1/2/' to be listed
log_output.args['prefix'] = prefix = '0/1/2/'
list_objects_api_test(client, bucket_name, 1, prefix)
except Exception as err:
raise Exception(err)
finally:
try:
path_prefix = ''
for i in range(no_of_created_files):
str_i = str(i)
client.remove_object(bucket_name, path_prefix + str_i + '_' + object_name)
path_prefix += str_i + '/'
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
log_output.args['recursive'] = 'Several prefix/recursive combinations are tested'
log_output.args['prefix'] = 'Several prefix/recursive combinations are tested'
print(log_output.json_report())
def test_list_objects_with_1001_files(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_objects(bucket_name, prefix, recursive)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_name'] = object_name + '_0 ~ ' + object_name + '_1000'
try:
client.make_bucket(bucket_name)
KB_1 = 1024 # 1KiB.
        no_of_created_files = 1001
path_prefix = ''
# Create 1001 1KiB files under bucket_name at the same layer
for i in range(no_of_created_files):
str_i = str(i)
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, path_prefix + object_name + '_' + str_i, KB_1_reader, KB_1)
# List objects and check if 1001 files are returned
list_objects_api_test(client, bucket_name, no_of_created_files)
except Exception as err:
raise Exception(err)
finally:
try:
path_prefix = ''
for i in range(no_of_created_files):
str_i = str(i)
client.remove_object(bucket_name, path_prefix + object_name + '_' + str_i)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_list_objects_v2(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_objects(bucket_name, prefix, recursive)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name+"-1", MB_1_reader, MB_1)
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name+"-2", MB_1_reader, MB_1)
# List all object paths in bucket using V2 API.
log_output.args['recursive'] = is_recursive = True
objects = client.list_objects_v2(bucket_name, None, is_recursive)
for obj in objects:
_, _, _, _, _, _ = obj.bucket_name,\
obj.object_name,\
obj.last_modified,\
obj.etag, obj.size,\
obj.content_type
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+"-1")
client.remove_object(bucket_name, object_name+"-2")
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
# Helper method for test_list_incomplete_uploads
# and test_remove_incomplete_uploads tests
def create_upload_ids(client, b_name, o_name, n):
# Create 'n' many incomplete upload ids and
# return the list of created upload ids
upload_ids_created = []
for i in range(n):
upload_id = client._new_multipart_upload(b_name, o_name, {})
upload_ids_created.append(upload_id)
return upload_ids_created
# Helper method for test_list_incomplete_uploads
# and test_remove_incomplete_uploads tests
def collect_incomplete_upload_ids(client, b_name, o_name):
# Collect the upload ids from 'list_incomplete_uploads'
# command, and return the list of created upload ids
upload_ids_listed = []
for obj in client.list_incomplete_uploads(b_name, o_name, False):
upload_ids_listed.append(obj.upload_id)
return upload_ids_listed
def test_remove_incomplete_upload(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "remove_incomplete_upload(bucket_name, object_name)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
no_of_upload_ids = 3
# Create 'no_of_upload_ids' many incomplete upload ids
create_upload_ids(client, bucket_name, object_name, no_of_upload_ids)
# Remove all of the created upload ids
client.remove_incomplete_upload(bucket_name, object_name)
# Get the list of incomplete upload ids for object_name
# using 'list_incomplete_uploads' command
upload_ids_listed = collect_incomplete_upload_ids(client,
bucket_name,
object_name)
# Verify listed/returned upload id list
if upload_ids_listed:
# The list is not empty
raise ValueError("There are still upload ids not removed")
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_get_object_default_expiry(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_get_object(bucket_name, object_name, expires, response_headers)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
presigned_get_object_url = client.presigned_get_object(bucket_name,
object_name)
response = _http.urlopen('GET', presigned_get_object_url)
if response.status != 200:
raise ResponseError(response,
'GET',
bucket_name,
object_name).get_exception()
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_get_object_expiry_5sec(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_get_object(bucket_name, object_name, expires, response_headers)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_name, KB_1_reader, KB_1)
presigned_get_object_url = client.presigned_get_object(bucket_name,
object_name,
timedelta(seconds=5))
response = _http.urlopen('GET', presigned_get_object_url)
if response.status != 200:
raise ResponseError(response,
'GET',
bucket_name,
object_name).get_exception()
# Wait for 5 seconds for the presigned url to expire
time.sleep(5)
response = _http.urlopen('GET', presigned_get_object_url)
# Success with an expired url is considered to be a failure
if response.status == 200:
raise ValueError('Presigned get url failed to expire!')
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_get_object_response_headers(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_get_object(bucket_name, object_name, expires, response_headers)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_name, KB_1_reader, KB_1)
content_type = 'text/plain'
content_language = 'en_US'
response_headers = {'response-content-type': content_type,
'response-content-language': content_language}
presigned_get_object_url = client.presigned_get_object(bucket_name,
object_name,
timedelta(seconds=5),
response_headers)
response = _http.urlopen('GET', presigned_get_object_url)
returned_content_type = response.headers['Content-Type']
returned_content_language = response.headers['Content-Language']
if response.status != 200 or returned_content_type != content_type or\
returned_content_language != content_language:
raise ResponseError(response,
'GET',
bucket_name,
object_name).get_exception()
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_put_object_default_expiry(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_put_object(bucket_name, object_name, expires)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
presigned_put_object_url = client.presigned_put_object(bucket_name,
object_name)
MB_1 = 1024*1024 # 1MiB.
response = _http.urlopen('PUT',
presigned_put_object_url,
LimitedRandomReader(MB_1))
if response.status != 200:
raise ResponseError(response,
'PUT',
bucket_name,
object_name).get_exception()
client.stat_object(bucket_name, object_name)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_put_object_expiry_5sec(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_put_object(bucket_name, object_name, expires)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
KB_1 = 1024 # 1KiB.
try:
client.make_bucket(bucket_name)
presigned_put_object_url = client.presigned_put_object(bucket_name,
object_name,
timedelta(seconds=5))
# Wait for 5 seconds for the presigned url to expire
time.sleep(5)
response = _http.urlopen('PUT',
presigned_put_object_url,
LimitedRandomReader(KB_1))
if response.status == 200:
raise ValueError('Presigned put url failed to expire!')
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_post_policy(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_post_policy(post_policy)"
bucket_name = generate_bucket_name()
no_of_days = 10
prefix = 'objectPrefix/'
try:
client.make_bucket(bucket_name)
# Post policy.
policy = PostPolicy()
policy.set_bucket_name(bucket_name)
policy.set_key_startswith(prefix)
expires_date = datetime.utcnow()+timedelta(days=no_of_days)
policy.set_expires(expires_date)
# post_policy arg is a class. To avoid displaying meaningless value
# for the class, policy settings are made part of the args for
# clarity and debugging purposes.
log_output.args['post_policy'] = {'bucket_name': bucket_name,
'prefix': prefix,
'expires_in_days': no_of_days}
client.presigned_post_policy(policy)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_thread_safe(client, test_file, log_output):
# Get a unique bucket_name and object_name
no_of_threads = 5
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
# Create sha-sum value for the user provided
# source file, 'test_file'
with open(test_file, 'rb') as f:
contents = f.read()
test_file_sha_sum = hashlib.sha256(contents).hexdigest()
# Create the bucket
client.make_bucket(bucket_name)
        # Put/Upload 'no_of_threads' copies of the object; each upload runs
        # in its own thread, started and joined one at a time
for i in range(no_of_threads):
thrd = Thread(target=client.fput_object,
args=(bucket_name, object_name, test_file))
thrd.start()
thrd.join()
# A list of exceptions raised by get_object_and_check
# called in multiple threads.
exceptions = []
# get_object_and_check() downloads an object, stores it in a file
# and then calculates its checksum. In case of mismatch, a new
# exception is generated and saved in exceptions.
def get_object_and_check(client, bckt_name, obj_name, no,
expected_sha_sum):
try:
obj_data = client.get_object(bckt_name, obj_name)
local_file = 'copied_file_'+str(no)
# Create a file with the returned data
with open(local_file, 'wb') as file_data:
shutil.copyfileobj(obj_data, file_data)
with open(local_file, 'rb') as f:
contents = f.read()
copied_file_sha_sum = hashlib.sha256(contents).hexdigest()
# Compare sha-sum values of the source file and the copied one
if expected_sha_sum != copied_file_sha_sum:
raise ValueError(
'Sha-sum mismatch on multi-threaded put and get objects')
except Exception as err:
exceptions.append(Exception(err))
finally:
# Remove downloaded file
os.path.isfile(local_file) and os.remove(local_file)
# Get/Download 'no_of_threads' many objects
# simultaneously using multi-threading
thrd_list = []
for i in range(no_of_threads):
# Create dynamic/varying names for to be created threads
thrd_name = 'thread_'+str(i)
vars()[thrd_name] = Thread(target=get_object_and_check,
args=(client, bucket_name,
object_name, i, test_file_sha_sum))
vars()[thrd_name].start()
thrd_list.append(vars()[thrd_name])
# Wait until all threads to finish
for t in thrd_list:
t.join()
if len(exceptions) > 0:
raise exceptions[0]
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_get_bucket_policy(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "get_bucket_policy(bucket_name)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
client.make_bucket(bucket_name)
client.get_bucket_policy(bucket_name)
except APINotImplemented:
print(log_output.json_report(alert='Not Implemented', status=LogOutput.NA))
except NoSuchBucketPolicy:
# Test passes
print(log_output.json_report())
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
def get_policy_actions(stat):
actions = []
for s in stat:
action = s.get('Action')
if action not in actions:
actions.append(action)
# flatten nested lists in actions
flattened_actions = []
for a in actions:
if isinstance(a, list):
for aa in a:
flattened_actions.append(aa)
else:
flattened_actions.append(a)
actions = [s.replace('s3:', '') for s in flattened_actions]
return actions
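# Illustrative trace (hypothetical statement list): given
#
#     [{'Action': ['s3:GetBucketLocation']},
#      {'Action': ['s3:GetObject', 's3:PutObject']}]
#
# get_policy_actions returns the flattened, prefix-stripped list
# ['GetBucketLocation', 'GetObject', 'PutObject'].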
def policy_validated(client, bucket_name, policy):
policy_dict = json.loads(client.get_bucket_policy(bucket_name).decode("utf-8"))
actions = get_policy_actions(policy_dict.get('Statement'))
actions.sort()
expected_actions = get_policy_actions(policy.get('Statement'))
expected_actions.sort()
if expected_actions != actions:
return False
return True
def test_set_bucket_policy_readonly(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "set_bucket_policy(bucket_name, policy)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
client.make_bucket(bucket_name)
# read-only policy
policy = {
"Version":"2012-10-17",
"Statement":[
{
"Sid":"",
"Effect":"Allow",
"Principal":{"AWS":"*"},
"Action":"s3:GetBucketLocation",
"Resource":"arn:aws:s3:::"+bucket_name
},
{
"Sid":"",
"Effect":"Allow",
"Principal":{"AWS":"*"},
"Action":"s3:ListBucket",
"Resource":"arn:aws:s3:::"+bucket_name
},
{
"Sid":"",
"Effect":"Allow",
"Principal":{"AWS":"*"},
"Action":"s3:GetObject",
"Resource":"arn:aws:s3:::"+bucket_name+"/*"
}
]
}
# Set read-only policy
client.set_bucket_policy(bucket_name, json.dumps(policy))
# Validate if the policy is set correctly
if not policy_validated(client, bucket_name, policy):
raise ValueError('Failed to set ReadOnly bucket policy')
except APINotImplemented:
print(log_output.json_report(alert='Not Implemented',
status=LogOutput.NA))
except Exception as err:
raise Exception(err)
else:
# Test passes
print(log_output.json_report())
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
def test_set_bucket_policy_readwrite(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "set_bucket_policy(bucket_name, prefix, policy_access)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
client.make_bucket(bucket_name)
# Read-write policy
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Action": ["s3:GetBucketLocation"],
"Sid": "",
"Resource": ["arn:aws:s3:::"+bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListBucket"],
"Sid": "",
"Resource": ["arn:aws:s3:::"+bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListBucketMultipartUploads"],
"Sid": "",
"Resource": ["arn:aws:s3:::"+bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListMultipartUploadParts",
"s3:GetObject",
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:PutObject"],
"Sid": "",
"Resource": ["arn:aws:s3:::"+bucket_name+"/*"],
"Effect": "Allow",
"Principal": {"AWS": "*"}
}
]
}
# Set read-write policy
client.set_bucket_policy(bucket_name, json.dumps(policy))
# Validate if the policy is set correctly
if not policy_validated(client, bucket_name, policy):
            raise ValueError('Failed to set ReadWrite bucket policy')
except APINotImplemented:
print(log_output.json_report(alert='Not Implemented', status=LogOutput.NA))
except Exception as err:
raise Exception(err)
else:
# Test passes
print(log_output.json_report())
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
def test_remove_objects(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "remove_objects(bucket_name, objects_iter)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
MB_1 = 1024*1024 # 1MiB.
client.make_bucket(bucket_name)
# Upload some new objects to prepare for multi-object delete test.
object_names = []
for i in range(10):
curr_object_name = "prefix"+"-{}".format(i)
client.put_object(bucket_name, curr_object_name, LimitedRandomReader(MB_1), MB_1)
object_names.append(curr_object_name)
log_output.args['objects_iter'] = objects_iter = object_names
# delete the objects in a single library call.
for del_err in client.remove_objects(bucket_name, objects_iter):
raise ValueError("Remove objects err: {}".format(del_err))
except Exception as err:
raise Exception(err)
finally:
try:
# Try to clean everything to keep our server intact
for del_err in client.remove_objects(bucket_name, objects_iter):
raise ValueError("Remove objects err: {}".format(del_err))
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_remove_bucket(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "remove_bucket(bucket_name)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
if is_s3(client):
log_output.args['location'] = location = 'us-east-1'
client.make_bucket(bucket_name+'.unique', location)
else:
client.make_bucket(bucket_name)
except Exception as err:
raise Exception(err)
finally:
try:
# Removing bucket. This operation will only work if your bucket is empty.
if is_s3(client):
client.remove_bucket(bucket_name+'.unique')
else:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def isFullMode():
return os.getenv("MINT_MODE") == "full"
def main():
"""
Functional testing of minio python library.
"""
try:
access_key = os.getenv('ACCESS_KEY', 'Q3AM3UQ867SPQQA43P2F')
secret_key = os.getenv('SECRET_KEY',
'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG')
server_endpoint = os.getenv('SERVER_ENDPOINT', 'play.minio.io:9000')
secure = os.getenv('ENABLE_HTTPS', '1') == '1'
if server_endpoint == 'play.minio.io:9000':
access_key = 'Q3AM3UQ867SPQQA43P2F'
secret_key = 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG'
secure = True
client = Minio(server_endpoint, access_key, secret_key, secure=secure)
# Check if we are running in the mint environment.
data_dir = os.getenv('DATA_DIR')
        if data_dir is None:
os.environ['DATA_DIR'] = data_dir = '/mint/data'
is_mint_env = (os.path.exists(data_dir) and\
os.path.exists(os.path.join(data_dir, 'datafile-1-MB')) and\
os.path.exists(os.path.join(data_dir, 'datafile-11-MB')))
# Enable trace
# import sys
# client.trace_on(sys.stderr)
testfile = 'datafile-1-MB'
largefile = 'datafile-11-MB'
if is_mint_env :
## Choose data files
testfile = os.path.join(data_dir, 'datafile-1-MB')
largefile = os.path.join(data_dir, 'datafile-11-MB')
else:
with open(testfile, 'wb') as file_data:
shutil.copyfileobj(LimitedRandomReader(1024*1024), file_data)
with open(largefile, 'wb') as file_data:
shutil.copyfileobj(LimitedRandomReader(11*1024*1024), file_data)
if isFullMode():
log_output = LogOutput(client.make_bucket, 'test_make_bucket_default_region')
test_make_bucket_default_region(client, log_output)
log_output = LogOutput(client.make_bucket, 'test_make_bucket_with_region')
test_make_bucket_with_region(client, log_output)
log_output = LogOutput(client.make_bucket, 'test_negative_make_bucket_invalid_name')
test_negative_make_bucket_invalid_name(client, log_output)
log_output = LogOutput(client.make_bucket, 'test_make_bucket_recreate')
test_make_bucket_recreate(client, log_output)
log_output = LogOutput(client.list_buckets, 'test_list_buckets')
test_list_buckets(client, log_output)
log_output = LogOutput(client.fput_object, 'test_fput_object_small_file')
test_fput_object_small_file(client, testfile, log_output)
log_output = LogOutput(client.fput_object, 'test_fput_object_large_file')
test_fput_object_large_file(client, largefile, log_output)
log_output = LogOutput(client.fput_object, 'test_fput_object_with_content_type')
test_fput_object_with_content_type(client, testfile, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_no_copy_condition')
test_copy_object_no_copy_condition(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_etag_match')
test_copy_object_etag_match(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_negative_etag_match')
test_copy_object_negative_etag_match(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_modified_since')
test_copy_object_modified_since(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_unmodified_since')
test_copy_object_unmodified_since(client, log_output)
log_output = LogOutput(client.put_object, 'test_put_object')
test_put_object(client, log_output)
log_output = LogOutput(client.put_object, 'test_negative_put_object_with_path_segment')
test_negative_put_object_with_path_segment(client, log_output)
log_output = LogOutput(client.stat_object, 'test_stat_object')
test_stat_object(client, log_output)
log_output = LogOutput(client.get_object, 'test_get_object')
test_get_object(client, log_output)
log_output = LogOutput(client.fget_object, 'test_fget_object')
test_fget_object(client, log_output)
log_output = LogOutput(client.get_partial_object, 'test_get_partial_object_with_default_length')
test_get_partial_object_with_default_length(client, log_output)
log_output = LogOutput(client.get_partial_object, 'test_get_partial_object')
test_get_partial_object(client, log_output)
log_output = LogOutput(client.list_objects, 'test_list_objects')
test_list_objects(client, log_output)
log_output = LogOutput(client.list_objects, 'test_list_objects_with_prefix')
test_list_objects_with_prefix(client, log_output)
log_output = LogOutput(client.list_objects, 'test_list_objects_with_1001_files')
test_list_objects_with_1001_files(client, log_output)
log_output = LogOutput(client.remove_incomplete_upload, 'test_remove_incomplete_upload')
test_remove_incomplete_upload(client, log_output)
log_output = LogOutput(client.list_objects_v2, 'test_list_objects_v2')
test_list_objects_v2(client, log_output)
log_output = LogOutput(client.presigned_get_object, 'test_presigned_get_object_default_expiry')
test_presigned_get_object_default_expiry(client, log_output)
log_output = LogOutput(client.presigned_get_object, 'test_presigned_get_object_expiry_5sec')
test_presigned_get_object_expiry_5sec(client, log_output)
log_output = LogOutput(client.presigned_get_object, 'test_presigned_get_object_response_headers')
test_presigned_get_object_response_headers(client, log_output)
log_output = LogOutput(client.presigned_put_object, 'test_presigned_put_object_default_expiry')
test_presigned_put_object_default_expiry(client, log_output)
log_output = LogOutput(client.presigned_put_object, 'test_presigned_put_object_expiry_5sec')
test_presigned_put_object_expiry_5sec(client, log_output)
log_output = LogOutput(client.presigned_post_policy, 'test_presigned_post_policy')
test_presigned_post_policy(client, log_output)
log_output = LogOutput(client.put_object, 'test_thread_safe')
test_thread_safe(client, testfile, log_output)
log_output = LogOutput(client.get_bucket_policy, 'test_get_bucket_policy')
test_get_bucket_policy(client,log_output)
log_output = LogOutput(client.set_bucket_policy, 'test_set_bucket_policy_readonly')
test_set_bucket_policy_readonly(client, log_output)
log_output = LogOutput(client.set_bucket_policy, 'test_set_bucket_policy_readwrite')
test_set_bucket_policy_readwrite(client, log_output)
else:
# Quick mode tests
log_output = LogOutput(client.make_bucket, 'test_make_bucket_default_region')
test_make_bucket_default_region(client, log_output)
log_output = LogOutput(client.list_buckets, 'test_list_buckets')
test_list_buckets(client, log_output)
log_output = LogOutput(client.put_object, 'test_put_object')
test_put_object(client, log_output)
log_output = LogOutput(client.stat_object, 'test_stat_object')
test_stat_object(client, log_output)
log_output = LogOutput(client.get_object, 'test_get_object')
test_get_object(client, log_output)
log_output = LogOutput(client.list_objects, 'test_list_objects')
test_list_objects(client, log_output)
log_output = LogOutput(client.remove_incomplete_upload, 'test_remove_incomplete_upload')
test_remove_incomplete_upload(client, log_output)
log_output = LogOutput(client.presigned_get_object, 'test_presigned_get_object_default_expiry')
test_presigned_get_object_default_expiry(client, log_output)
log_output = LogOutput(client.presigned_put_object, 'test_presigned_put_object_default_expiry')
test_presigned_put_object_default_expiry(client, log_output)
log_output = LogOutput(client.presigned_post_policy, 'test_presigned_post_policy')
test_presigned_post_policy(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_no_copy_condition')
test_copy_object_no_copy_condition(client, log_output)
log_output = LogOutput(client.get_bucket_policy, 'test_get_bucket_policy')
test_get_bucket_policy(client,log_output)
log_output = LogOutput(client.set_bucket_policy, 'test_set_bucket_policy_readonly')
test_set_bucket_policy_readonly(client, log_output)
# Remove all objects.
log_output = LogOutput(client.remove_object, 'test_remove_object')
test_remove_object(client, log_output)
log_output = LogOutput(client.remove_objects, 'test_remove_objects')
test_remove_objects(client, log_output)
log_output = LogOutput(client.remove_bucket, 'test_remove_bucket')
test_remove_bucket(client, log_output)
# Remove temporary files.
if not is_mint_env:
os.remove(testfile)
os.remove(largefile)
except Exception as err:
print(log_output.json_report(err))
exit(1)
if __name__ == "__main__":
# Execute only if run as a script
main()<|fim▁end|> | MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name+"-2", MB_1_reader, MB_1)
# List all object paths in bucket. |
<|file_name|>date_and_time.py<|end_file_name|><|fim▁begin|>"""Template filters and tags for helping with dates and datetimes"""
# pylint: disable=W0702,C0103
from django import template
from nav.django.settings import DATETIME_FORMAT, SHORT_TIME_FORMAT
from django.template.defaultfilters import date, time
from datetime import timedelta<|fim▁hole|>
@register.filter
def default_datetime(value):
"""Returns the date as represented by the default datetime format"""
try:
v = date(value, DATETIME_FORMAT)
except:
return value
return v
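# Illustrative template usage (assumes this library is loaded in the template,
# e.g. with {% load date_and_time %}; the variable name is hypothetical):
#
#     {{ netbox.last_seen|default_datetime }}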
@register.filter
def short_time_format(value):
"""Returns the value formatted as a short time format
The SHORT_TIME_FORMAT is a custom format not available in the template
"""
try:
return time(value, SHORT_TIME_FORMAT)
except:
return value
@register.filter
def remove_microseconds(delta):
"""Removes microseconds from timedelta"""
try:
return delta - timedelta(microseconds=delta.microseconds)
except:
return delta<|fim▁end|> |
register = template.Library()
|
<|file_name|>images_client.py<|end_file_name|><|fim▁begin|># Copyright 2012 OpenStack Foundation
# All Rights Reserved.<|fim▁hole|># a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from six.moves.urllib import parse as urllib
from tempest_lib import exceptions as lib_exc
from tempest.api_schema.response.compute.v2_1 import images as schema
from tempest.common import service_client
from tempest.common import waiters
class ImagesClient(service_client.ServiceClient):
def create_image(self, server_id, name, meta=None):
"""Creates an image of the original server."""
post_body = {
'createImage': {
'name': name,
}
}
if meta is not None:
post_body['createImage']['metadata'] = meta
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body)
self.validate_response(schema.create_image, resp, body)
return service_client.ResponseBody(resp, body)
def list_images(self, detail=False, **params):
"""Returns a list of all images filtered by any parameters."""
url = 'images'
_schema = schema.list_images
if detail:
url += '/detail'
_schema = schema.list_images_details
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(_schema, resp, body)
return service_client.ResponseBodyList(resp, body['images'])
def show_image(self, image_id):
"""Returns the details of a single image."""
resp, body = self.get("images/%s" % image_id)
self.expected_success(200, resp.status)
body = json.loads(body)
self.validate_response(schema.get_image, resp, body)
return service_client.ResponseBody(resp, body['image'])
def delete_image(self, image_id):
"""Deletes the provided image."""
resp, body = self.delete("images/%s" % image_id)
self.validate_response(schema.delete, resp, body)
return service_client.ResponseBody(resp, body)
def wait_for_image_status(self, image_id, status):
"""Waits for an image to reach a given status."""
waiters.wait_for_image_status(self, image_id, status)
def list_image_metadata(self, image_id):
"""Lists all metadata items for an image."""
resp, body = self.get("images/%s/metadata" % image_id)
body = json.loads(body)
self.validate_response(schema.image_metadata, resp, body)
return service_client.ResponseBody(resp, body['metadata'])
def set_image_metadata(self, image_id, meta):
"""Sets the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.put('images/%s/metadata' % image_id, post_body)
body = json.loads(body)
self.validate_response(schema.image_metadata, resp, body)
return service_client.ResponseBody(resp, body['metadata'])
def update_image_metadata(self, image_id, meta):
"""Updates the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('images/%s/metadata' % image_id, post_body)
body = json.loads(body)
self.validate_response(schema.image_metadata, resp, body)
return service_client.ResponseBody(resp, body['metadata'])
def show_image_metadata_item(self, image_id, key):
"""Returns the value for a specific image metadata key."""
resp, body = self.get("images/%s/metadata/%s" % (image_id, key))
body = json.loads(body)
self.validate_response(schema.image_meta_item, resp, body)
return service_client.ResponseBody(resp, body['meta'])
def set_image_metadata_item(self, image_id, key, meta):
"""Sets the value for a specific image metadata key."""
post_body = json.dumps({'meta': meta})
resp, body = self.put('images/%s/metadata/%s' % (image_id, key),
post_body)
body = json.loads(body)
self.validate_response(schema.image_meta_item, resp, body)
return service_client.ResponseBody(resp, body['meta'])
def delete_image_metadata_item(self, image_id, key):
"""Deletes a single image metadata key/value pair."""
resp, body = self.delete("images/%s/metadata/%s" %
(image_id, key))
self.validate_response(schema.delete, resp, body)
return service_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.show_image(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'image'<|fim▁end|> | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain |
<|file_name|>stat.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
Created on 21 janv. 2016
@author: christian
'''
import falcon
import os
import config
import db
class Stat(object):<|fim▁hole|> '''
def on_get(self, req, resp):
'''Return global statistics
'''
dbc = db.connect()
cur = dbc.cursor()
query = """select format('{"nb_maps":%s,"nb_addr":%s,"last_map":"%s"}',
count(*),
count(distinct(address)),
left(max(time)::text,19)) as stats from maps;"""
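        # The query assembles the JSON payload directly in SQL; a typical
        # result row (values are illustrative only) looks like:
        #     {"nb_maps":42,"nb_addr":17,"last_map":"2016-01-21 10:30:00"}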
cur.execute(query)
stats = cur.fetchone()[0]
resp.set_header('X-Powered-By', 'OpenEvacMap')
if stats is None:
resp.status = falcon.HTTP_404
else:
resp.status = falcon.HTTP_200
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Headers',
'X-Requested-With')
resp.body = (stats)
cur.close()
dbc.close()<|fim▁end|> | '''
Get global statistics |
<|file_name|>ui.js<|end_file_name|><|fim▁begin|>'use strict';
/* global $: true */
/* global animation: true */
/* global boidWeights: true */
//Slider for selecting initial number of boids
//---------------------------------------------
$('#numBoidsSlider').slider({
min: 0,
max: 400,
step: 10,
value: animation.numBoids
});
$('#numBoidsVal').text(animation.numBoids);
$('#numBoidsSlider').on('slide', function (slideEvt) {
$('#numBoidsVal').text(slideEvt.value);
animation.numBoids = slideEvt.value;
});
//Sliders for weights
//--------------------
$('#slider1').slider({
min: 0,
max: 20,
step: 0.1,
value: boidWeights.separation
});
$('#slider1val').text(boidWeights.separation);
$('#slider1').on('slide', function (slideEvt) {
$('#slider1val').text(slideEvt.value);
boidWeights.separation = slideEvt.value;<|fim▁hole|>});
$('#slider2').slider({
min: 0,
max: 20,
step: 0.1,
value: boidWeights.alginment
});
$('#slider2val').text(boidWeights.alginment);
$('#slider2').on('slide', function (slideEvt) {
    $('#slider2val').text(slideEvt.value);
    boidWeights.alginment = slideEvt.value;
});
$('#slider3').slider({
min: 0,
max: 20,
step: 0.1,
value: boidWeights.cohesion
});
$('#slider3val').text(boidWeights.cohesion);
$('#slider3').on('slide', function (slideEvt) {
$('#slider3val').text(slideEvt.value);
boidWeights.cohesion = slideEvt.value;
});
$('#slider4').slider({
min: 0,
max: 20,
step: 0.1,
value: boidWeights.obstacle
});
$('#slider4val').text(boidWeights.obstacle);
$('#slider4').on('slide', function (slideEvt) {
$('#slider4val').text(slideEvt.value);
boidWeights.obstacle = slideEvt.value;
});
$('#slider5').slider({
min: 0,
max: 20,
step: 0.1,
value: boidWeights.predators
});
$('#slider5val').text(boidWeights.predators);
$('#slider5').on('slide', function (slideEvt) {
$('#slider5val').text(slideEvt.value);
boidWeights.predators = slideEvt.value;
});<|fim▁end|> | |
<|file_name|>express.js<|end_file_name|><|fim▁begin|>var express = require('express'),
logger = require('morgan'),
bodyParser = require('body-parser'),
cookieParser = require('cookie-parser'),
session = require('express-session'),
passport = require('passport');
module.exports = function (app, config) {
    app.set('views', config.rootPath + '/server/views');
app.set('view engine', 'ejs');
app.use(logger('dev'));
app.use(cookieParser());
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: false }));
app.use(session({
secret: 'interval trainer unicorns',
name: 'jab-ita',
//store: sessionStore, // connect-mongo session store
proxy: true,
resave: true,
saveUninitialized: true
}));
//app.use(session({secret: 'interval trainer unicorns'}));
app.use(passport.initialize());<|fim▁hole|> app.use(passport.session());
//app.use(express.static(config.rootPath, '/public'));
app.use(function (req, res, next) {
res.header("Access-Control-Allow-Origin", "*");
res.setHeader('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE');
res.header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept");
next();
});
// catch 404 and forward to error handler
app.use(function (req, res, next) {
var err = new Error('Not Found');
err.status = 404;
next(err);
});
};<|fim▁end|> |