query (stringlengths 8–6.75k) | document (stringlengths 9–1.89M) | negatives (sequencelengths 19) | metadata (dict) |
---|---|---|---|
Returns the value of the 'go_package' option of the first .proto file found in the same directory as projectFile | func detectGoPackageForProject(projectFile string) (string, error) {
var goPkg string
projectDir := filepath.Dir(projectFile)
if err := filepath.Walk(projectDir, func(protoFile string, info os.FileInfo, err error) error {
// already set
if goPkg != "" {
return nil
}
if !strings.HasSuffix(protoFile, ".proto") {
return nil
}
// search for go_package on protos in the same dir as the project.json
if projectDir != filepath.Dir(protoFile) {
return nil
}
content, err := ioutil.ReadFile(protoFile)
if err != nil {
return err
}
lines := strings.Split(string(content), "\n")
for _, line := range lines {
goPackage := goPackageStatementRegex.FindStringSubmatch(line)
if len(goPackage) == 0 {
continue
}
if len(goPackage) != 2 {
return errors.Errorf("parsing go_package error: from %v found %v", line, goPackage)
}
goPkg = goPackage[1]
break
}
return nil
}); err != nil {
return "", err
}
if goPkg == "" {
return "", errors.Errorf("no go_package statement found in root dir of project %v", projectFile)
}
return goPkg, nil
} | [
"func (c *common) GetPackage() string { return c.file.GetPackage() }",
"func (pkg *goPackage) firstGoFile() string {\n\tgoSrcs := []platformStringsBuilder{\n\t\tpkg.library.sources,\n\t\tpkg.binary.sources,\n\t\tpkg.test.sources,\n\t}\n\tfor _, sb := range goSrcs {\n\t\tif sb.strs != nil {\n\t\t\tfor s := range sb.strs {\n\t\t\t\tif strings.HasSuffix(s, \".go\") {\n\t\t\t\t\treturn s\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (fd *File) GoPackagePath() string {\n\treturn fd.builder.GoPackagePath\n}",
"func GoPackage(packageName string) string {\n\tif packageName == \"\" {\n\t\treturn \"\"\n\t}\n\tsplit := strings.Split(packageName, \".\")\n\treturn split[len(split)-1] + \"pb\"\n}",
"func goPackageName(pkg *protoPackage) string {\n\tif opt, ok := pkg.options[\"go_package\"]; ok {\n\t\tif i := strings.IndexByte(opt, ';'); i >= 0 {\n\t\t\treturn opt[i+1:]\n\t\t} else if i := strings.LastIndexByte(opt, '/'); i >= 0 {\n\t\t\treturn opt[i+1:]\n\t\t} else {\n\t\t\treturn opt\n\t\t}\n\t}\n\tif pkg.name != \"\" {\n\t\treturn strings.Replace(pkg.name, \".\", \"_\", -1)\n\t}\n\tif len(pkg.files) == 1 {\n\t\tfor s := range pkg.files {\n\t\t\treturn strings.TrimSuffix(s, \".proto\")\n\t\t}\n\t}\n\treturn \"\"\n}",
"func GoPackageOption(options []proto.Option) (string, string, bool) {\n\tfor _, opt := range options {\n\t\tif opt.Name != \"go_package\" {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.SplitN(opt.Constant.Source, \";\", 2)\n\t\tswitch len(parts) {\n\t\tcase 0:\n\t\t\treturn \"\", \"\", true\n\t\tcase 1:\n\t\t\treturn parts[0], \"\", true\n\t\tcase 2:\n\t\t\treturn parts[0], parts[1], true\n\t\tdefault:\n\t\t\treturn parts[0], strings.Join(parts[1:], \";\"), true\n\t\t}\n\t}\n\n\treturn \"\", \"\", false\n}",
"func goPackageName(d *descriptor.FileDescriptorProto) (name string, explicit bool) {\n\t// Does the file have a \"go_package\" option?\n\tif _, pkg, ok := goPackageOption(d); ok {\n\t\treturn pkg, true\n\t}\n\n\t// Does the file have a package clause?\n\tif pkg := d.GetPackage(); pkg != \"\" {\n\t\treturn pkg, false\n\t}\n\t// Use the file base name.\n\treturn baseName(d.GetName()), false\n}",
"func (d *FileDescriptor) goPackageName() (name string, explicit bool) {\n\t// Does the file have a \"go_package\" option?\n\tif _, pkg, ok := d.goPackageOption(); ok {\n\t\treturn pkg, true\n\t}\n\n\t// Does the file have a package clause?\n\tif pkg := d.GetPackage(); pkg != \"\" {\n\t\treturn pkg, false\n\t}\n\t// Use the file base name.\n\treturn baseName(d.GetName()), false\n}",
"func (c *common) PackageName() string { return uniquePackageOf(c.file) }",
"func (d *FileDescriptor) goFileName(pathType pathType) string {\n\tname := *d.Name\n\tif ext := path.Ext(name); ext == \".proto\" || ext == \".protodevel\" {\n\t\tname = name[:len(name)-len(ext)]\n\t}\n\tname += \".cobra.pb.go\"\n\n\tif pathType == pathTypeSourceRelative {\n\t\treturn name\n\t}\n\n\t// Does the file have a \"go_package\" option?\n\t// If it does, it may override the filename.\n\tif impPath, _, ok := d.goPackageOption(); ok && impPath != \"\" {\n\t\t// Replace the existing dirname with the declared import path.\n\t\t_, name = path.Split(name)\n\t\tname = path.Join(impPath, name)\n\t\treturn name\n\t}\n\n\treturn name\n}",
"func (pp *protoPackage) pkgPath() string {\n\treturn strings.Replace(pp.Pkg, \".\", \"/\", -1)\n}",
"func goPkg(fileName string) (string, error) {\n\tcontent, err := os.ReadFile(fileName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar pkgName string\n\tif match := goPkgOptRe.FindSubmatch(content); len(match) > 0 {\n\t\tpn, err := strconv.Unquote(string(match[1]))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tpkgName = pn\n\t}\n\tif p := strings.IndexRune(pkgName, ';'); p > 0 {\n\t\tpkgName = pkgName[:p]\n\t}\n\treturn pkgName, nil\n}",
"func (project Project) Package() (string, error) {\n\n\tif project.packageName != \"\" {\n\t\treturn project.packageName, nil\n\t}\n\n\tgoModPath := project.RelPath(GoModFileName)\n\tif !project.FileExists(goModPath) {\n\t\treturn \"\", errors.New(\"Failed to determine the package name for this project\")\n\t}\n\n\tb, err := ioutil.ReadFile(goModPath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to read the go.mod file\")\n\t}\n\n\tmod, err := gomod.Parse(goModPath, b)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to parse the go.mod file\")\n\t}\n\n\tproject.packageName = strings.TrimSuffix(mod.Name, \"/\")\n\n\treturn project.packageName, nil\n\n}",
"func (f *FileStruct) GetPersistPackageOption() string {\n\tif f.Desc == nil || f.Desc.GetOptions() == nil {\n\t\treturn \"\"\n\t}\n\tif proto.HasExtension(f.Desc.GetOptions(), persist.E_Package) {\n\t\tpkg, err := proto.GetExtension(f.Desc.GetOptions(), persist.E_Package)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Debug(\"Error\")\n\t\t\treturn \"\"\n\t\t}\n\t\t//logrus.WithField(\"pkg\", *pkg.(*string)).Info(\"Package\")\n\t\treturn *pkg.(*string)\n\t}\n\tlogrus.WithField(\"File Options\", f.Desc.GetOptions()).Debug(\"file options\")\n\treturn \"\"\n}",
"func Which(s protoreflect.FullName) ProtoFile {\r\n\treturn wellKnownTypes[s]\r\n}",
"func GetPackageName(source string) string {\n\tfileNode, err := parser.ParseFile(\"\", source, nil, parser.ImportsOnly)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn fileNode.Name.Name()\n}",
"func goPkg(fname string) (string, error) {\n\tcontent, err := ioutil.ReadFile(fname)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar pkgName string\n\tif match := goPkgOptRe.FindSubmatch(content); len(match) > 0 {\n\t\tpn, err := strconv.Unquote(string(match[1]))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tpkgName = pn\n\t}\n\tif p := strings.IndexRune(pkgName, ';'); p > 0 {\n\t\tpkgName = pkgName[:p]\n\t}\n\treturn pkgName, nil\n}",
"func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {\n\tif imp, ok := d.(protoreflect.FileImport); ok {\n\t\td = imp.FileDescriptor\n\t}\n\ttype canProto interface {\n\t\tFileDescriptorProto() *descriptorpb.FileDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.FileDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok {\n\t\t\treturn fd\n\t\t}\n\t}\n\treturn protodesc.ToFileDescriptorProto(d)\n}",
"func goPackageOption(d *descriptor.FileDescriptorProto) (impPath, pkg string, ok bool) {\n\tpkg = d.GetOptions().GetGoPackage()\n\tif pkg == \"\" {\n\t\treturn\n\t}\n\tok = true\n\t// The presence of a slash implies there's an import path.\n\tslash := strings.LastIndex(pkg, \"/\")\n\tif slash < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = pkg, pkg[slash+1:]\n\t// A semicolon-delimited suffix overrides the package name.\n\tsc := strings.IndexByte(impPath, ';')\n\tif sc < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = impPath[:sc], impPath[sc+1:]\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
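The `detectGoPackageForProject` document above uses a package-level `goPackageStatementRegex` that the snippet never defines. A minimal, self-contained sketch of what that regex and its capture behavior might look like, assuming the standard `option go_package = "...";` proto syntax (the actual pattern in the source repository may differ):

```go
package main

import (
	"fmt"
	"regexp"
)

// Hypothetical stand-in for the undefined goPackageStatementRegex: capture
// group 1 holds the quoted option value, e.g. "github.com/org/repo/gen;genpb".
var goPackageStatementRegex = regexp.MustCompile(`^\s*option\s+go_package\s*=\s*"([^"]+)"\s*;`)

func main() {
	line := `option go_package = "github.com/org/repo/gen;genpb";`
	if m := goPackageStatementRegex.FindStringSubmatch(line); len(m) == 2 {
		fmt.Println(m[1]) // github.com/org/repo/gen;genpb
	}
}
```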
NewQueueManager instantiates a new QueueManager object. This constructor will assign default values to properties that have them defined and makes sure properties required by the API are set; the set of arguments will change when the set of required properties changes | func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue) *QueueManager {
this := QueueManager{}
this.Name = name
this.Clusters = clusters
this.AliasQueues = aliasQueues
this.RemoteQueues = remoteQueues
this.ClusterQueues = clusterQueues
return &this
} | [
"func New() *QueueManager {\n\treturn &QueueManager{\n\t\thandlers: make(map[string]Handler),\n\t}\n}",
"func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name())\n\t}\n\tt := &QueueManager{\n\t\tlogger: logger,\n\t\tflushDeadline: flushDeadline,\n\t\tcfg: cfg,\n\t\texternalLabels: externalLabels,\n\t\trelabelConfigs: relabelConfigs,\n\t\tclient: client,\n\t\tqueueName: client.Name(),\n\n\t\tlogLimiter: rate.NewLimiter(logRateLimit, logBurst),\n\t\tnumShards: cfg.MinShards,\n\t\treshardChan: make(chan int),\n\t\tquit: make(chan struct{}),\n\n\t\tsamplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOut: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t}\n\tt.shards = t.newShards(t.numShards)\n\tnumShards.WithLabelValues(t.queueName).Set(float64(t.numShards))\n\tshardCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))\n\n\t// Initialize counter labels to zero.\n\tsentBatchDuration.WithLabelValues(t.queueName)\n\tsucceededSamplesTotal.WithLabelValues(t.queueName)\n\tfailedSamplesTotal.WithLabelValues(t.queueName)\n\tdroppedSamplesTotal.WithLabelValues(t.queueName)\n\n\treturn t\n}",
"func NewQueueManager(channelLength int) *QueueManager {\n\tmanager := &QueueManager{\n\t\tmapping: make(map[string]*Queue),\n\t\tchannelLength: channelLength,\n\t\tevents: make(chan *QueueEvent, 1),\n\t\tlistenerChan: make(chan queueEventListener, 1),\n\t\tlisteners: make([]chan<- *QueueEvent, 0),\n\t}\n\tmanager.Add(DefaultQueueName)\n\tgo manager.run()\n\treturn manager\n}",
"func NewQueueManager(q amboy.Queue) Manager {\n\treturn &queueManager{\n\t\tqueue: q,\n\t}\n}",
"func NewQueue() *Queue {\n return &Queue{member: make([]interface{}, 0)}\n}",
"func (e *Engine) newQueue(\n\tcfg configkit.RichApplication,\n\tds persistence.DataStore,\n) *queue.Queue {\n\treturn &queue.Queue{\n\t\tRepository: ds,\n\t\tMarshaler: e.opts.Marshaler,\n\t\t// TODO: https://github.com/dogmatiq/verity/issues/102\n\t\t// Make buffer size configurable.\n\t\tBufferSize: 0,\n\t}\n}",
"func NewQueueManagerWithDefaults() *QueueManager {\n\tthis := QueueManager{}\n\treturn &this\n}",
"func SetupQueue(handler interface{}, fallbackHandler interface{}) {\n\tq = memqueue.NewQueue(&msgqueue.Options{\n\t\tHandler: handler,\n\t\tFallbackHandler: fallbackHandler,\n\t\tMaxWorkers: config.Config.MaxWorkers,\n\t\tMaxFetchers: config.Config.MaxFetchers,\n\t\tRetryLimit: 1,\n\t})\n}",
"func NewQueue() Queue {\n\treturn Queue{}\n}",
"func NewQueue(cli *clientv3.Client) (Queue, error) {\n\t// issue linearized read to ensure leader election\n\tglog.Infof(\"GET request to endpoint %v\", cli.Endpoints())\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t_, err := cli.Get(ctx, \"foo\")\n\tcancel()\n\tglog.Infof(\"GET request succeeded on endpoint %v\", cli.Endpoints())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx, cancel = context.WithCancel(context.Background())\n\treturn &queue{\n\t\tcli: cli,\n\t\trootCtx: ctx,\n\t\trootCancel: cancel,\n\t}, nil\n}",
"func newQueueMeta(conf *Conf) *queueMeta {\n\treturn &queueMeta{conf: conf}\n}",
"func NewQueue(maximumCapacity int, initialCapacity int, factory TokenFactory) *Queue {\n\tq := &Queue{\n\t\tmaxCapacity: maximumCapacity,\n\t\tavailableTokens: make(chan (Token), maximumCapacity),\n\t\tcommittedTokens: make(chan (Token), maximumCapacity),\n\t\tdiscardTokens: make(chan (Token), maximumCapacity),\n\t\tcloseTokens: make(chan (Token)),\n\t}\n\n\tfor i := 0; i < maximumCapacity; i++ {\n\t\ttoken := factory()\n\t\tif token == nil {\n\t\t\treturn nil\n\t\t}\n\t\tq.discardTokens <- token\n\t\tq.validTokens = append(q.validTokens, token)\n\t}\n\n\tq.EnableDisableTokens(initialCapacity)\n\n\treturn q\n}",
"func NewQueue(l int) *Queue {\n\tif l == -1 {\n\t\treturn &Queue{\n\t\t\tQueue: make([]types.Event, 0),\n\t\t\tL: int(^uint(0) >> 1), // max integer value, architecture independent\n\t\t}\n\t}\n\tq := &Queue{\n\t\tQueue: make([]types.Event, 0, l),\n\t\tL: l,\n\t}\n\tlog.WithFields(log.Fields{\"Capacity\": q.L}).Debugf(\"Creating queue\")\n\treturn q\n}",
"func setupManager(username string, password string, brokerIp string, brokerPort int, manager *Manager, exchange string, queueName string) error {\n\tamqpURI := getAmqpUri(username, password, brokerIp, brokerPort)\n\tmanager.logger.Debugf(\"dialing %s\", amqpURI)\n\tvar err error\n\tmanager.Connection, err = amqp.Dial(amqpURI)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Connection, getting Channel\")\n\tmanager.Channel, err = manager.Connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Channel, declaring Exchange (%q)\", exchange)\n\n\tmanager.logger.Debugf(\"declared Exchange, declaring Queue %q\", queueName)\n\tqueue, err := manager.Channel.QueueDeclare(\n\t\tqueueName,\n\t\ttrue,\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"declared Queue (%q, %d messages, %d consumers), binding to Exchange\",\n\t\tqueue.Name, queue.Messages, queue.Consumers)\n\n\tif err = manager.Channel.QueueBind(\n\t\tqueue.Name, // name of the queue\n\t\tqueue.Name, // bindingKey\n\t\texchange, // sourceExchange\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debug(\"Queue bound to Exchange, starting Consume\")\n\treturn nil\n}",
"func New(ttl time.Duration) *Queue {\n\n\treturn &Queue{\n\t\ttaskList: []*apiq.Task{},\n\t\tTTL: ttl,\n\t}\n}",
"func NewQueue() *queue {\n\treturn &queue{nil, nil, 0}\n}",
"func NewQueue(action func(interface{}) error) *QueueWorker {\n\treturn &QueueWorker{\n\t\taction: action,\n\t\tlatch: &Latch{},\n\t\tmaxWork: DefaultQueueWorkerMaxWork,\n\t}\n}",
"func NewQueue(maxWorkers int, maxQueue int) *Queue {\n\tq := make(chan Job, maxQueue)\n\treturn &Queue{\n\t\tq,\n\t\ttrue,\n\t\t&Dispatcher{\n\t\t\tjobQueue: q,\n\t\t\tworkerPool: make(chan chan Job, maxWorkers),\n\t\t\tMaxWorkers: maxWorkers,\n\t\t},\n\t}\n}",
"func (t *OpenconfigQos_Qos_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
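Since several rows in this sample revolve around the same generated `QueueManager` type, here is a hedged, self-contained sketch of the constructor's calling pattern; the stand-in struct and empty queue types below are illustrative assumptions, not the real generated package:

```go
package main

import "fmt"

// Stand-in types so the sketch compiles on its own; the real generated
// package defines richer structs for each queue kind.
type (
	AliasQueue   struct{}
	RemoteQueue  struct{}
	ClusterQueue struct{}
)

type QueueManager struct {
	Name          string
	Clusters      []string
	AliasQueues   []AliasQueue
	RemoteQueues  []RemoteQueue
	ClusterQueues []ClusterQueue
}

// Mirrors the generated constructor shown above: every required field is a
// positional argument, so adding a required property changes the signature.
func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue) *QueueManager {
	return &QueueManager{name, clusters, aliasQueues, remoteQueues, clusterQueues}
}

func main() {
	qm := NewQueueManager("QM1", []string{"CLUSTER1", "CLUSTER2"}, nil, nil, nil)
	fmt.Println(qm.Name, qm.Clusters) // QM1 [CLUSTER1 CLUSTER2]
}
```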
NewQueueManagerWithDefaults instantiates a new QueueManager object. This constructor will only assign default values to properties that have them defined, but it doesn't guarantee that properties required by the API are set | func NewQueueManagerWithDefaults() *QueueManager {
this := QueueManager{}
return &this
} | [
"func New() *QueueManager {\n\treturn &QueueManager{\n\t\thandlers: make(map[string]Handler),\n\t}\n}",
"func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name())\n\t}\n\tt := &QueueManager{\n\t\tlogger: logger,\n\t\tflushDeadline: flushDeadline,\n\t\tcfg: cfg,\n\t\texternalLabels: externalLabels,\n\t\trelabelConfigs: relabelConfigs,\n\t\tclient: client,\n\t\tqueueName: client.Name(),\n\n\t\tlogLimiter: rate.NewLimiter(logRateLimit, logBurst),\n\t\tnumShards: cfg.MinShards,\n\t\treshardChan: make(chan int),\n\t\tquit: make(chan struct{}),\n\n\t\tsamplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOut: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t}\n\tt.shards = t.newShards(t.numShards)\n\tnumShards.WithLabelValues(t.queueName).Set(float64(t.numShards))\n\tshardCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))\n\n\t// Initialize counter labels to zero.\n\tsentBatchDuration.WithLabelValues(t.queueName)\n\tsucceededSamplesTotal.WithLabelValues(t.queueName)\n\tfailedSamplesTotal.WithLabelValues(t.queueName)\n\tdroppedSamplesTotal.WithLabelValues(t.queueName)\n\n\treturn t\n}",
"func NewQueueManager(channelLength int) *QueueManager {\n\tmanager := &QueueManager{\n\t\tmapping: make(map[string]*Queue),\n\t\tchannelLength: channelLength,\n\t\tevents: make(chan *QueueEvent, 1),\n\t\tlistenerChan: make(chan queueEventListener, 1),\n\t\tlisteners: make([]chan<- *QueueEvent, 0),\n\t}\n\tmanager.Add(DefaultQueueName)\n\tgo manager.run()\n\treturn manager\n}",
"func DefaultQueue(queue string) func(*Locker) error {\n\treturn func(l *Locker) error {\n\t\tl.DefaultQueue = queue\n\t\treturn nil\n\t}\n}",
"func NewDefaultClient() QueueClient {\n\treturn &inMemoryQueue{queues: make(map[string][]string)}\n}",
"func Default() *JobManager {\n\tif _default == nil {\n\t\t_defaultLock.Lock()\n\t\tdefer _defaultLock.Unlock()\n\n\t\tif _default == nil {\n\t\t\t_default = New()\n\t\t}\n\t}\n\treturn _default\n}",
"func NewDefault(db *bolt.DB) (q queue.WaitQueue, err error) {\n\treturn New(db, DefaultBucket, DefaultMemQueueSize, DefaultBufSize)\n}",
"func DefaultQueue(queue string) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tc.DefaultQueue = queue\n\t\treturn nil\n\t}\n}",
"func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue, ) *QueueManager {\n\tthis := QueueManager{}\n\tthis.Name = name\n\tthis.Clusters = clusters\n\tthis.AliasQueues = aliasQueues\n\tthis.RemoteQueues = remoteQueues\n\tthis.ClusterQueues = clusterQueues\n\treturn &this\n}",
"func NewRemoteQueueWithDefaults() *RemoteQueue {\n\tthis := RemoteQueue{}\n\treturn &this\n}",
"func DefaultQueueSettings() QueueSettings {\n\treturn QueueSettings{\n\t\tEnabled: true,\n\t\tNumConsumers: 10,\n\t\t// For 5000 queue elements at 100 requests/sec gives about 50 sec of survival of destination outage.\n\t\t// This is a pretty decent value for production.\n\t\t// User should calculate this from the perspective of how many seconds to buffer in case of a backend outage,\n\t\t// multiply that by the number of requests per seconds.\n\t\tQueueSize: 5000,\n\t\tPersistentStorageEnabled: false,\n\t}\n}",
"func SetupQueue(handler interface{}, fallbackHandler interface{}) {\n\tq = memqueue.NewQueue(&msgqueue.Options{\n\t\tHandler: handler,\n\t\tFallbackHandler: fallbackHandler,\n\t\tMaxWorkers: config.Config.MaxWorkers,\n\t\tMaxFetchers: config.Config.MaxFetchers,\n\t\tRetryLimit: 1,\n\t})\n}",
"func NewQueueManager(q amboy.Queue) Manager {\n\treturn &queueManager{\n\t\tqueue: q,\n\t}\n}",
"func NewDefaultManager() Manager {\n\ts := &defaultManager{}\n\ts.completed.Store(false)\n\ts.managedResourceInfos = make(map[*ResourceInfo]struct{})\n\ts.managedStatus = make(map[schema.GroupResource]*updateStatus)\n\treturn s\n}",
"func GetDefaultQueueSetting(queueName string) TransactionTransportConnectionQueueSettings {\n\treturn TransactionTransportConnectionQueueSettings{\n\t\tQueueName: queueName,\n\t\tDurable: true,\n\t\tAutoDelete: false,\n\t\tExclusive: false,\n\t\tNoWait: false,\n\t\tArgs: nil,\n\t}\n}",
"func NewDefault(m map[string]interface{}) (share.Manager, error) {\n\tc := &config{}\n\tif err := mapstructure.Decode(m, c); err != nil {\n\t\terr = errors.Wrap(err, \"error creating a new manager\")\n\t\treturn nil, err\n\t}\n\n\ts, err := metadata.NewCS3Storage(c.GatewayAddr, c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexer := indexer.CreateIndexer(s)\n\n\tclient, err := pool.GetGatewayServiceClient(c.GatewayAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(client, s, indexer)\n}",
"func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {\n\tif maxFlowSize > maxQueueSize {\n\t\tpanic(\"MaxFlowSize > MaxQueueSize\")\n\t}\n\n\tif helper == nil {\n\t\tpanic(\"helper is nil\")\n\t}\n\n\tq := new(Queue)\n\tq.cond.L = &q.lock\n\tq.maxQueueSize = maxQueueSize\n\tq.maxFlowSize = maxFlowSize\n\tq.helper = helper\n\tq.flows = make(map[uint64]*flowInfo)\n\n\treturn q\n}",
"func New(mqURL string) (models.MessageQueue, error) {\n\tmq, err := newmq(mqURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &metricMQ{mq}, nil\n}",
"func NewDefaultMQService() *mqServiceImpl {\n\treturn &mqServiceImpl{}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetClusters returns the Clusters field value | func (o *QueueManager) GetClusters() []string {
if o == nil {
var ret []string
return ret
}
return o.Clusters
} | [
"func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tstatus = Failure\n\tmonitor := metrics.StartExternalCall(externalSvcName, \"Fetch Clusters\")\n\tdefer func() { monitor.RecordWithStatus(status) }()\n\n\tresp, err := mlopsHttp.ExecuteHTTPRequest(\n\t\tctx,\n\t\tc.client,\n\t\thcpHostURL+clusterPathV2,\n\t\thttp.MethodGet,\n\t\tmap[string]string{sessionHeader: session},\n\t\tbytes.NewReader(nil),\n\t)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, errors.Wrapf(err, \"while fetching clusters in MLOps controller platform.\")\n\t}\n\tresp.Body.Close()\n\n\tstatus = Success\n\n\terr = c.deleteSession(ctx, hcpHostURL, session)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tclustersResp := models.ClusterResp{}\n\tjson.NewDecoder(resp.Body).Decode(&clustersResp)\n\n\treturn clustersResp, nil\n}",
"func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}",
"func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/get-clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetClustersOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetClusters: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func Clusters() (clusters map[string][]string) {\n\tclusters = make(map[string][]string)\n\tif addr := AccessConsulAddr(); addr != \"\" && Region() != \"\" {\n\t\treturn getClustersFromConsul(addr, Region())\n\t}\n\tcs := Get(\"Key-ClusterMgrCluster\").(map[string]string)\n\tfor key, value := range cs {\n\t\tclusters[key] = strings.Split(value, \" \")\n\t}\n\treturn\n}",
"func (p *Client) Clusters(namespace string) *Cluster {\n\treturn &Cluster{p.ProvisioningV1Interface.Clusters(namespace), p.ts}\n}",
"func (a ClustersAPI) Get(clusterID string) (httpmodels.GetResp, error) {\n\tvar clusterInfo httpmodels.GetResp\n\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/get\", data, nil)\n\tif err != nil {\n\t\treturn clusterInfo, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterInfo)\n\treturn clusterInfo, err\n}",
"func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func clusters(ctx context.Context, projID string) ([]string, error) {\n\tLogf(ctx, \"finding your GKE clusters...\")\n\treturn gcloud(\n\t\tctx,\n\t\t\"--project\", projID,\n\t\t\"container\",\n\t\t\"clusters\",\n\t\t\"list\",\n\t\t\"--format\", \"value(name)\",\n\t)\n}",
"func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (e *ECSClient) GetClusters() ([]*types.ECSCluster, error) {\n\tcArns := []*string{}\n\tparams := &ecs.ListClustersInput{\n\t\tMaxResults: aws.Int64(e.apiMaxResults),\n\t}\n\n\t// Get cluster IDs\n\tlog.Debugf(\"Getting cluster list for region\")\n\tfor {\n\t\tresp, err := e.client.ListClusters(params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, c := range resp.ClusterArns {\n\t\t\tcArns = append(cArns, c)\n\t\t}\n\t\tif resp.NextToken == nil || aws.StringValue(resp.NextToken) == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tparams.NextToken = resp.NextToken\n\t}\n\n\t// Get service descriptions\n\t// TODO: this has a 100 cluster limit, split calls in 100 by 100\n\tparams2 := &ecs.DescribeClustersInput{\n\t\tClusters: cArns,\n\t}\n\tresp2, err := e.client.DescribeClusters(params2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcs := []*types.ECSCluster{}\n\tlog.Debugf(\"Getting cluster descriptions\")\n\tfor _, c := range resp2.Clusters {\n\t\tec := &types.ECSCluster{\n\t\t\tID: aws.StringValue(c.ClusterArn),\n\t\t\tName: aws.StringValue(c.ClusterName),\n\t\t}\n\t\tcs = append(cs, ec)\n\t}\n\n\tlog.Debugf(\"Got %d clusters\", len(cs))\n\treturn cs, nil\n}",
"func (cp *CloudProvider) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (c *Client) GetClusters(ctx context.Context) <-chan GetClusterResult {\n\t// TODO Make the concurrency configurable\n\tconcurrency := int(math.Min(5, float64(runtime.NumCPU())))\n\tresults := make(chan GetClusterResult, concurrency)\n\n\tclusterNames, err := c.GetClusterNames(ctx)\n\tif err != nil {\n\t\tclose(results)\n\t\treturn results\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tgo func() {\n\t\tdefer close(results)\n\t\tfor _, clusterName := range clusterNames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcluster, err := c.GetCluster(ctx, name)\n\t\t\t\tresult := GetClusterResult{Cluster: cluster, Error: err}\n\t\t\t\tresults <- result\n\t\t\t}(clusterName)\n\t\t}\n\t\twg.Wait()\n\t}()\n\n\treturn results\n}",
"func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}",
"func (c *RetentionScriptResolver) Clusters() []graphql.ID {\n\tids := make([]graphql.ID, len(c.clusterIDs))\n\tfor i, c := range c.clusterIDs {\n\t\tids[i] = graphql.ID(c.String())\n\t}\n\treturn ids\n}",
"func ECSGetClusters() ([]string, error) {\n\n\t// get the aws sdk client config\n\tcfg, err := config.LoadDefaultConfig(context.TODO())\n\tif err != nil {\n\t\tpanic(\"configuration error, \" + err.Error())\n\t}\n\n\tclient := ecs.NewFromConfig(cfg)\n\n\tinput := &ecs.ListClustersInput{}\n\n\tresult, err := client.ListClusters(context.TODO(), input)\n\tif result == nil {\n\n\t\treturn []string{}, err\n\t}\n\treturn result.ClusterArns, err\n\n}",
"func (client ContainerEngineClient) ListClusters(ctx context.Context, request ListClustersRequest) (response ListClustersResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.DefaultRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listClusters, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListClustersResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListClustersResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListClustersResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListClustersResponse\")\n\t}\n\treturn\n}",
"func (client OpenShiftManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result v20180930preview.OpenShiftManagedCluster, err error) {\n\treq, err := client.GetPreparer(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
"func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetClustersOk returns a tuple with the Clusters field value and a boolean to check if the value has been set. | func (o *QueueManager) GetClustersOk() (*[]string, bool) {
if o == nil {
return nil, false
}
return &o.Clusters, true
} | [
"func NewGetClustersOK() *GetClustersOK {\n\treturn &GetClustersOK{}\n}",
"func (o *VirtualizationVmwareVcenterAllOf) GetClusterCountOk() (*int64, bool) {\n\tif o == nil || o.ClusterCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterCount, true\n}",
"func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.K8sClustersProvisioned, true\n}",
"func (o *ComputeBaseCluster) GetStorageClustersOk() ([]StorageBaseClusterRelationship, bool) {\n\tif o == nil || o.StorageClusters == nil {\n\t\treturn nil, false\n\t}\n\treturn o.StorageClusters, true\n}",
"func NewDescribeClustersOK() *DescribeClustersOK {\n\n\treturn &DescribeClustersOK{}\n}",
"func NewDescribeClustersOK() *DescribeClustersOK {\n\treturn &DescribeClustersOK{}\n}",
"func (o *ClusterNodesConfigDto) GetClusterNodesOk() (*[]NodeConfigDto, bool) {\n\tif o == nil || o.ClusterNodes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterNodes, true\n}",
"func (o *ListClustersOnEndpointUsingGETOK) IsSuccess() bool {\n\treturn true\n}",
"func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/get-clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetClustersOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetClusters: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (cp *CloudProvider) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}",
"func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (o *RemoteQueue) GetClusterVisibilityOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterVisibility, true\n}",
"func (o *ProjectDeploymentRuleResponse) GetClusterOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Cluster, true\n}",
"func (o *NiatelemetryNexusDashboardsAllOf) GetClusterNameOk() (*string, bool) {\n\tif o == nil || o.ClusterName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterName, true\n}",
"func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (o *VirtualizationIweClusterAllOf) GetClusterNameOk() (*string, bool) {\n\tif o == nil || o.ClusterName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterName, true\n}",
"func (o *ClusterSummaryDTO) GetClusteredOk() (*bool, bool) {\n\tif o == nil || o.Clustered == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Clustered, true\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
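The `GetClustersOk` accessor follows the usual openapi-generator Go convention: a pointer to the field plus a boolean guarding the nil receiver. A minimal sketch of the calling pattern under that assumption:

```go
package main

import "fmt"

type QueueManager struct{ Clusters []string }

// Mirrors the Ok-style accessor above: a nil receiver yields (nil, false),
// so callers can branch instead of risking a nil-pointer dereference.
func (o *QueueManager) GetClustersOk() (*[]string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Clusters, true
}

func main() {
	var qm *QueueManager // deliberately nil: ok will be false
	if clusters, ok := qm.GetClustersOk(); ok {
		fmt.Println(*clusters)
	} else {
		fmt.Println("clusters not set")
	}
}
```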
SetClusters sets field value | func (o *QueueManager) SetClusters(v []string) {
o.Clusters = v
} | [
"func (s *RaftDatabase) SetClusters(clusters int) {\n\ts.clusters = clusters\n}",
"func setSomeClusterValues(ch chan error, manager ConfigManager) error {\n\t// prepare expected cluster config\n\tconf := new(ClusterConfig)\n\tconf.ClusterId = \"myClusterID\"\n\tconf.Description = \"myDescription\"\n\n\tif err := manager.SetClusterConf(conf); err != nil {\n\t\treturn err\n\t}\n\n\treturn <-ch\n}",
"func (_m *Clusterer) SetKubeConfig(c string) {\n\t_m.Called(c)\n}",
"func (store *CenterStore) SetCenters(clust core.Clust) {\n\tstore.centers[len(clust)] = clust\n}",
"func (tr *Cluster) SetParameters(params map[string]interface{}) error {\n\tp, err := json.TFParser.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)\n}",
"func (c *Cluster) SetServerCoordinates(url string, serverCA []byte, user, password string, clientCert, clientKey []byte) error {\n\tc.Server = url\n\n\t// Create kube config\n\tu := &api.AuthInfo{}\n\tif password != \"\" {\n\t\tu.Username = user\n\t\tu.Password = password\n\t} else {\n\t\tu.ClientCertificateData = clientCert\n\t\tu.ClientKeyData = clientKey\n\t}\n\n\tkc := api.Config{\n\t\tKind: \"Config\",\n\t\tAPIVersion: \"v1\",\n\t\tPreferences: api.Preferences{},\n\t\tClusters: map[string]*api.Cluster{\n\t\t\tc.Name: {\n\t\t\t\tServer: c.Server,\n\t\t\t\tCertificateAuthorityData: serverCA,\n\t\t\t},\n\t\t},\n\t\tAuthInfos: map[string]*api.AuthInfo{\n\t\t\tuser: u,\n\t\t},\n\t\tContexts: map[string]*api.Context{\n\t\t\t\"default\": &api.Context{\n\t\t\t\tCluster: c.Name,\n\t\t\t\tAuthInfo: user,\n\t\t\t},\n\t\t},\n\t\tCurrentContext: \"default\",\n\t}\n\n\td, err := clientcmd.Write(kc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := filepath.Join(c.Path, \".kube\", \"config\")\n\terr = ioutil.WriteFile(p, d, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.log.V(2).Info(\"Write file\", \"path\", p)\n\n\t// Create clientset from kube/config\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.log.V(2).Info(\"Read config\", \"path\", p)\n\t// create the clientset\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.log.V(3).Info(\"Created client\")\n\n\tc.client = client\n\n\treturn nil\n}",
"func (_m *Clusterer) SetKubeContext(c string) {\n\t_m.Called(c)\n}",
"func setClusterRoles() cmds.StartupHook {\n\treturn func(ctx context.Context, wg *sync.WaitGroup, args cmds.StartupHookArgs) error {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-args.APIServerReady\n\t\t\tlogrus.Info(\"Applying Cluster Role Bindings\")\n\n\t\t\tcs, err := newClient(args.KubeConfigAdmin, nil)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatalf(\"clusterrole: new k8s client: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setKubeletAPIServerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set kubeletAPIServerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setKubeProxyServerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set kubeProxyServerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setTunnelControllerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set tunnelControllerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setCloudControllerManagerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"ccm: set cloudControllerManagerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tlogrus.Info(\"Cluster Role Bindings applied successfully\")\n\t\t}()\n\t\treturn nil\n\t}\n}",
"func (s *ListClustersOutput) SetClusters(v []*ClusterSummary) *ListClustersOutput {\n\ts.Clusters = v\n\treturn s\n}",
"func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {\n\tdaemon.clusterProvider = clusterProvider\n\tdaemon.netController.SetClusterProvider(clusterProvider)\n\tdaemon.attachableNetworkLock = locker.New()\n}",
"func (cg *CGroup) SetCPUShare(limit int64) error {\n\tversion := cgControllers[\"cpu\"]\n\tswitch version {\n\tcase Unavailable:\n\t\treturn ErrControllerMissing\n\tcase V1:\n\t\treturn cg.rw.Set(version, \"cpu\", \"cpu.shares\", fmt.Sprintf(\"%d\", limit))\n\tcase V2:\n\t\treturn cg.rw.Set(version, \"cpu\", \"cpu.weight\", fmt.Sprintf(\"%d\", limit))\n\t}\n\n\treturn ErrUnknownVersion\n}",
"func (o *ServerProperties) SetCores(v int32) {\n\n\to.Cores = &v\n\n}",
"func (c *Client) SetSlaves(v []interface{}) {\n\tc.slaves = make([]string,0,len(v))\n\tfor _, vv := range v {\n\t\tc.slaves = append(c.slaves, vv.(string))\n\t}\n}",
"func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}",
"func (m *MockBuilder) Clusters() []string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Clusters\")\n\tret0, _ := ret[0].([]string)\n\treturn ret0\n}",
"func (o *V0037Node) SetCores(v int32) {\n\to.Cores = &v\n}",
"func (d *DefaultDriver) SetClusterOpts(n node.Node, rtOpts map[string]string) error {\n\treturn &errors.ErrNotSupported{\n\t\tType: \"Function\",\n\t\tOperation: \"SetClusterOpts()\",\n\t}\n}",
"func TestModifyClusterWithProxyOverride(t *testing.T) {\n\tconf := clientcmdapi.Config{\n\t\tClusters: map[string]*clientcmdapi.Cluster{\n\t\t\t\"my-cluster\": {\n\t\t\t\tServer: \"https://192.168.0.1\",\n\t\t\t\tTLSServerName: \"to-be-cleared\",\n\t\t\t\tProxyURL: \"https://192.168.0.2\",\n\t\t\t},\n\t\t},\n\t}\n\ttest := setClusterTest{\n\t\tdescription: \"Testing 'kubectl config set-cluster' with an existing cluster\",\n\t\tconfig: conf,\n\t\targs: []string{\"my-cluster\"},\n\t\tflags: []string{\n\t\t\t\"--server=https://192.168.0.99\",\n\t\t\t\"--proxy-url=https://192.168.0.100\",\n\t\t},\n\t\texpected: `Cluster \"my-cluster\" set.` + \"\\n\",\n\t\texpectedConfig: clientcmdapi.Config{\n\t\t\tClusters: map[string]*clientcmdapi.Cluster{\n\t\t\t\t\"my-cluster\": {Server: \"https://192.168.0.99\", ProxyURL: \"https://192.168.0.100\"},\n\t\t\t},\n\t\t},\n\t}\n\ttest.run(t)\n}",
"func (_m *Resource) SetClusterName(clusterName string) {\n\t_m.Called(clusterName)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAliasQueues returns the AliasQueues field value | func (o *QueueManager) GetAliasQueues() []AliasQueue {
if o == nil {
var ret []AliasQueue
return ret
}
return o.AliasQueues
} | [
"func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}",
"func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}",
"func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}",
"func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}",
"func GetAvailableQueues(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tvar queueNames []string\n\tfor k := range queue.ListQueues() {\n\t\tqueueNames = append(queueNames, k)\n\t}\n\n\tresponseBody := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{queueNames}\n\n\tresponse, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n\n\t_, err = w.Write(response)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n}",
"func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}",
"func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}",
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}",
"func (s *SessionManager) GetAliases() []string {\r\n\taliases := []string{}\r\n\tfor k := range s.configMap {\r\n\t\taliases = append(aliases, k)\r\n\t}\r\n\treturn aliases\r\n}",
"func (s *QSvc) Queues(ctx context.Context, req *pb.QueuesRequest) (*pb.QueuesResponse, error) {\n\tqueueMap, err := s.impl.Queues(ctx,\n\t\tentroq.MatchPrefix(req.MatchPrefix...),\n\t\tentroq.MatchExact(req.MatchExact...),\n\t\tentroq.LimitQueues(int(req.Limit)))\n\tif err != nil {\n\t\treturn nil, autoCodeErrorf(\"failed to get queues: %w\", err)\n\t}\n\tresp := new(pb.QueuesResponse)\n\tfor name, count := range queueMap {\n\t\tresp.Queues = append(resp.Queues, &pb.QueueStats{\n\t\t\tName: name,\n\t\t\tNumTasks: int32(count),\n\t\t})\n\t}\n\treturn resp, nil\n}",
"func (connection *redisConnection) getConsumingQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(connection.queuesKey)\n}",
"func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}",
"func (a *adapter) queueLookup(queueName string) (*sqs.GetQueueUrlOutput, error) {\n\treturn a.sqsClient.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: &queueName,\n\t})\n}",
"func (a *AfcNumQueues) Get(client sophos.ClientInterface, options ...sophos.Option) (err error) {\n\treturn get(client, \"/api/nodes/afc.num_queues\", &a.Value, options...)\n}",
"func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}",
"func getQueueName(arn string) string {\n\tlastIndexOfColon := strings.LastIndex(arn, \":\")\n\treturn arn[lastIndexOfColon+1:]\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAliasQueuesOk returns a tuple with the AliasQueues field value and a boolean to check if the value has been set. | func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {
if o == nil {
return nil, false
}
return &o.AliasQueues, true
} | [
"func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}",
"func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}",
"func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}",
"func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) IsYANGGoStruct() {}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}",
"func (*OpenconfigQos_Qos_Queues) IsYANGGoStruct() {}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func (c *Context) HasQueuesMap(key string) bool {\n\treturn c.makross.HasQueuesMap(key)\n}",
"func (o *LinkLinkinfoInfoSlaveData) GetQueueIdOk() (*int32, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}",
"func (o *VnicEthAdapterPolicyAllOf) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) IsYANGGoStruct() {}",
"func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (o *TimeseriesWidgetExpressionAlias) GetAliasNameOk() (*string, bool) {\n\tif o == nil || o.AliasName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.AliasName, true\n}",
"func (o *VnicEthAdapterPolicyInventory) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) IsYANGGoStruct() {}",
"func (o *VnicEthAdapterPolicyAllOf) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}",
"func isValidQueue(q string) bool {\n\tchunks := strings.Split(q, \"/\")\n\treturn len(chunks) == 6 &&\n\t\tchunks[0] == \"projects\" &&\n\t\tchunks[1] != \"\" &&\n\t\tchunks[2] == \"locations\" &&\n\t\tchunks[3] != \"\" &&\n\t\tchunks[4] == \"queues\" &&\n\t\tchunks[5] != \"\"\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetAliasQueues sets field value | func (o *QueueManager) SetAliasQueues(v []AliasQueue) {
o.AliasQueues = v
} | [
"func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tresponce.Value = nil\n\tif !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}",
"func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}",
"func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}",
"func UpdateQueues(db *storm.DB, torrentQueues TorrentQueues) {\n\ttorrentQueues.ID = 5\n\terr := db.Save(&torrentQueues)\n\tif err != nil {\n\t\tLogger.WithFields(logrus.Fields{\"database\": db, \"error\": err}).Error(\"Unable to write Queues to database!\")\n\t}\n}",
"func (tcdb *Teocdb) SetQueue(key string, value []byte) (err error) {\n\treturn tcdb.session.Query(`UPDATE queue SET lock = '', data = ? WHERE key = ? AND time = toTimestamp(now()) AND random = UUID()`,\n\t\tvalue, key).Exec()\n}",
"func (c *Consumer) SetQueueBind(bind *QueueBind) *Consumer {\n\tif bind != nil {\n\t\tc.mutex.Lock()\n\t\tc.bind = bind\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}",
"func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}",
"func (s *Service) SetQueue(q amboy.Queue) error {\n\tif s.closer != nil {\n\t\treturn errors.New(\"cannot set a new queue, Service is already open\")\n\t}\n\n\ts.queue = q\n\treturn nil\n}",
"func (c *Consumer) SetQueueName(withPrefix bool, name string) *Consumer {\n\tif name == \"\" {\n\t\tname = c.getExchangeTopic()\n\t}\n\tnewQueueName := GenerateQueueName(withPrefix, name)\n\tc.mutex.Lock()\n\tc.declare.SetName(newQueueName)\n\tc.bind.SetName(newQueueName)\n\tc.mutex.Unlock()\n\treturn c\n}",
"func (q *Queue) Set(ctx context.Context, ds *linux.MsqidDS) error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tcreds := auth.CredentialsFromContext(ctx)\n\tif ds.MsgQbytes > maxQueueBytes && !creds.HasCapabilityIn(linux.CAP_SYS_RESOURCE, q.obj.UserNS) {\n\t\t// \"An attempt (IPC_SET) was made to increase msg_qbytes beyond the\n\t\t// system parameter MSGMNB, but the caller is not privileged (Linux:\n\t\t// does not have the CAP_SYS_RESOURCE capability).\"\n\t\treturn linuxerr.EPERM\n\t}\n\n\tif err := q.obj.Set(ctx, &ds.MsgPerm); err != nil {\n\t\treturn err\n\t}\n\n\tq.maxBytes = ds.MsgQbytes\n\tq.changeTime = ktime.NowFromContext(ctx)\n\treturn nil\n}",
"func (c *Client) QueueBind(\n\texchange, queue, key string,\n\topts *QueueBindOpts,\n\tconnOpts *ConnectOpts) error {\n\n\tdefaultOpts := DefaultQueueBindOpts()\n\n\tif opts != nil {\n\t\tdefaultOpts = opts\n\t}\n\n\tdefaultConnOpts := DefaultConnectOpts()\n\tif connOpts != nil {\n\t\tdefaultConnOpts = connOpts\n\t}\n\n\tconn, err := c.connect(defaultConnOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ch.Close()\n\n\terr = ch.QueueBind(\n\t\tqueue,\n\t\tkey,\n\t\texchange,\n\t\tdefaultOpts.NoWait,\n\t\tdefaultOpts.Args,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}",
"func (k *Keeper) setAlias(ctx sdkTypes.Context, alias string, aliasData *Alias, aliasOwnerData *AliasOwner) {\n\townerStore := ctx.KVStore(k.ownersStoreKey)\n\taliasKey := getAliasKey(alias)\n\taliasInfo := k.cdc.MustMarshalBinaryLengthPrefixed(aliasData)\n\n\townerKey := aliasData.Owner.String()\n\taliasOwnerInfo := k.cdc.MustMarshalBinaryLengthPrefixed(aliasOwnerData)\n\n\townerStore.Set([]byte(ownerKey), aliasOwnerInfo)\n\townerStore.Set([]byte(aliasKey), aliasInfo)\n\n\t// Remove from namestore after approved and set into ownerstore\n\tnameStore := ctx.KVStore(k.namesStoreKey)\n\tnameStore.Delete([]byte(ownerKey))\n\tnameStore.Delete([]byte(aliasKey))\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func Queue(name string) SubscribeOption {\n\treturn func(o *SubscribeOptions) {\n\t\to.Queue = name\n\t}\n}",
"func (router *EventRouter) BindQueue(queue string, exchange string) {\n\tif router.lastError == nil {\n\t\trouter.DeclareExchange(exchange)\n\t}\n\tif router.lastError == nil {\n\t\trouter.DeclareQueue(queue)\n\t}\n\tif router.lastError == nil {\n\t\trouter.lastError = router.channel.QueueBind(queue, \"\", exchange, false, nil)\n\t}\n}",
"func (e *LifecycleEvent) SetQueueURL(url string) { e.queueURL = url }",
"func (rm *RouterMux) SetAlias(route string, aliases ...string) {\n\tfor _, alias := range aliases {\n\t\trm.aliases[alias] = route\n\t}\n}",
"func (c *Consumer) SetQueueDeclare(declare *QueueDeclare) *Consumer {\n\tif declare != nil {\n\t\tc.mutex.Lock()\n\t\tc.declare = declare\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
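A minimal round-trip of this setter, shown as a sketch: the AliasQueue and QueueManager stand-ins below carry a single illustrative field each, not the full generated model.

package main

import "fmt"

// Illustrative stand-ins; the real generated model types carry more fields.
type AliasQueue struct{ Name string }

type QueueManager struct{ AliasQueues []AliasQueue }

func (o *QueueManager) SetAliasQueues(v []AliasQueue) { o.AliasQueues = v }

func main() {
	qm := &QueueManager{}
	qm.SetAliasQueues([]AliasQueue{{Name: "DEV.ALIAS.QUEUE"}})
	fmt.Println(len(qm.AliasQueues)) // 1
}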
GetRemoteQueues returns the RemoteQueues field value | func (o *QueueManager) GetRemoteQueues() []RemoteQueue {
if o == nil {
var ret []RemoteQueue
return ret
}
return o.RemoteQueues
} | [
"func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}",
"func (o *RemoteQueue) GetRemoteQueue() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueue\n}",
"func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}",
"func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}",
"func (o *RemoteQueue) GetRemoteQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueue, true\n}",
"func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}",
"func GetRemoteHosts() []string {\r\n\tret := make([]string, 0)\r\n\r\n\tmutex.RLock()\r\n\tdefer mutex.RUnlock()\r\n\r\n\tnodeKey := hex.EncodeToString(GetNodePubKey())\r\n\tfor pubKey, item := range nodes {\r\n\t\tif pubKey != nodeKey && !item.Stopped {\r\n\t\t\tret = append(ret, item.TCPAddress)\r\n\t\t}\r\n\t}\r\n\treturn ret\r\n}",
"func (o *RemoteQueue) GetRemoteQueueManager() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueueManager\n}",
"func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}",
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}",
"func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}",
"func (a *Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}",
"func GetRemoteServers() ([]*remoteServer, error) {\n\ts, err := getStorage()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif s.RemoteServers == nil {\n\t\treturn make([]*remoteServer, 0), nil\n\t}\n\n\treturn s.RemoteServers, nil\n}",
"func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}",
"func GetAvailableQueues(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tvar queueNames []string\n\tfor k := range queue.ListQueues() {\n\t\tqueueNames = append(queueNames, k)\n\t}\n\n\tresponseBody := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{queueNames}\n\n\tresponse, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n\n\t_, err = w.Write(response)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n}",
"func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}",
"func (connection *redisConnection) GetOpenQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(queuesKey)\n}",
"func (connection *redisConnection) getConsumingQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(connection.queuesKey)\n}",
"func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
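The getter above guards the nil receiver and hands back the type's zero value, so callers can range over the result without a prior nil check. A small sketch under the same stand-in assumptions:

package main

import "fmt"

type RemoteQueue struct{ Name string } // illustrative stand-in

type QueueManager struct{ RemoteQueues []RemoteQueue }

func (o *QueueManager) GetRemoteQueues() []RemoteQueue {
	if o == nil {
		var ret []RemoteQueue
		return ret
	}
	return o.RemoteQueues
}

func main() {
	var qm *QueueManager // deliberately nil: Go still dispatches the method
	queues := qm.GetRemoteQueues()
	fmt.Println(queues == nil, len(queues)) // true 0, safe to range over
}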
GetRemoteQueuesOk returns a tuple with the RemoteQueues field value and a boolean to check if the value has been set. | func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {
if o == nil {
return nil, false
}
return &o.RemoteQueues, true
} | [
"func (o *RemoteQueue) GetRemoteQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueue, true\n}",
"func (o *RemoteQueue) GetRemoteQueueManagerOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueueManager, true\n}",
"func (o *RemoteQueue) GetLocalQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.LocalQueue, true\n}",
"func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}",
"func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}",
"func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}",
"func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}",
"func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}",
"func (o *VnicEthAdapterPolicyAllOf) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}",
"func (o *RemoteQueue) GetRemoteQueue() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueue\n}",
"func (o *SnippetDTO) GetRemoteProcessGroupsOk() (*map[string]RevisionDTO, bool) {\n\tif o == nil || o.RemoteProcessGroups == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteProcessGroups, true\n}",
"func (o *NSQProducer) GetRemoteAddressOk() (*string, bool) {\n\tif o == nil || o.RemoteAddress == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteAddress, true\n}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}",
"func (o *VnicEthAdapterPolicyInventory) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}",
"func (o *LinkLinkinfoInfoSlaveData) GetQueueIdOk() (*int32, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}",
"func (o *NotificationConfig) GetReceiversOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Receivers, true\n}",
"func (o *VulnUpdateNotification) GetQueueIdOk() (*string, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}",
"func (o *SmsBinaryMessage) GetDestinationsOk() (*[]SmsDestination, bool) {\n\tif o == nil || o.Destinations == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Destinations, true\n}",
"func (o *VnicEthAdapterPolicyAllOf) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
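The Ok variant returns a pointer plus a presence flag, the usual generated-client convention for telling "unset" apart from a zero value, and it is also safe on a nil receiver. A sketch, again with assumed stand-in types:

package main

import "fmt"

type RemoteQueue struct{ Name string } // illustrative stand-in

type QueueManager struct{ RemoteQueues []RemoteQueue }

func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {
	if o == nil {
		return nil, false
	}
	return &o.RemoteQueues, true
}

func main() {
	var missing *QueueManager
	if _, ok := missing.GetRemoteQueuesOk(); !ok {
		fmt.Println("remote queues not set")
	}
	qm := &QueueManager{RemoteQueues: []RemoteQueue{{Name: "Q1"}}}
	if queues, ok := qm.GetRemoteQueuesOk(); ok {
		fmt.Println(len(*queues)) // 1
	}
}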
SetRemoteQueues sets the RemoteQueues field value | func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {
o.RemoteQueues = v
} | [
"func (o *RemoteQueue) SetRemoteQueue(v string) {\n\to.RemoteQueue = v\n}",
"func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}",
"func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tresponce.Value = nil\n\tif !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}",
"func (q *Queue) Set(ctx context.Context, ds *linux.MsqidDS) error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tcreds := auth.CredentialsFromContext(ctx)\n\tif ds.MsgQbytes > maxQueueBytes && !creds.HasCapabilityIn(linux.CAP_SYS_RESOURCE, q.obj.UserNS) {\n\t\t// \"An attempt (IPC_SET) was made to increase msg_qbytes beyond the\n\t\t// system parameter MSGMNB, but the caller is not privileged (Linux:\n\t\t// does not have the CAP_SYS_RESOURCE capability).\"\n\t\treturn linuxerr.EPERM\n\t}\n\n\tif err := q.obj.Set(ctx, &ds.MsgPerm); err != nil {\n\t\treturn err\n\t}\n\n\tq.maxBytes = ds.MsgQbytes\n\tq.changeTime = ktime.NowFromContext(ctx)\n\treturn nil\n}",
"func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}",
"func (m *TeleconferenceDeviceMediaQuality) SetRemoteIPAddress(value *string)() {\n err := m.GetBackingStore().Set(\"remoteIPAddress\", value)\n if err != nil {\n panic(err)\n }\n}",
"func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}",
"func (r *RPC) SetQueueClient(c queue.Client) {\n\tgapi := NewGRpcServer(c, r.api)\n\tjapi := NewJSONRPCServer(c, r.api)\n\tr.gapi = gapi\n\tr.japi = japi\n\tr.c = c\n\t//注册系统rpc\n\tpluginmgr.AddRPC(r)\n\tr.Listen()\n}",
"func SetMaxQueues(maxQueues int) Option {\n\treturn func(o *options) {\n\t\to.maxQueues = maxQueues\n\t}\n}",
"func (r *RPC) SetQueueClient(c queue.Client) {\r\n\tgapi := NewGRpcServer(c, r.api)\r\n\tjapi := NewJSONRPCServer(c, r.api)\r\n\tr.gapi = gapi\r\n\tr.japi = japi\r\n\tr.c = c\r\n\t//注册系统rpc\r\n\tpluginmgr.AddRPC(r)\r\n\tr.Listen()\r\n}",
"func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}",
"func (m *TeleconferenceDeviceMediaQuality) SetRemotePort(value *int32)() {\n err := m.GetBackingStore().Set(\"remotePort\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (tcdb *Teocdb) SetQueue(key string, value []byte) (err error) {\n\treturn tcdb.session.Query(`UPDATE queue SET lock = '', data = ? WHERE key = ? AND time = toTimestamp(now()) AND random = UUID()`,\n\t\tvalue, key).Exec()\n}",
"func (m *AudioRoutingGroup) SetReceivers(value []string)() {\n err := m.GetBackingStore().Set(\"receivers\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (t *UnpolledCaches) SetRemotePolled(results map[tc.CacheName]tc.IsAvailable) {\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tnumUnpolledCaches := len(t.unpolledCaches)\n\tif numUnpolledCaches == 0 {\n\t\treturn\n\t}\n\tfor cache := range t.unpolledCaches {\n\tinnerLoop:\n\t\tfor cacheName := range results {\n\t\t\tif cacheName != cache {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdelete(t.unpolledCaches, cache)\n\t\t\tdelete(t.seenCaches, cache)\n\t\t\tbreak innerLoop\n\t\t}\n\t}\n}",
"func (m *VpnConfiguration) SetServers(value []VpnServerable)() {\n err := m.GetBackingStore().Set(\"servers\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (network *P2p) SetQueueClient(client queue.Client) {\n\tnetwork.client = client\n\tnetwork.node.SetQueueClient(client)\n\tgo func() {\n\t\tlog.Info(\"p2p\", \"setqueuecliet\", \"ok\")\n\t\tnetwork.node.Start()\n\t\tnetwork.subP2pMsg()\n\t\terr := network.loadP2PPrivKeyToWallet()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}()\n}",
"func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"[email protected]\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"[email protected]\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"[email protected]\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}",
"func SetQueueReclaimable(ctx *TestContext, queues []string, reclaimable bool) {\n\tBy(\"Setting Queue reclaimable\")\n\n\tfor _, q := range queues {\n\t\tqueue, err := ctx.Vcclient.SchedulingV1beta1().Queues().Get(context.TODO(), q, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to get queue %s\", q)\n\n\t\tqueue.Spec.Reclaimable = &reclaimable\n\t\t_, err = ctx.Vcclient.SchedulingV1beta1().Queues().Update(context.TODO(), queue, metav1.UpdateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to update queue %s\", q)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
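Because the setter replaces the whole slice rather than appending, adding one remote queue means read, append, write back. A sketch using the same assumed stand-ins:

package main

import "fmt"

type RemoteQueue struct{ Name string } // illustrative stand-in

type QueueManager struct{ RemoteQueues []RemoteQueue }

func (o *QueueManager) GetRemoteQueues() []RemoteQueue {
	if o == nil {
		return nil
	}
	return o.RemoteQueues
}

func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) { o.RemoteQueues = v }

func main() {
	qm := &QueueManager{RemoteQueues: []RemoteQueue{{Name: "Q1"}}}
	qm.SetRemoteQueues(append(qm.GetRemoteQueues(), RemoteQueue{Name: "Q2"}))
	fmt.Println(len(qm.RemoteQueues)) // 2
}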
GetClusterQueues returns the ClusterQueues field value | func (o *QueueManager) GetClusterQueues() []ClusterQueue {
if o == nil {
var ret []ClusterQueue
return ret
}
return o.ClusterQueues
} | [
"func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}",
"func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}",
"func (client *Client) GetClusterQueueInfo(request *GetClusterQueueInfoRequest) (response *GetClusterQueueInfoResponse, err error) {\n\tresponse = CreateGetClusterQueueInfoResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (connection *redisConnection) getConsumingQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(connection.queuesKey)\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}",
"func (a *AfcNumQueues) Get(client sophos.ClientInterface, options ...sophos.Option) (err error) {\n\treturn get(client, \"/api/nodes/afc.num_queues\", &a.Value, options...)\n}",
"func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}",
"func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}",
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}",
"func (o *RemoteQueue) GetClusterVisibility() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.ClusterVisibility\n}",
"func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}",
"func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}",
"func (q *DistroQueueInfo) GetQueueCollection() string {\n\tif q.SecondaryQueue {\n\t\treturn TaskSecondaryQueuesCollection\n\t}\n\n\treturn TaskQueuesCollection\n}",
"func (s *QSvc) Queues(ctx context.Context, req *pb.QueuesRequest) (*pb.QueuesResponse, error) {\n\tqueueMap, err := s.impl.Queues(ctx,\n\t\tentroq.MatchPrefix(req.MatchPrefix...),\n\t\tentroq.MatchExact(req.MatchExact...),\n\t\tentroq.LimitQueues(int(req.Limit)))\n\tif err != nil {\n\t\treturn nil, autoCodeErrorf(\"failed to get queues: %w\", err)\n\t}\n\tresp := new(pb.QueuesResponse)\n\tfor name, count := range queueMap {\n\t\tresp.Queues = append(resp.Queues, &pb.QueueStats{\n\t\t\tName: name,\n\t\t\tNumTasks: int32(count),\n\t\t})\n\t}\n\treturn resp, nil\n}",
"func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}",
"func (connection *redisConnection) GetOpenQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(queuesKey)\n}",
"func (client *Client) GetClusterQueueInfoWithCallback(request *GetClusterQueueInfoRequest, callback func(response *GetClusterQueueInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetClusterQueueInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetClusterQueueInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetClusterQueuesOk returns a tuple with the ClusterQueues field value and a boolean to check if the value has been set. | func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {
if o == nil {
return nil, false
}
return &o.ClusterQueues, true
} | [
"func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}",
"func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}",
"func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}",
"func (o *RemoteQueue) GetClusterVisibilityOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterVisibility, true\n}",
"func (o *QueueManager) GetClusterQueues() []ClusterQueue {\n\tif o == nil {\n\t\tvar ret []ClusterQueue\n\t\treturn ret\n\t}\n\n\treturn o.ClusterQueues\n}",
"func (o *RemoteQueue) GetRemoteQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueue, true\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func (o *RemoteQueue) GetLocalQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.LocalQueue, true\n}",
"func (c *Context) HasQueuesMap(key string) bool {\n\treturn c.makross.HasQueuesMap(key)\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}",
"func (client *Client) GetClusterQueueInfo(request *GetClusterQueueInfoRequest) (response *GetClusterQueueInfoResponse, err error) {\n\tresponse = CreateGetClusterQueueInfoResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (o *LinkLinkinfoInfoSlaveData) GetQueueIdOk() (*int32, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}",
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}",
"func (o *VnicEthAdapterPolicyAllOf) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}",
"func (o *V0037Node) GetThreadsOk() (*int32, bool) {\n\tif o == nil || o.Threads == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Threads, true\n}",
"func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (s *QSvc) Queues(ctx context.Context, req *pb.QueuesRequest) (*pb.QueuesResponse, error) {\n\tqueueMap, err := s.impl.Queues(ctx,\n\t\tentroq.MatchPrefix(req.MatchPrefix...),\n\t\tentroq.MatchExact(req.MatchExact...),\n\t\tentroq.LimitQueues(int(req.Limit)))\n\tif err != nil {\n\t\treturn nil, autoCodeErrorf(\"failed to get queues: %w\", err)\n\t}\n\tresp := new(pb.QueuesResponse)\n\tfor name, count := range queueMap {\n\t\tresp.Queues = append(resp.Queues, &pb.QueueStats{\n\t\t\tName: name,\n\t\t\tNumTasks: int32(count),\n\t\t})\n\t}\n\treturn resp, nil\n}",
"func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}",
"func GetAvailableQueues(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tvar queueNames []string\n\tfor k := range queue.ListQueues() {\n\t\tqueueNames = append(queueNames, k)\n\t}\n\n\tresponseBody := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{queueNames}\n\n\tresponse, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n\n\t_, err = w.Write(response)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a tiploc to the result so that it will be included in the tiploc map | func (bf *boardFilter) addTiploc(tiploc string) {
if tiploc != "" {
bf.tiplocs[tiploc] = nil
}
} | [
"func (r *LocationMap) Add(t *Location) {\n\tif _, ok := r.m[t.Tiploc]; !ok {\n\t\tr.m[t.Tiploc] = t\n\t}\n}",
"func (bd *BlockDAG) updateTips(b *Block) {\n\tif bd.tips == nil {\n\t\tbd.tips = NewHashSet()\n\t\tbd.tips.AddPair(b.GetHash(), b)\n\t\treturn\n\t}\n\tfor k := range bd.tips.GetMap() {\n\t\tblock := bd.getBlock(&k)\n\t\tif block.HasChildren() {\n\t\t\tbd.tips.Remove(&k)\n\t\t}\n\t}\n\tbd.tips.AddPair(b.GetHash(), b)\n}",
"func (m *MemoryStore) SetTips(add hash.Hash, del []*site.Site) {\n\tfor _, d := range del {\n\t\tdelete(m.tips, d.Hash())\n\t}\n\tm.tips[add] = true\n}",
"func (c *CIFImporter) putTiploc(t *cif.Tiploc) error {\n t.Update()\n\n // Link it to this CIF file\n t.DateOfExtract = c.importhd.DateOfExtract\n\n _, err := c.tx.Exec(\n \"INSERT INTO timetable.tiploc \"+\n \"(tiploc, crs, stanox, name, nlc, nlccheck, nlcdesc, station, dateextract) \"+\n \"VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9) \"+\n \"ON CONFLICT ( id ) \"+\n \"DO UPDATE SET \"+\n \"crs = EXCLUDED.crs, \"+\n \"stanox = EXCLUDED.stanox, \"+\n \"name = EXCLUDED.name, \"+\n \"nlc = EXCLUDED.nlc, \"+\n \"nlccheck = EXCLUDED.nlccheck, \"+\n \"nlcdesc = EXCLUDED.nlcdesc, \"+\n \"station = EXCLUDED.station, \"+\n \"dateextract = EXCLUDED.dateextract \",\n t.Tiploc,\n t.CRS,\n t.Stanox,\n t.Name,\n t.NLC,\n t.NLCCheck,\n t.NLCDesc,\n t.Station,\n t.DateOfExtract,\n )\n if err != nil {\n log.Printf(\"Failed to insert tiploc %s\", t.Tiploc)\n return err\n }\n\n return nil\n}",
"func (q LocationTemperatureQueryResult) Add(temp float64, city string, y int, mo int, d int) {\n\tq[city][y][mo][d] = append(q[city][y][mo][d], temp)\n}",
"func (f Factory) WithTips(tip, tipper string) Factory {\n\tparsedTips, err := sdk.ParseCoinsNormalized(tip)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tf.tip = &tx.Tip{\n\t\tTipper: tipper,\n\t\tAmount: parsedTips,\n\t}\n\treturn f\n}",
"func (ts *TipSelector) AddTip(bndl *tangle.Bundle) {\n\tts.tipsLock.Lock()\n\tdefer ts.tipsLock.Unlock()\n\n\ttailTxHash := bndl.GetTailHash()\n\n\tif _, exists := ts.nonLazyTipsMap[string(tailTxHash)]; exists {\n\t\t// tip already exists\n\t\treturn\n\t}\n\n\tif _, exists := ts.semiLazyTipsMap[string(tailTxHash)]; exists {\n\t\t// tip already exists\n\t\treturn\n\t}\n\n\tlsmi := tangle.GetSolidMilestoneIndex()\n\n\tscore := ts.calculateScore(tailTxHash, lsmi)\n\tif score == ScoreLazy {\n\t\t// do not add lazy tips.\n\t\t// lazy tips should also not remove other tips from the pool, otherwise the tip pool will run empty.\n\t\treturn\n\t}\n\n\ttip := &Tip{\n\t\tScore: score,\n\t\tHash: tailTxHash,\n\t\tTimeFirstApprover: time.Time{},\n\t\tApproversCount: atomic.NewUint32(0),\n\t}\n\n\tswitch tip.Score {\n\tcase ScoreNonLazy:\n\t\tts.nonLazyTipsMap[string(tailTxHash)] = tip\n\t\tmetrics.SharedServerMetrics.TipsNonLazy.Add(1)\n\tcase ScoreSemiLazy:\n\t\tts.semiLazyTipsMap[string(tailTxHash)] = tip\n\t\tmetrics.SharedServerMetrics.TipsSemiLazy.Add(1)\n\t}\n\n\tts.Events.TipAdded.Trigger(tip)\n\n\t// the approvees (trunk and branch) are the tail transactions this tip approves\n\t// remove them from the tip pool\n\tapproveeTailTxHashes := map[string]struct{}{\n\t\tstring(bndl.GetTrunkHash(true)): {},\n\t\tstring(bndl.GetBranchHash(true)): {},\n\t}\n\n\tcheckTip := func(tipsMap map[string]*Tip, approveeTip *Tip, retentionRulesTipsLimit int, maxApprovers uint32, maxReferencedTipAgeSeconds time.Duration) bool {\n\t\t// if the amount of known tips is above the limit, remove the tip directly\n\t\tif len(tipsMap) > retentionRulesTipsLimit {\n\t\t\treturn ts.removeTipWithoutLocking(tipsMap, hornet.Hash(approveeTip.Hash))\n\t\t}\n\n\t\t// check if the maximum amount of approvers for this tip is reached\n\t\tif approveeTip.ApproversCount.Add(1) >= maxApprovers {\n\t\t\treturn ts.removeTipWithoutLocking(tipsMap, hornet.Hash(approveeTip.Hash))\n\t\t}\n\n\t\tif maxReferencedTipAgeSeconds == time.Duration(0) {\n\t\t\t// check for maxReferenceTipAge is disabled\n\t\t\treturn false\n\t\t}\n\n\t\t// check if the tip was referenced by another transaction before\n\t\tif approveeTip.TimeFirstApprover.IsZero() {\n\t\t\t// mark the tip as referenced\n\t\t\tapproveeTip.TimeFirstApprover = time.Now()\n\t\t}\n\n\t\treturn false\n\t}\n\n\tfor approveeTailTxHash := range approveeTailTxHashes {\n\t\t// we have to separate between the pools, to prevent semi-lazy tips from emptying the non-lazy pool\n\t\tswitch tip.Score {\n\t\tcase ScoreNonLazy:\n\t\t\tif approveeTip, exists := ts.nonLazyTipsMap[approveeTailTxHash]; exists {\n\t\t\t\tif checkTip(ts.nonLazyTipsMap, approveeTip, ts.retentionRulesTipsLimitNonLazy, ts.maxApproversNonLazy, ts.maxReferencedTipAgeSecondsNonLazy) {\n\t\t\t\t\tmetrics.SharedServerMetrics.TipsNonLazy.Sub(1)\n\t\t\t\t}\n\t\t\t}\n\t\tcase ScoreSemiLazy:\n\t\t\tif approveeTip, exists := ts.semiLazyTipsMap[approveeTailTxHash]; exists {\n\t\t\t\tif checkTip(ts.semiLazyTipsMap, approveeTip, ts.retentionRulesTipsLimitSemiLazy, ts.maxApproversSemiLazy, ts.maxReferencedTipAgeSecondsSemiLazy) {\n\t\t\t\t\tmetrics.SharedServerMetrics.TipsSemiLazy.Sub(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (*Search) Tips() {\n\tfmt.Println(\"\\n => Tips: to select a manga, use `manga <index>`\")\n}",
"func (m *PregnancystatusMutation) AddAntenatalinformationIDs(ids ...int) {\n\tif m._Antenatalinformation == nil {\n\t\tm._Antenatalinformation = make(map[int]struct{})\n\t}\n\tfor i := range ids {\n\t\tm._Antenatalinformation[ids[i]] = struct{}{}\n\t}\n}",
"func (t *Tangle) Tips() []*site.Site {\n\tkeys := []*site.Site{}\n\tfor h := range t.tips {\n\t\ts := t.Get(h)\n\t\tif s != nil {\n\t\t\tkeys = append(keys, s.Site)\n\t\t}\n\t}\n\treturn keys\n}",
"func (resp Response) AddTags(newTags map[string]string) (*influx.Point, error) {\r\n\r\n\t// Pull off the current tags\r\n\ttags := resp.Point.Tags()\r\n\r\n\t// Add the new tags to the current tags\r\n\tfor tag, tagValue := range newTags {\r\n\t\ttags[tag] = tagValue\r\n\t}\r\n\r\n\t// Make a new point\r\n\tfields, err := resp.Point.Fields()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\r\n\t}\r\n\tpt, err := influx.NewPoint(resp.Point.Name(), tags, fields, resp.Point.Time())\r\n\r\n\t// panic on error\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error adding tags to response point\\n point: %v\\n tags:%v\\n error: %v\\n\", resp.Point, newTags, err)\r\n\t}\r\n\r\n\treturn pt, nil\r\n}",
"func (c *Client) TipLog(names []string, lineCount int) error {\n\tsgs, err := c.getServiceList(names, false)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tc.tipLogServicesOrGroups(sgs, lineCount)\n\n\treturn nil\n}",
"func (s Searcher) AddLocatable(locatable Locatable) {\n\tlocatable_on_grid := newLocatableOnGrid(locatable, s.lat_tiles, s.lng_tiles)\n\ts.locatable_map.AddLocatableOnGrid(&locatable_on_grid)\n}",
"func PostLatestTip(tip *big.Int, poolID string, userID string, genesisHash string) error {\n u, err := url.Parse(poolToolTipURL)\n if err == nil {\n q := u.Query()\n q.Set(\"poolid\", poolID)\n q.Set(\"userid\", userID)\n q.Set(\"genesispref\", genesisHash)\n q.Set(\"mytip\", tip.String())\n u.RawQuery = q.Encode()\n response, err := http.Get(u.String())\n if err == nil {\n if response.StatusCode == 200 {\n return nil\n } else {\n return PoolToolAPIException{URL: poolToolTipURL, StatusCode: response.StatusCode, Reason: response.Status}\n }\n }\n return err\n }\n return err\n}",
"func (pool *TxPool) GetAllTips() map[common.Hash]types.Txi {\n\tpool.mu.RLock()\n\tdefer pool.mu.RUnlock()\n\n\treturn pool.tips.txs\n}",
"func addTagsToPoint(point *influxdb.Point, tags map[string]string) {\n\tif point.Tags == nil {\n\t\tpoint.Tags = tags\n\t} else {\n\t\tfor k, v := range tags {\n\t\t\tpoint.Tags[k] = v\n\t\t}\n\t}\n}",
"func (m *RisksMutation) AddAntenatalinformationIDs(ids ...int) {\n\tif m._Antenatalinformation == nil {\n\t\tm._Antenatalinformation = make(map[int]struct{})\n\t}\n\tfor i := range ids {\n\t\tm._Antenatalinformation[ids[i]] = struct{}{}\n\t}\n}",
"func (pu *PregnancystatusUpdate) AddAntenatalinformation(a ...*Antenatalinformation) *PregnancystatusUpdate {\n\tids := make([]int, len(a))\n\tfor i := range a {\n\t\tids[i] = a[i].ID\n\t}\n\treturn pu.AddAntenatalinformationIDs(ids...)\n}",
"func (t Traveler) AddLabeled(label string, r aql.QueryResult) Traveler {\n\to := Traveler{State: map[string]aql.QueryResult{}}\n\tfor k, v := range t.State {\n\t\to.State[k] = v\n\t}\n\to.State[label] = r\n\treturn o\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
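addTiploc relies on a Go map used as a set: storing nil under the key records presence, and only key existence matters later. A minimal illustration of the idiom:

package main

import "fmt"

func main() {
	tiplocs := map[string]interface{}{}

	add := func(tiploc string) {
		if tiploc != "" {
			tiplocs[tiploc] = nil // value is irrelevant; key presence is the signal
		}
	}

	add("PADTON")
	add("") // empty tiplocs are skipped
	_, present := tiplocs["PADTON"]
	fmt.Println(present, len(tiplocs)) // true 1
}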
Process calling points so that we generate the appropriate via and include their tiplocs | func (bf *boardFilter) processCallingPoints(s ldb.Service) {
if len(s.CallingPoints) > 0 {
viaRequest := bf.addVia(s.RID, s.CallingPoints[len(s.CallingPoints)-1].Tiploc)
for _, cp := range s.CallingPoints {
bf.addTiploc(cp.Tiploc)
viaRequest.AppendTiploc(cp.Tiploc)
}
}
} | [
"func TipCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(*Tip))(params[0].(*Tip))\n}",
"func processCoords(gpspoints []GPSPoint) (points Points) {\n\tfor i := 0; i < len(gpspoints); i++ {\n\t\tpoints = append(points, Point{gpspoints[i].Lon, gpspoints[i].Lat, gpspoints[i].SignalDbm})\n\t}\n\treturn\n}",
"func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, txHash aingle.Hash))(params[0].(int), params[1].(int), params[2].(int), params[3].(aingle.Hash))\n}",
"func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, txHash hornet.Hash))(params[0].(int), params[1].(int), params[2].(int), params[3].(hornet.Hash))\n}",
"func pointProcess(term string, sess *mgo.Session, message bot.IncomingMessage) []*bot.OutgoingMessage {\n\twords := strings.Split(message.Text[1:], \" \")\n\tswitch strings.ToLower(words[0]) {\n\tcase \"adultme\":\n\t\treturn requestPoint(words[1:], sess, message)\n\tcase \"award\":\n\t\treturn awardPoint(words[1:2], sess, message)\n\tcase \"reject\":\n\t\treturn rejectPoint(words[1:2], sess, message)\n\tcase \"adults\":\n\t\treturn listAdults(sess)\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func parsePointInfo(p Point, chargerType []string) PointInfoJS {\r\n\tpJS := PointInfoJS{}\r\n\r\n\tpJS.Provider = p.Provider\r\n\tpJS.Address = p.Address\r\n\tpJS.Operator = p.Operator\r\n\tpJS.Requirement = p.Requirement\r\n\tpJS.Charger = p.Charger\r\n\tpJS.Parking = p.Parking\r\n\tpJS.Hour = p.Hour\r\n\tpJS.Facility = p.Facility\r\n\tpJS.Website = p.Website\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[1])\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[0])\r\n\r\n\tfor _, v := range chargerType {\r\n\t\tfor k, n := range pJS.Charger {\r\n\t\t\tif v == n.Type {\r\n\t\t\t\tpJS.Charger[k].Match = true\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn pJS\r\n}",
"func CallerInfo(skip ...int) (caller *CallInfo) {\n\tcaller = &CallInfo{}\n\tskipCount := 1\n\tif len(skip) > 0 {\n\t\tskipCount = skip[0]\n\t}\n\n\tpc, file, line, ok := runtime.Caller(skipCount)\n\tif !ok {\n\t\treturn\n\t}\n\n\tcaller.Line = line\n\t_, caller.FileName = path.Split(file)\n\tparts := strings.Split(runtime.FuncForPC(pc).Name(), `.`)\n\tpl := len(parts)\n\tcaller.FuncName = parts[pl-1]\n\n\tif parts[pl-2][0] == '(' {\n\t\tcaller.FuncName = parts[pl-2] + `.` + caller.FuncName\n\t\tcaller.PackageName = strings.Join(parts[0:pl-2], `.`)\n\t} else {\n\t\tcaller.PackageName = strings.Join(parts[0:pl-1], `.`)\n\t}\n\n\treturn\n}",
"func (cb *CanBusClient) Points(nodeID string, points []data.Point) {\n\tcb.newPoints <- NewPoints{nodeID, \"\", points}\n}",
"func linePointsGen(p1, p2 Point, speed float64) (gen func() (x, y float64, e error)) {\n\t// Set up math\n\tslopeT, slope, _ := getLineParams(p1, p2)\n\n\tx := p1.X\n\txPrev := x\n\ty := p1.Y\n\tyPrev := y\n\te := fmt.Errorf(\"End of path reached\")\n\ttheta := math.Atan(slope)\n\n\t// Every slope type has a different iterator, since they change the\n\t// x and y values in different combinations, as well as do different\n\t// comparisons on the values.\n\tswitch slopeT {\n\tcase ZERORIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\txPrev = x\n\t\t\tx += speed\n\n\t\t\treturn xPrev, y, nil\n\t\t}\n\tcase ZEROLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\txPrev = x\n\t\t\tx -= speed\n\n\t\t\treturn xPrev, y, nil\n\t\t}\n\tcase POSRIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y || x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty += speed * math.Sin(theta)\n\t\t\tx += speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase NEGRIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y || x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty += speed * math.Sin(theta)\n\t\t\tx += speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase POSLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y || x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty -= speed * math.Sin(theta)\n\t\t\tx -= speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase NEGLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y || x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty -= speed * math.Sin(theta)\n\t\t\tx -= speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase INFUP:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev := y\n\t\t\ty += speed\n\n\t\t\treturn x, yPrev, nil\n\t\t}\n\tcase INFDOWN:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev := y\n\t\t\ty -= speed\n\n\t\t\treturn x, yPrev, nil\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (c *CustomAlgorithm) CustomMovingPoints(gpxPoint *geo.GPXPoint, previousGPXPoint *geo.GPXPoint, algorithm geo.Algorithm) error {\n\n\t/* \tDefine which points should be used; if a point should be used for calculation then set it's new values like Duration, Distance, Speed, etc.\n\tHere we use the set the new value for the points which used for \"Moving\"Time/Distanc\n\t*/\n\n\t// speed < 100 m/s\n\tif gpxPoint.Speed < 100.0 {\n\t\treturn errors.New(\"Point Speed below threshold\")\n\t}\n\tgpxPoint.Point.SetPointData(&previousGPXPoint.Point, algorithm)\n\treturn nil\n}",
"func (s *BaseAspidaListener) EnterPoints(ctx *PointsContext) {}",
"func (b *block) Plan(pointIds ...string) ([]spi.PointSPI, error) {\n\tpoints := []spi.PointSPI{}\n\n\tif len(pointIds) == 0 {\n\t\t// if there are no specified points, include all points\n\n\t\tfor _, p := range b.points {\n\t\t\tpoints = append(points, p)\n\t\t}\n\t} else {\n\t\tincluded := map[string]bool{}\n\t\tincluded_sf := map[string]bool{}\n\n\t\t// include all specified points\n\t\tfor _, id := range pointIds {\n\t\t\tif p, ok := b.points[id]; !ok {\n\t\t\t\treturn nil, sunspec.ErrNoSuchPoint\n\t\t\t} else {\n\t\t\t\tif !included[id] {\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tincluded[id] = true\n\t\t\t\t}\n\t\t\t\tif p.Type() == typelabel.ScaleFactor {\n\t\t\t\t\tincluded_sf[id] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// include their scale factors too...\n\t\t//\n\t\t// we do this for several reasons:\n\t\t// - to interpret a point that uses a scale factor, we need the scale factor too\n\t\t// - if we don't there we may read a value point after its scale factor point has changed\n\t\t// By forcing contemporaneous reads of a scale factor and its related points we help to ensure\n\t\t// that the two values are consistent.\n\t\t// - we want to avoid app programmers having to encode knowedlege in their programs\n\t\t// about these depednencies - the knowledge is in the SMDX documents, so lets use it\n\t\tfor _, p := range points {\n\t\t\tsfp := p.(*point).scaleFactor\n\t\t\tif sfp != nil {\n\t\t\t\tif !included[sfp.Id()] {\n\t\t\t\t\tpoints = append(points, sfp.(spi.PointSPI))\n\t\t\t\t\tincluded[sfp.Id()] = true\n\t\t\t\t\tincluded_sf[sfp.Id()] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// We also include all the currently valid points that reference any scale\n\t\t// factor points we are going to read since we don't want such points to\n\t\t// unexpectedly enter an error state when they are invalidated by the\n\t\t// read of the scale factor point. This allows twp separate reads each\n\t\t// of which have a point that reference a shared scale factor point to\n\t\t// be equivalent to a single read of all points or to two reads in which\n\t\t// all points related to a single scale factor are read in the same read\n\t\t// as the scale factor itself.\n\t\t//\n\t\t// One consequence of this behaviour is that any local changes (via a\n\t\t// setter) to a point dependent on a scale factor point may be lost by a\n\t\t// read of any point that is dependent on the same scale factor which\n\t\t// itself means that local changes to points should be written to the\n\t\t// physical device with Block.Write before the next Block.Read or else\n\t\t// they may be lost under some circumstances even if the point concerned\n\t\t// is not directly referened by the Read call.\n\t\t//\n\t\t// Part of the reason we do this is to maximise the consistency of data\n\t\t// exposed by the API while minimising both the effort for the programmer\n\t\t// to maintain the consistency and also surprising behaviour.\n\t\tfor _, p := range b.points {\n\t\t\tif sfp := p.scaleFactor; sfp == nil || p.Error() != nil || !included_sf[sfp.Id()] {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tif !included[p.Id()] {\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tincluded[p.Id()] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// sort so scale factors come first, then other points in offset order\n\tsort.Sort(scaleFactorFirstOrder(points))\n\treturn points, nil\n}",
"func applyToPoints(points []Point, fn func(*Point)) {\n\tfor j := range points {\n\t\tfn(&points[j])\n\t}\n}",
"func buildSamplePoints (t *testing.T) []*point.Point {\n\treturn []*point.Point{\n\t\tbuildSamplePoint(t, 1), buildSamplePoint(t, 2), buildSamplePoint(t, 3)}\n}",
"func (u *DatadogUnifi) loopPoints(r report) {\n\tm := r.metrics()\n\n\tfor _, s := range m.RogueAPs {\n\t\tu.switchExport(r, s)\n\t}\n\n\tfor _, s := range m.Sites {\n\t\tu.switchExport(r, s)\n\t}\n\n\tfor _, s := range m.SitesDPI {\n\t\tu.reportSiteDPI(r, s.(*unifi.DPITable))\n\t}\n\n\tfor _, s := range m.Clients {\n\t\tu.switchExport(r, s)\n\t}\n\n\tfor _, s := range m.Devices {\n\t\tu.switchExport(r, s)\n\t}\n\n\tfor _, s := range r.events().Logs {\n\t\tu.switchExport(r, s)\n\t}\n\n\tappTotal := make(totalsDPImap)\n\tcatTotal := make(totalsDPImap)\n\n\tfor _, s := range m.ClientsDPI {\n\t\tu.batchClientDPI(r, s, appTotal, catTotal)\n\t}\n\n\treportClientDPItotals(r, appTotal, catTotal)\n}",
"func CallerChain(skipFrom, skipUntil int) (res []CallInfo) {\n\tfor skipCount := skipFrom; skipCount <= skipUntil; skipCount++ {\n\t\tpc, file, line, ok := runtime.Caller(skipCount)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tcaller := CallInfo{Line: line}\n\t\t_, caller.FileName = path.Split(file)\n\t\tparts := strings.Split(runtime.FuncForPC(pc).Name(), `.`)\n\t\tpl := len(parts)\n\t\tcaller.FuncName = parts[pl-1]\n\n\t\tif parts[pl-2][0] == '(' {\n\t\t\tcaller.FuncName = parts[pl-2] + `.` + caller.FuncName\n\t\t\tcaller.PackageName = strings.Join(parts[0:pl-2], `.`)\n\t\t} else {\n\t\t\tcaller.PackageName = strings.Join(parts[0:pl-1], `.`)\n\t\t}\n\n\t\tres = append(res, caller)\n\t}\n\treturn\n}",
"func (a axes) drawPoint(p *vg.Painter, xy xyer, cs vg.CoordinateSystem, l Line, pointNumber int) {\n\tx, y, isEnvelope := xy.XY(l)\n\n\t// add number of NaNs leading pointNumber to pointNumber.\n\ttargetNumber := pointNumber\n\tfor i, v := range x {\n\t\tif i > targetNumber {\n\t\t\tbreak\n\t\t}\n\t\tif math.IsNaN(v) {\n\t\t\tpointNumber++\n\t\t}\n\t}\n\n\tif len(x) <= pointNumber || len(y) <= pointNumber || pointNumber < 0 {\n\t\treturn\n\t}\n\tp.SetFont(font1)\n\tlabels := make([]vg.FloatText, 2)\n\tif isEnvelope {\n\t\tif n := len(x); n != len(y) || pointNumber+2 > n {\n\t\t\treturn\n\t\t} else {\n\t\t\txp, yp := x[pointNumber], y[pointNumber]\n\t\t\txp2, yp2 := x[n-pointNumber-2], y[n-pointNumber-2]\n\t\t\tx = []float64{xp, xp2}\n\t\t\ty = []float64{yp, yp2}\n\t\t\tlabels[0] = vg.FloatText{X: xp, Y: yp, S: fmt.Sprintf(\"(%.4g, %.4g)\", xp, yp), Align: 5}\n\t\t\tlabels[1] = vg.FloatText{X: xp2, Y: yp2, S: fmt.Sprintf(\"(%.4g, %.4g)\", xp2, yp2), Align: 1}\n\t\t}\n\t} else {\n\t\txp, yp := x[pointNumber], y[pointNumber]\n\t\tx = []float64{xp}\n\t\ty = []float64{yp}\n\t\tvar s string\n\t\tif xyp, ok := xy.(xyPolar); ok {\n\t\t\txstr := \"\"\n\t\t\tif xyp.rmin == 0 && xyp.rmax == 0 { // polar\n\t\t\t\tif len(l.X) > pointNumber && pointNumber >= 0 {\n\t\t\t\t\txstr = fmt.Sprintf(\"%.4g, \", l.X[pointNumber])\n\t\t\t\t}\n\t\t\t\ts = xstr + xmath.Absang(complex(yp, xp), \"%.4g@%.0f\")\n\t\t\t} else { // ring\n\t\t\t\ts = fmt.Sprintf(\"%.4g@%.1f\", l.X[pointNumber], 180.0*l.Y[pointNumber]/math.Pi)\n\t\t\t}\n\t\t} else {\n\t\t\ts = fmt.Sprintf(\"(%.4g, %.4g)\", xp, yp)\n\t\t}\n\t\tlabels[0] = vg.FloatText{X: xp, Y: yp, S: s, Align: 1}\n\t\tlabels = labels[:1]\n\t}\n\n\tsize := l.Style.Marker.Size\n\tif size == 0 {\n\t\tsize = l.Style.Line.Width\n\t}\n\tif size == 0 {\n\t\tsize = 9\n\t} else {\n\t\tsize *= 3\n\t}\n\tc := a.plot.Style.Order.Get(l.Style.Marker.Color, l.Id+1).Color()\n\tp.SetColor(c)\n\tp.Add(vg.FloatCircles{X: x, Y: y, CoordinateSystem: cs, Radius: size, Fill: true})\n\trect := a.inside.Bounds()\n\tfor _, l := range labels {\n\t\tl.CoordinateSystem = cs\n\t\tl.Rect = rect\n\n\t\t// Change the alignment, if the label would be placed at a picture boundary.\n\t\tx0, y0 := cs.Pixel(l.X, l.Y, rect)\n\t\tif l.Align == 1 && y0 < 30 {\n\t\t\tl.Align = 5\n\t\t} else if l.Align == 5 && y0 > rect.Max.Y-30 {\n\t\t\tl.Align = 1\n\t\t}\n\t\tif x0 < 50 {\n\t\t\tif l.Align == 1 {\n\t\t\t\tl.Align = 0\n\t\t\t} else if l.Align == 5 {\n\t\t\t\tl.Align = 6\n\t\t\t}\n\t\t} else if x0 > rect.Max.X-50 {\n\t\t\tif l.Align == 1 {\n\t\t\t\tl.Align = 2\n\t\t\t} else if l.Align == 5 {\n\t\t\t\tl.Align = 4\n\t\t\t}\n\t\t}\n\n\t\t// Place the label above or below with the offset of the marker's radius.\n\t\tif l.Align <= 2 { // Label is above point.\n\t\t\tl.Yoff = -size\n\t\t} else if l.Align >= 4 { // Label is below point\n\t\t\tl.Yoff = size\n\t\t}\n\n\t\t// Fill background rectangle of the label.\n\t\tx, y, w, h := l.Extent(p)\n\t\tsaveColor := p.GetColor()\n\t\tp.SetColor(a.bg)\n\t\tp.Add(vg.Rectangle{X: x, Y: y, W: w, H: h, Fill: true})\n\t\tp.SetColor(saveColor)\n\t\tp.Add(l)\n\t}\n}",
"func drawPoints(bc *braille.Canvas, points []image.Point, opt *brailleCircleOptions) error {\n\tfor _, p := range points {\n\t\tswitch opt.pixelChange {\n\t\tcase braillePixelChangeSet:\n\t\t\tif err := bc.SetPixel(p, opt.cellOpts...); err != nil {\n\t\t\t\treturn fmt.Errorf(\"SetPixel => %v\", err)\n\t\t\t}\n\t\tcase braillePixelChangeClear:\n\t\t\tif err := bc.ClearPixel(p, opt.cellOpts...); err != nil {\n\t\t\t\treturn fmt.Errorf(\"ClearPixel => %v\", err)\n\t\t\t}\n\n\t\t}\n\t}\n\treturn nil\n}",
"func (b *BccLatticePointGenerator) forEachPoint(\n\tboundingBox *BoundingBox3D,\n\tspacing float64,\n\tpoints *[]*Vector3D.Vector3D,\n\tcallback func(*([]*Vector3D.Vector3D), *Vector3D.Vector3D) bool,\n) {\n\n\thalfSpacing := spacing / 2\n\tboxWidth := boundingBox.width()\n\tboxHeight := boundingBox.height()\n\tboxDepth := boundingBox.depth()\n\n\tposition := Vector3D.NewVector(0, 0, 0)\n\thasOffset := false\n\tshouldQuit := false\n\n\tfor k := float64(0); k*halfSpacing <= boxDepth && !shouldQuit; k++ {\n\n\t\tposition.Z = k*halfSpacing + boundingBox.lowerCorner.Z\n\t\tvar offset float64\n\t\tif hasOffset {\n\n\t\t\toffset = halfSpacing\n\t\t} else {\n\t\t\toffset = 0\n\t\t}\n\n\t\tfor j := float64(0); j*spacing+offset <= boxHeight && !shouldQuit; j++ {\n\t\t\tposition.Y = j*spacing + offset + boundingBox.lowerCorner.Y\n\n\t\t\tfor i := float64(0); i*spacing+offset <= boxWidth; i++ {\n\t\t\t\tposition.X = i*spacing + offset + boundingBox.lowerCorner.X\n\n\t\t\t\tif !callback(points, position) {\n\t\t\t\t\tshouldQuit = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\thasOffset = !hasOffset\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
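The CallerChain negative above illustrates the standard pattern for inspecting caller frames with runtime.Caller and runtime.FuncForPC. The following minimal, self-contained sketch shows that pattern in isolation; the helper name whoCalledMe and the output format are illustrative only, not taken from the dataset:

package main

import (
	"fmt"
	"path"
	"runtime"
)

// whoCalledMe reports file:line and the fully qualified function name of
// its immediate caller, using the same runtime calls as CallerChain.
func whoCalledMe() string {
	pc, file, line, ok := runtime.Caller(1) // skip=1: the caller's frame
	if !ok {
		return "unknown"
	}
	_, name := path.Split(file)
	fn := runtime.FuncForPC(pc)
	if fn == nil {
		return fmt.Sprintf("%s:%d", name, line)
	}
	return fmt.Sprintf("%s:%d %s", name, line, fn.Name())
}

func main() {
	fmt.Println(whoCalledMe()) // e.g. "prog.go:27 main.main"
}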
Process any associations, pulling in their schedules | func (bf *boardFilter) processAssociations(s ldb.Service) {
for _, assoc := range s.Associations {
assoc.AddTiplocs(bf.tiplocs)
ar := assoc.Main.RID
ai := assoc.Main.LocInd
if ar == s.RID {
ar = assoc.Assoc.RID
ai = assoc.Assoc.LocInd
}
		// Resolve the schedule for a split or join, or for an NP
		// association only if it refers to the previous service and we
		// are not yet running
		if assoc.Category != "NP" || (s.LastReport.Tiploc == "" && assoc.Assoc.RID == s.RID) {
as := bf.d.ldb.GetSchedule(ar)
if as != nil {
assoc.Schedule = as
as.AddTiplocs(bf.tiplocs)
as.LastReport = as.GetLastReport()
bf.processToc(as.Toc)
if ai < (len(as.Locations) - 1) {
					if as.Destination != nil {
bf.addTiploc(as.Destination.Tiploc)
}
destination := as.Locations[len(as.Locations)-1].Tiploc
if as.Destination != nil {
destination = as.Destination.Tiploc
}
viaRequest := bf.addVia(ar, destination)
for _, l := range as.Locations[ai:] {
bf.addTiploc(l.Tiploc)
viaRequest.AppendTiploc(l.Tiploc)
}
}
bf.processReason(as.CancelReason, true)
bf.processReason(as.LateReason, false)
}
}
}
} | [
"func (s *candidate) Schedule() (constructedSchedule, error) {\n\tsch := constructedSchedule{\n\t\tearliest: s.earliest,\n\t\teventsByAttendee: make(map[AttendeeID]*attendeeEvents),\n\t}\n\tfor _, event := range s.order {\n\t\tif err := sch.Add(s.reqs[event]); err != nil {\n\t\t\treturn sch, err\n\t\t}\n\t}\n\treturn sch, nil\n}",
"func (records Records) LoadDoctorSchedule(fetcher DoctorScheduleFetcher) {\n\tvar lastID, lastSpec, lastName string\n\n\tdoctorRecords := make(Records, 0)\n\n\tfor _, r := range records {\n\t\tif lastID == \"\" {\n\t\t\tlastID = r.ID()\n\t\t\tlastSpec = r.Spec\n\t\t\tlastName = r.Name\n\t\t}\n\n\t\tif r.ID() != lastID {\n\t\t\tdoctorRecords = doctorRecords.Cleaned()\n\n\t\t\tschedule := &DoctorSchedule{\n\t\t\t\tSpec: lastSpec,\n\t\t\t\tName: lastName,\n\t\t\t\tCells: make(TimeCells, len(doctorRecords)),\n\t\t\t}\n\n\t\t\tfor i, rr := range doctorRecords {\n\t\t\t\tschedule.Cells[i] = &TimeCell{\n\t\t\t\t\tStartTime: rr.StartTime,\n\t\t\t\t\tDuration: rr.Duration,\n\t\t\t\t\tFree: rr.Free,\n\t\t\t\t\tRoom: rr.Room,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfetcher(schedule)\n\n\t\t\tlastID = r.ID()\n\t\t\tlastSpec = r.Spec\n\t\t\tlastName = r.Name\n\t\t\tdoctorRecords = make(Records, 0)\n\t\t}\n\n\t\tdoctorRecords = append(doctorRecords, r)\n\t}\n}",
"func notifyScheduleAssociates(s models.Schedule, action string) error {\n\t// Get the associated schedule events\n\tvar events []models.ScheduleEvent\n\tif err := dbClient.GetScheduleEventsByScheduleName(&events, s.Name); err != nil {\n\t\treturn err\n\t}\n\n\t// Get the device services for the schedule events\n\tvar services []models.DeviceService\n\tfor _, se := range events {\n\t\tvar ds models.DeviceService\n\t\tif err := dbClient.GetDeviceServiceByName(&ds, se.Service); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tservices = append(services, ds)\n\t}\n\n\t// Notify the associated device services\n\tif err := notifyAssociates(services, s.Id.Hex(), action, models.SCHEDULE); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (db *Database) GetSchedule(startLocationName, destinationName, date string) ([]Trip, map[int][]TripOffering, error) {\n trips := []Trip{}\n offerings := make(map[int][]TripOffering)\n row, err := db.Query(fmt.Sprintf(\"SELECT * FROM Trip WHERE StartLocationName=%s\", startLocationName))\n if err != nil {\n return trips, offerings, err\n }\n // Get the trips with the given start location name\n trips = RowToTrips(row)\n row.Close()\n // Get the trip offerings for each trip\n for _, t := range trips {\n row, err := db.Query(fmt.Sprintf(\"SELECT * FROM TripOffering WHERE TripNumber=%d\", t.TripNumber))\n if err != nil {\n return trips, offerings, err\n }\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n if _, ok := offerings[tripNumber]; !ok {\n offerings[tripNumber] = []TripOffering{}\n }\n offerings[tripNumber] = append(offerings[tripNumber], TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n row.Close()\n }\n return trips, offerings, nil\n}",
"func soundersScheduleCollector() {\n\n\tfetchSoundersSchedule()\n\n\tc := time.Tick(24 * time.Hour)\n\tfor _ = range c {\n\t\tfetchSoundersSchedule()\n\t}\n}",
"func ScheduleUnmarshalJSON(b []byte) (schedule Schedule, err error) {\n\tvar mixed interface{}\n\tjson.Unmarshal(b, &mixed)\n\n\tfor key, value := range mixed.(map[string]interface{}) {\n\t\trawValue, _ := json.Marshal(value)\n\t\tswitch key {\n\t\tcase \"date\":\n\t\t\tvar date Date\n\t\t\terr = json.Unmarshal(rawValue, &date)\n\t\t\tschedule = date\n\t\tcase \"day\":\n\t\t\tvar day Day\n\t\t\terr = json.Unmarshal(rawValue, &day)\n\t\t\tschedule = day\n\t\tcase \"intersection\":\n\t\t\tvar intersection Intersection\n\t\t\terr = json.Unmarshal(rawValue, &intersection)\n\t\t\tschedule = intersection\n\t\tcase \"month\":\n\t\t\tvar month Month\n\t\t\terr = json.Unmarshal(rawValue, &month)\n\t\t\tschedule = month\n\t\tcase \"union\":\n\t\t\tvar union Union\n\t\t\terr = json.Unmarshal(rawValue, &union)\n\t\t\tschedule = union\n\t\tcase \"week\":\n\t\t\tvar week Week\n\t\t\terr = json.Unmarshal(rawValue, &week)\n\t\t\tschedule = week\n\t\tcase \"weekday\":\n\t\t\tvar weekday Weekday\n\t\t\terr = json.Unmarshal(rawValue, &weekday)\n\t\t\tschedule = weekday\n\t\tcase \"year\":\n\t\t\tvar year Year\n\t\t\terr = json.Unmarshal(rawValue, &year)\n\t\t\tschedule = year\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"%s is not a recognized schedule\", key)\n\t\t}\n\t}\n\treturn\n}",
"func (jf JobFactory) Process(schedules []Schedule) {\n\tfor _, item := range schedules {\n\t\tif item.Api.Url != \"\" {\n\t\t\tlocalItem := item\n\t\t\tAddJob(item.Schedule, func() {\n\t\t\t\tlog.Printf(\"executing %s at %s\", localItem.Name, localItem.Api.Url)\n\t\t\t\toptions := restful.Options{}\n\t\t\t\toptions.Method = localItem.Api.Method\n\t\t\t\toptions.Headers = make(map[string]string)\n\t\t\t\toptions.Headers[\"Content-Type\"] = \"application/json\"\n\t\t\t\tif localItem.Api.Authorization != \"\" {\n\t\t\t\t\toptions.Headers[\"Authorization\"] = localItem.Api.Authorization\n\t\t\t\t}\n\t\t\t\toptions.Transformer = localItem.Api.Transform\n\t\t\t\toptions.Payload = localItem.Api.Body\n\t\t\t\tmessage, _ := restful.Call(localItem.Api.Url, &options)\n\t\t\t\tevent := EventData{}\n\t\t\t\tjson.Unmarshal([]byte(message), &event)\n\t\t\t\tGetEventsManager().Notify(event)\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tvalue, ok := advertisedJobs[item.Name]\n\t\tif ok {\n\t\t\tlog.Printf(\"%s, %s\", item.Schedule, item.Name)\n\t\t\tAddJob(item.Schedule, value)\n\t\t}\n\t}\n\tInitJobs()\n}",
"func (s *Schedule) GetAll(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tconn, err := db.Connect()\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\n\tsession := conn.NewSession(nil)\n\tdefer session.Close()\n\tdefer conn.Close()\n\n\tif request.QueryStringParameters == nil {\n\t\trequest.QueryStringParameters = map[string]string{\n\t\t\t\"event_id\": request.PathParameters[\"id\"],\n\t\t}\n\t} else {\n\t\trequest.QueryStringParameters[\"event_id\"] = request.PathParameters[\"id\"]\n\t}\n\n\tresult, err := db.Select(session, db.TableEventSchedule, request.QueryStringParameters, Schedule{})\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\n\treturn common.APIResponse(result, http.StatusOK)\n}",
"func buildScheduleList(runables ScriptSet) scheduledSet {\n\tset := scheduledSet{}\n\n\t_ = runables.Walk(func(s *Script) error {\n\t\tsch := schedule{scriptID: s.ID}\n\t\tfor _, t := range s.triggers {\n\t\t\tif !t.IsDeferred() {\n\t\t\t\t// only interested in deferred scripts\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif t.Condition == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif ts, err := time.Parse(time.RFC3339, t.Condition); err == nil {\n\t\t\t\tts = ts.Truncate(time.Minute)\n\t\t\t\tif ts.Before(now()) {\n\t\t\t\t\t// in the past...\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tsch.timestamps = append(sch.timestamps, ts)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// @todo parse cron format and fill intervals\n\t\t}\n\n\t\t// If there is anything useful in the schedule,\n\t\t// add it to the list\n\t\tif len(sch.timestamps) > 0 {\n\t\t\tset = append(set, sch)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn set\n}",
"func extract_schedules(hull []fpoint) []vrp.Schedule {\n\tschedules := make([]vrp.Schedule, len(hull))\n\tfor i, h := range hull {\n\t\tschedules[i] = h.schedule\n\t}\n\treturn schedules\n}",
"func (s *Scheduler) ScheduleTasks() {\n\t/*\n\t\tif events exist unattended, make tasks based on set up times\n\t*/\n\n}",
"func (r *ScheduleRepo) GetAll() (map[usecase.ScheduleID]*schedule.Schedule, usecase.Error) {\n\treturn r.getAllWhere(\"\")\n}",
"func (a *Airport) processArrivals() {\n\tfor {\n\t\tarrival, ok := <-a.arrivalChan\n\t\tif !ok {\n\t\t\ta.log.Errorf(\"arrival channel closed\")\n\t\t\treturn\n\t\t}\n\t\tswitch arrival.GetChangeType() {\n\t\tcase datasync.Put:\n\t\t\tfl := flight.Info{}\n\t\t\tif err := arrival.GetValue(&fl); err != nil {\n\t\t\t\ta.log.Errorf(\"failed to get value for arrival flight: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfl.Status = flight.Status_arrival\n\t\t\ta.runwayChan <- fl\n\t\tcase datasync.Delete:\n\t\t\ta.log.Debugf(\"arrival %s deleted\\n\", arrival.GetKey())\n\t\t}\n\t}\n}",
"func GetADVSchedules(id string, addr string, localIP string) error {\r\n\tlocalAddr, err := net.ResolveIPAddr(\"ip\", localIP)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tLocalBindAddr := &net.TCPAddr{IP: localAddr.IP}\r\n\ttransport := &http.Transport{\r\n\t\tDial: (&net.Dialer{\r\n\t\t\tLocalAddr: LocalBindAddr,\r\n\t\t\tTimeout: 5 * time.Second,\r\n\t\t\tKeepAlive: 30 * time.Second,\r\n\t\t}).Dial,\r\n\t}\r\n\tclient := &http.Client{\r\n\t\tTransport: transport,\r\n\t}\r\n\r\n\turl := \"http://\" + addr + \"/adm/adv-schedules/\" + id + \"?format=cic\"\r\n\r\n\treq, err := http.NewRequest(\"GET\", url, nil)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tresp, err := client.Do(req)\r\n\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tif resp.StatusCode != 200 {\r\n\t\treturn fmt.Errorf(\"ADM Receved %v\", resp.Status)\r\n\t}\r\n\r\n\tfor {\r\n\t\tbuf := make([]byte, 32*1024)\r\n\t\t_, err := resp.Body.Read(buf)\r\n\r\n\t\tif err != nil && err != io.EOF {\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\tresp.Body.Close()\r\n\ttransport.CloseIdleConnections()\r\n\r\n\treturn nil\r\n}",
"func (pgmodel *PgDB) SelectCurrentScheduler() ([]model.ScheduleTask, error) {\n\tnow, _ := time.Parse(\"2006-01-02 15:04:00\", time.Now().UTC().Format(\"2006-01-02 15:04:00\"))\n\n\tscheduleRepository := model.NewScheduleRepository()\n\tscheduleModel := scheduleRepository.GetTaskModel()\n\n\terr := pgmodel.db.Model(&scheduleModel).\n\t\tColumnExpr(\"schedule_task.*\").\n\t\tColumnExpr(\"delivery.title AS delivery__title\").\n\t\tColumnExpr(\"delivery.text AS delivery__text\").\n\t\tColumnExpr(\"delivery.user_ids AS delivery__user_ids\").\n\t\tColumnExpr(\"delivery.id AS delivery__id\").\n\t\tColumnExpr(\"delivery.filter AS delivery__filter\").\n\t\tJoin(\"INNER JOIN talkbank_bots.delivery AS delivery ON delivery.id = schedule_task.action_id\").\n\t\tWhere(\"schedule_task.is_active = ?\", true).\n\t\tWhereGroup(func(q *orm.Query) (*orm.Query, error) {\n\t\t\treturn q.\n\t\t\t\tWhereOrGroup(func(subQ1 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\treturn subQ1.\n\t\t\t\t\t\tWhere(\"schedule_task.type = ?\", \"onetime\").\n\t\t\t\t\t\tWhere(\"schedule_task.from_datetime >= ?\", now).\n\t\t\t\t\t\tWhereGroup(func(subQ *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subQ.\n\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS NULL\").\n\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime >= schedule_task.from_datetime\"), nil\n\t\t\t\t\t\t}), nil\n\t\t\t\t}).\n\t\t\t\tWhereOrGroup(func(subQ2 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\treturn subQ2.\n\t\t\t\t\t\tWhere(\"schedule_task.type = ?\", \"recurrently\").\n\t\t\t\t\t\tWhereGroup(func(subGroup *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subGroup.Where(\"schedule_task.from_datetime <= ?\", now).\n\t\t\t\t\t\t\t\tWhereGroup(func(subQ *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\treturn subQ.\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS NULL\").\n\t\t\t\t\t\t\t\t\t\tWhereOrGroup(func(subQ1 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\t\t\treturn subQ1.\n\t\t\t\t\t\t\t\t\t\t\t\tWhere(\"schedule_task.to_datetime >= ?\", now).\n\t\t\t\t\t\t\t\t\t\t\t\tWhere(\"schedule_task.to_datetime > schedule_task.from_datetime\"), nil\n\t\t\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t}).\n\t\t\t\t\t\tWhereOrGroup(func(subGroup2 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subGroup2.\n\t\t\t\t\t\t\t\tWhere(\"schedule_task.from_datetime >= ?\", now).\n\t\t\t\t\t\t\t\tWhere(\"schedule_task.from_datetime <= schedule_task.next_run\").\n\t\t\t\t\t\t\t\tWhereGroup(func(toGroup *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\treturn toGroup.\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS NULL\").\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime >= schedule_task.next_run\"), nil\n\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t}), nil\n\t\t\t\t}), nil\n\t\t}).\n\t\tOrder(\"schedule_task.id ASC\").\n\t\tSelect()\n\n\tif err != nil {\n\t\tfmt.Println(\"Error to get data from scheduler_task\", err)\n\t\treturn nil, err\n\t}\n\n\treturn scheduleModel, nil\n}",
"func notifyScheduleEventAssociates(se models.ScheduleEvent, action string) error {\n\t// Get the associated device service\n\tvar ds models.DeviceService\n\tif err := dbClient.GetDeviceServiceByName(&ds, se.Service); err != nil {\n\t\treturn err\n\t}\n\n\tvar services []models.DeviceService\n\tservices = append(services, ds)\n\n\t// Notify the associated device service\n\tif err := notifyAssociates(services, se.Id.Hex(), action, models.SCHEDULEEVENT); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (o *Operation) populateLinks(zones []Zone, inGid GoogleID, assignments map[TaskID][]GoogleID, depends map[TaskID][]TaskID) error {\n\tvar description sql.NullString\n\n\trows, err := db.Query(\"SELECT link.ID, link.fromPortalID, link.toPortalID, task.comment, task.taskorder, task.state, link.color, task.zone, task.delta FROM link JOIN task ON link.ID = task.ID WHERE task.opID = ? AND link.opID = task.opID\", o.ID)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\ttmpLink := Link{}\n\t\ttmpLink.opID = o.ID\n\n\t\terr := rows.Scan(&tmpLink.ID, &tmpLink.From, &tmpLink.To, &description, &tmpLink.Order, &tmpLink.State, &tmpLink.Color, &tmpLink.Zone, &tmpLink.DeltaMinutes)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\ttmpLink.Task.ID = TaskID(tmpLink.ID)\n\n\t\tif description.Valid {\n\t\t\ttmpLink.Desc = description.String\n\t\t\ttmpLink.Comment = description.String\n\t\t}\n\n\t\ttmpLink.ThrowOrder = tmpLink.Order\n\n\t\tif a, ok := assignments[tmpLink.Task.ID]; ok {\n\t\t\ttmpLink.Assignments = a\n\t\t\ttmpLink.AssignedTo = a[0]\n\t\t}\n\n\t\tif d, ok := depends[tmpLink.Task.ID]; ok {\n\t\t\ttmpLink.DependsOn = d\n\t\t}\n\n\t\tif tmpLink.State == \"completed\" {\n\t\t\ttmpLink.Completed = true\n\t\t}\n\n\t\t// this isn't in a zone with which we are concerned AND not assigned to me, skip\n\t\tif !tmpLink.Zone.inZones(zones) && !tmpLink.IsAssignedTo(inGid) {\n\t\t\tcontinue\n\t\t}\n\t\to.Links = append(o.Links, tmpLink)\n\t}\n\treturn nil\n}",
"func doEvents() error {\n\tif len(accounts) == 0 {\n\t\twf.NewItem(\"No Accounts Configured\").\n\t\t\tSubtitle(\"Action this item to add a Google account\").\n\t\t\tAutocomplete(\"workflow:login\").\n\t\t\tIcon(aw.IconWarning)\n\n\t\twf.SendFeedback()\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tcals []*Calendar\n\t\terr error\n\t)\n\n\tif cals, err = activeCalendars(); err != nil {\n\t\tif err == errNoActive {\n\t\t\twf.NewItem(\"No Active Calendars\").\n\t\t\t\tSubtitle(\"Action this item to choose calendars\").\n\t\t\t\tAutocomplete(\"workflow:calendars\").\n\t\t\t\tIcon(aw.IconWarning)\n\n\t\t\twf.SendFeedback()\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif err == errNoCalendars {\n\t\t\tif !wf.IsRunning(\"update-calendars\") {\n\t\t\t\tcmd := exec.Command(os.Args[0], \"update\", \"calendars\")\n\t\t\t\tif err := wf.RunInBackground(\"update-calendars\", cmd); err != nil {\n\t\t\t\t\treturn errors.Wrap(err, \"run calendar update\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twf.NewItem(\"Fetching List of Calendars…\").\n\t\t\t\tSubtitle(\"List will reload shortly\").\n\t\t\t\tValid(false).\n\t\t\t\tIcon(ReloadIcon())\n\n\t\t\twf.Rerun(0.1)\n\t\t\twf.SendFeedback()\n\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\tlog.Printf(\"%d active calendar(s)\", len(cals))\n\n\tvar (\n\t\tall []*Event\n\t\tevents []*Event\n\t\tparsed time.Time\n\t)\n\n\tif all, err = loadEvents(opts.StartTime, cals...); err != nil {\n\t\treturn errors.Wrap(err, \"load events\")\n\t}\n\n\t// Filter out events after cutoff\n\tfor _, e := range all {\n\t\tif !opts.ScheduleMode && e.Start.After(opts.EndTime) {\n\t\t\tbreak\n\t\t}\n\t\tevents = append(events, e)\n\t\tlog.Printf(\"%s\", e.Title)\n\t}\n\n\tif len(all) == 0 && wf.IsRunning(\"update-events\") {\n\t\twf.NewItem(\"Fetching Events…\").\n\t\t\tSubtitle(\"Results will refresh shortly\").\n\t\t\tIcon(ReloadIcon()).\n\t\t\tValid(false)\n\n\t\twf.Rerun(0.1)\n\t}\n\n\tlog.Printf(\"%d event(s) for %s\", len(events), opts.StartTime.Format(timeFormat))\n\n\tif t, ok := parseDate(opts.Query); ok {\n\t\tparsed = t\n\t}\n\n\tif len(events) == 0 && opts.Query == \"\" {\n\t\twf.NewItem(fmt.Sprintf(\"No Events on %s\", opts.StartTime.Format(timeFormatLong))).\n\t\t\tIcon(ColouredIcon(iconCalendar, yellow))\n\t}\n\n\tvar day time.Time\n\n\tfor _, e := range events {\n\t\t// Show day indicator if this is the first event of a given day\n\t\tif opts.ScheduleMode && midnight(e.Start).After(day) {\n\t\t\tday = midnight(e.Start)\n\n\t\t\twf.NewItem(day.Format(timeFormatLong)).\n\t\t\t\tArg(day.Format(timeFormat)).\n\t\t\t\tValid(true).\n\t\t\t\tIcon(iconDay)\n\t\t}\n\n\t\ticon := ColouredIcon(iconCalendar, e.Colour)\n\n\t\tsub := fmt.Sprintf(\"%s – %s / %s\",\n\t\t\te.Start.Local().Format(hourFormat),\n\t\t\te.End.Local().Format(hourFormat),\n\t\t\te.CalendarTitle)\n\n\t\tif e.Location != \"\" {\n\t\t\tsub = sub + \" / \" + e.Location\n\t\t}\n\n\t\tit := wf.NewItem(e.Title).\n\t\t\tSubtitle(sub).\n\t\t\tIcon(icon).\n\t\t\tArg(e.URL).\n\t\t\tQuicklook(previewURL(opts.StartTime, e.ID)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"open\")\n\n\t\tif e.Location != \"\" {\n\t\t\tapp := \"Google Maps\"\n\t\t\tif opts.UseAppleMaps {\n\t\t\t\tapp = \"Apple Maps\"\n\t\t\t}\n\n\t\t\ticon := ColouredIcon(iconMap, e.Colour)\n\t\t\tit.NewModifier(\"cmd\").\n\t\t\t\tSubtitle(\"Open in \"+app).\n\t\t\t\tArg(mapURL(e.Location)).\n\t\t\t\tValid(true).\n\t\t\t\tIcon(icon).\n\t\t\t\tVar(\"CALENDAR_APP\", \"\") // Don't open Maps URLs in CALENDAR_APP\n\t\t}\n\t}\n\n\tif !opts.ScheduleMode {\n\t\t// Navigation items\n\t\tprev 
:= opts.StartTime.AddDate(0, 0, -1)\n\t\twf.NewItem(\"Previous: \"+relativeDate(prev)).\n\t\t\tIcon(iconPrevious).\n\t\t\tArg(prev.Format(timeFormat)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"date\")\n\n\t\tnext := opts.StartTime.AddDate(0, 0, 1)\n\t\twf.NewItem(\"Next: \"+relativeDate(next)).\n\t\t\tIcon(iconNext).\n\t\t\tArg(next.Format(timeFormat)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"date\")\n\t}\n\n\tif opts.Query != \"\" {\n\t\twf.Filter(opts.Query)\n\t}\n\n\tif !parsed.IsZero() {\n\t\ts := parsed.Format(timeFormat)\n\n\t\twf.NewItem(parsed.Format(timeFormatLong)).\n\t\t\tSubtitle(relativeDays(parsed, false)).\n\t\t\tArg(s).\n\t\t\tAutocomplete(s).\n\t\t\tValid(true).\n\t\t\tIcon(iconDefault)\n\t}\n\n\twf.WarnEmpty(\"No Matching Events\", \"Try a different query?\")\n\twf.SendFeedback()\n\treturn nil\n}",
"func updateScheduleFields(from models.Schedule, to *models.Schedule, w http.ResponseWriter) error {\n\tif from.Cron != \"\" {\n\t\tif _, err := cron.Parse(from.Cron); err != nil {\n\t\t\terr = errors.New(\"Invalid cron format\")\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Cron = from.Cron\n\t}\n\tif from.End != \"\" {\n\t\tif _, err := msToTime(from.End); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.End = from.End\n\t}\n\tif from.Frequency != \"\" {\n\t\tif !isIntervalValid(from.Frequency) {\n\t\t\terr := errors.New(\"Frequency format is incorrect: \" + from.Frequency)\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Frequency = from.Frequency\n\t}\n\tif from.Start != \"\" {\n\t\tif _, err := msToTime(from.Start); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Start = from.Start\n\t}\n\tif from.Origin != 0 {\n\t\tto.Origin = from.Origin\n\t}\n\tif from.Name != \"\" && from.Name != to.Name {\n\t\t// Check if new name is unique\n\t\tvar checkS models.Schedule\n\t\tif err := dbClient.GetScheduleByName(&checkS, from.Name); err != nil {\n\t\t\tif err != db.ErrNotFound {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\t}\n\t\t} else {\n\t\t\tif checkS.Id != to.Id {\n\t\t\t\terr := errors.New(\"Duplicate name for the schedule\")\n\t\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// Check if the schedule still has attached schedule events\n\t\tstillInUse, err := isScheduleStillInUse(*to)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\treturn err\n\t\t}\n\t\tif stillInUse {\n\t\t\terr = errors.New(\"Schedule is still in use, can't change the name\")\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\n\t\tto.Name = from.Name\n\t}\n\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
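A hedged sketch of the RID swap at the top of processAssociations above: an association names two services (Main and Assoc), and the schedule worth resolving is the one that is not the service currently being rendered. Every type and name below is a stand-in for illustration, not the real ldb API:

package main

import "fmt"

// Association is a hypothetical stand-in: it links a main service to an
// associated service by their RIDs.
type Association struct{ MainRID, AssocRID string }

// otherRID returns the RID on the opposite side of the association from
// the service currently being processed.
func otherRID(a Association, currentRID string) string {
	if a.MainRID == currentRID {
		return a.AssocRID
	}
	return a.MainRID
}

func main() {
	a := Association{MainRID: "202401018001", AssocRID: "202401018002"}
	fmt.Println(otherRID(a, "202401018001")) // 202401018002
}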
acceptService returns true if the service is to be accepted, false if it's to be ignored | func (bf *boardFilter) acceptService(service ldb.Service) bool {
// Original requirement, must have an RID
if service.RID == "" {
return false
}
// remove terminating services
if bf.terminated && bf.atStation(service.Destination) {
return false
}
if bf.callAt && !bf.callsAt(service.CallingPoints, bf.callAtTiplocs) {
return false
}
return true
} | [
"func (f *aclFilter) allowService(service string) bool {\n\tif service == \"\" {\n\t\treturn true\n\t}\n\n\tif !f.enforceVersion8 && service == structs.ConsulServiceID {\n\t\treturn true\n\t}\n\treturn f.authorizer.ServiceRead(service)\n}",
"func (r *RPCAcceptor) Accept(req *ChannelAcceptRequest) bool {\n\treturn r.acceptClosure(req)\n}",
"func (m *MockMessageSvc) Accept(msgType string, purpose []string) bool {\n\tif m.AcceptFunc != nil {\n\t\treturn m.AcceptFunc(msgType, purpose)\n\t}\n\n\treturn true\n}",
"func (s *Service) Accept(conn net.Conn, ipport string) error {\n\tswitch s.Role {\n\tcase ROLE_MANAGE:\n\t\treturn TcpAcceptor(conn, s, ipport)\n\tcase ROLE_PROXY, ROLE_WEBSERVER:\n\t\treturn HttpAcceptor(conn, s, ipport)\n\tdefault:\n\t\tlog.Fatal(\"unknown role in accept\")\n\t}\n\treturn errors.New(\"Accept fell through!\")\n}",
"func (s *acceptFirst) Accept(from interface{}) bool {\n\tif _, ok := s.handled[from]; ok {\n\t\treturn false\n\t}\n\ts.handled[from] = struct{}{}\n\treturn true\n}",
"func (s *Suite) Accept(t string) bool {\n\treturn t == signatureType\n}",
"func (c *ChainedAcceptor) Accept(req *ChannelAcceptRequest) bool {\n\tresult := true\n\n\tc.acceptorsMtx.RLock()\n\tfor _, acceptor := range c.acceptors {\n\t\t// We call Accept first in case any acceptor (perhaps an RPCAcceptor)\n\t\t// wishes to be notified about ChannelAcceptRequest.\n\t\tresult = acceptor.Accept(req) && result\n\t}\n\tc.acceptorsMtx.RUnlock()\n\n\treturn result\n}",
"func (r *Runtime) isAccept() bool {\n\taccepts := r.d.F\n\tif accepts.Contains(r.cur) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (f *MSPFilter) Accept(peer fab.Peer) bool {\n\treturn peer.MSPID() == f.mspID\n}",
"func IsValidService(s string) bool {\n\tswitch s {\n\tcase\n\t\t\"all\",\n\t\t\"proxy\",\n\t\t\"authorize\",\n\t\t\"authenticate\":\n\t\treturn true\n\t}\n\treturn false\n}",
"func ValidService(service string, cfg *config.CloudConfig) bool {\n\tservices := availableService(cfg, false)\n\tif !IsLocalOrURL(service) && !util.Contains(services, service) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func matchService(req *http.Request, services []*Service) (*Service, bool) {\n\tfor _, service := range services {\n\t\thostRegexp := regexp.MustCompile(service.HostRegexp)\n\t\tif !hostRegexp.MatchString(req.Host) {\n\t\t\tlog.Tracef(\"Req host [%s] doesn't match [%s].\",\n\t\t\t\treq.Host, hostRegexp)\n\t\t\tcontinue\n\t\t}\n\n\t\tif service.PathRegexp == \"\" {\n\t\t\tlog.Debugf(\"Host [%s] matched pattern [%s] and path \"+\n\t\t\t\t\"expression is empty. Using service [%s].\",\n\t\t\t\treq.Host, hostRegexp, service.Address)\n\t\t\treturn service, true\n\t\t}\n\n\t\tpathRegexp := regexp.MustCompile(service.PathRegexp)\n\t\tif !pathRegexp.MatchString(req.URL.Path) {\n\t\t\tlog.Tracef(\"Req path [%s] doesn't match [%s].\",\n\t\t\t\treq.URL.Path, pathRegexp)\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Host [%s] matched pattern [%s] and path [%s] \"+\n\t\t\t\"matched [%s]. Using service [%s].\",\n\t\t\treq.Host, hostRegexp, req.URL.Path, pathRegexp,\n\t\t\tservice.Address)\n\t\treturn service, true\n\t}\n\tlog.Errorf(\"No backend service matched request [%s%s].\", req.Host,\n\t\treq.URL.Path)\n\treturn nil, false\n}",
"func (v *VDRI) Accept(method string) bool {\n\treturn v.accept(method)\n}",
"func (e *entry) canServe() bool {\n\t_, ok := e.svc.(Service)\n\treturn ok\n}",
"func (sms *SMS) Accept() {\n\tsms.acceptCh <- true\n\tclose(sms.acceptCh)\n}",
"func (aa Acceptors) Accept(from interface{}) bool {\n\tfor _, a := range aa {\n\t\tif !a.Accept(from) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s ServiceSpecs) SupportService(serviceUrl string, serviceOrg string) bool {\n\tif serviceUrl == \"\" {\n\t\treturn true\n\t} else {\n\t\tif len(s) == 0 {\n\t\t\treturn true\n\t\t} else {\n\t\t\tfor _, sp := range s {\n\t\t\t\tif sp.Url == serviceUrl && (sp.Org == \"\" || sp.Org == serviceOrg) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func containsService(name string, services []servicescm.Service) bool {\n\tfor _, svc := range services {\n\t\tif svc.Name == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func IsExposedService(svc *corev1.Service) bool {\n\tlabels := svc.Labels\n\tif labels == nil {\n\t\tlabels = map[string]string{}\n\t}\n\tfor _, l := range ExposeLabelKeys {\n\t\tif labels[l] == \"true\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
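acceptService above is a chain of early-exit checks: the first failing rule rejects the service. Below is a self-contained sketch of that shape, with hypothetical service fields and rules rather than the real boardFilter state:

package main

import "fmt"

type service struct{ RID, Destination string }

// predicate reports whether the service passes one acceptance rule.
type predicate func(service) bool

// accept applies each rule in turn; the first failure rejects the service,
// mirroring the early returns in acceptService.
func accept(s service, rules ...predicate) bool {
	for _, p := range rules {
		if !p(s) {
			return false
		}
	}
	return true
}

func main() {
	hasRID := func(s service) bool { return s.RID != "" }
	notTerminating := func(s service) bool { return s.Destination != "HERE" }
	fmt.Println(accept(service{RID: "1", Destination: "AWAY"}, hasRID, notTerminating)) // true
	fmt.Println(accept(service{}, hasRID, notTerminating))                             // false
}

Keeping each rule as its own predicate leaves the rules independently testable while preserving the same short-circuit behaviour as the hand-written if-chain.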
rowToRecord converts from pgx.Row to a store.Record | func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {
var expiry *time.Time
record := &store.Record{}
metadata := make(Metadata)
if err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {
if err == sql.ErrNoRows {
return record, store.ErrNotFound
}
return nil, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if expiry != nil {
record.Expiry = time.Until(*expiry)
}
return record, nil
} | [
"func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {\n\tvar records []*store.Record\n\n\tfor rows.Next() {\n\t\tvar expiry *time.Time\n\t\trecord := &store.Record{}\n\t\tmetadata := make(Metadata)\n\n\t\tif err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\t\treturn records, err\n\t\t}\n\n\t\t// set the metadata\n\t\trecord.Metadata = toMetadata(&metadata)\n\t\tif expiry != nil {\n\t\t\trecord.Expiry = time.Until(*expiry)\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\treturn records, nil\n}",
"func recordToRecord(\n\ttopic string,\n\tpartition int32,\n\tbatch *kmsg.RecordBatch,\n\trecord *kmsg.Record,\n) *Record {\n\th := make([]RecordHeader, 0, len(record.Headers))\n\tfor _, kv := range record.Headers {\n\t\th = append(h, RecordHeader{\n\t\t\tKey: kv.Key,\n\t\t\tValue: kv.Value,\n\t\t})\n\t}\n\n\treturn &Record{\n\t\tKey: record.Key,\n\t\tValue: record.Value,\n\t\tHeaders: h,\n\t\tTimestamp: timeFromMillis(batch.FirstTimestamp + int64(record.TimestampDelta)),\n\t\tTopic: topic,\n\t\tPartition: partition,\n\t\tAttrs: RecordAttrs{uint8(batch.Attributes)},\n\t\tProducerID: batch.ProducerID,\n\t\tProducerEpoch: batch.ProducerEpoch,\n\t\tLeaderEpoch: batch.PartitionLeaderEpoch,\n\t\tOffset: batch.FirstOffset + int64(record.OffsetDelta),\n\t}\n}",
"func (r RecordV1) toRecord() Record {\n\treturn Record{\n\t\tType: r.Type,\n\t\tName: r.Name,\n\t\tAppliedAt: r.AppliedAt,\n\t}\n}",
"func RowToRawData(rows *sql.Rows) (r RawData) {\n\trecord, _ := RowToArr(rows)\n\tr.Header = record[0]\n\tr.Rows = append(r.Rows, record[1:])\n\treturn\n}",
"func convertRow(\n\trow *Row,\n\twantsNode bool,\n\twantsTimestamp bool,\n\tdesiredValues []string,\n) *stats.Row {\n\tvar (\n\t\tnode string\n\t\ttimestamp time.Time\n\t)\n\n\tvar resultValues map[string]interface{}\n\tif len(desiredValues) > 0 {\n\t\tresultValues = make(map[string]interface{})\n\t}\n\n\tfor _, v := range desiredValues {\n\t\tresultValues[v] = row.value(v)\n\t}\n\n\tif wantsNode {\n\t\tnode = row.Node\n\t}\n\tif wantsTimestamp {\n\t\ttimestamp = row.Timestamp.UTC()\n\t}\n\n\treturn &stats.Row{\n\t\tNode: node,\n\t\tTimestamp: timestamp,\n\t\tValues: resultValues,\n\t}\n}",
"func Row2Bytes() func([]interface{}) ([]byte, error) {\n\t//TODO: test this\n\thandle := new(codec.MsgpackHandle)\n\n\treturn func(row []interface{}) ([]byte, error) {\n\t\tbuffer := new(bytes.Buffer)\n\t\tenc := codec.NewEncoder(buffer, handle)\n\t\terr := enc.Encode(row)\n\t\tif err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\t\treturn buffer.Bytes(), nil\n\t}\n}",
"func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}",
"func (e *commonFormatEncoder) Row(tp int, row *[]interface{}, seqno uint64) ([]byte, error) {\n\tcf := convertRowToCommonFormat(tp, row, e.inSchema, seqno, e.filter)\n\treturn CommonFormatEncode(cf)\n}",
"func MarshalRecord(record *rangedb.Record) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tnewRecord := *record\n\tnewRecord.Data = nil\n\n\tencoder := msgpack.NewEncoder(&buf)\n\tencoder.UseJSONTag(true)\n\n\terr := encoder.Encode(newRecord)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed encoding record: %v\", err)\n\t}\n\n\terr = encoder.Encode(record.Data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed encoding record data: %v\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}",
"func RowTo[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&value)\n\treturn value, err\n}",
"func (dao PathProfileDAOPsql) rowToPathProfile(row *sql.Row, o *models.PathProfile) error {\n\treturn row.Scan(&o.ID, &o.ProfileID, &o.Path.ID, &o.Path.Path, &o.Path.PathName, &o.Path.Description, &o.Post, &o.Put, &o.Del, &o.Get, &o.CreatedAt, &o.UpdatedAt)\n}",
"func (raw *Raw) ToRecord() Record {\n\tstart := time.Now()\n\tcborH := &codec.CborHandle{}\n\trec := getRecordByTypeID(raw.Type)\n\tdec := codec.NewDecoder(bytes.NewReader(raw.Data), cborH)\n\terr := dec.Decode(rec)\n\tsince := time.Since(start)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif raw.Type == codeRecordID {\n\t\tlog.Debugf(\"ToRecord func in record/serialize: for TypeID %s, time inside - %s\", raw.Type, since)\n\t}\n\treturn rec\n}",
"func CSVToRecord(fields []string) (record Record, err error) {\n\t// Convert prices from strings to floats\n\topenPrice, err := strconv.ParseFloat(fields[CSVOpenIndex], FloatSize)\n\tif err != nil {\n\t\terr = errors.New(\"failed to parse open price\")\n\t}\n\topenPriceCents := int(openPrice * 100)\n\n\tclosePrice, err := strconv.ParseFloat(fields[CSVCloseIndex], FloatSize)\n\tif err != nil {\n\t\terr = errors.New(\"failed to parse close price\")\n\t}\n\tclosePriceCents := int(closePrice * 100)\n\n\t// Convert time from string to time\n\tconst dateFormat = \"2006-01-02\"\n\trecordDate, err := time.Parse(dateFormat, fields[CSVDateIndex])\n\tif err != nil {\n\t\terr = errors.New(\"failed to parse quote date\")\n\t}\n\trecord.Day = recordDate\n\trecord.Open = openPriceCents\n\trecord.Close = closePriceCents\n\treturn\n}",
"func (tkrs *CSVKeyedRecordScanner) Record() (*libutils.KeyedRecord) {\n wire_data := tkrs.scanner.Bytes()\n wire_data_copy := make([]byte, len(wire_data))\n copy(wire_data_copy, wire_data)\n\n return libutils.NewKeyedRecordFromBytes(wire_data_copy, tkrs.decoder)\n}",
"func NewRecord(schema *arrow.Schema, cols []arrow.Array, nrows int64) *simpleRecord {\n\trec := &simpleRecord{\n\t\trefCount: 1,\n\t\tschema: schema,\n\t\trows: nrows,\n\t\tarrs: make([]arrow.Array, len(cols)),\n\t}\n\tcopy(rec.arrs, cols)\n\tfor _, arr := range rec.arrs {\n\t\tarr.Retain()\n\t}\n\n\tif rec.rows < 0 {\n\t\tswitch len(rec.arrs) {\n\t\tcase 0:\n\t\t\trec.rows = 0\n\t\tdefault:\n\t\t\trec.rows = int64(rec.arrs[0].Len())\n\t\t}\n\t}\n\n\terr := rec.validate()\n\tif err != nil {\n\t\trec.Release()\n\t\tpanic(err)\n\t}\n\n\treturn rec\n}",
"func (r *Rows) row(a ...interface{}) error {\n\tdefer r.Close()\n\n\tfor _, dp := range a {\n\t\tif _, ok := dp.(*sql.RawBytes); ok {\n\t\t\treturn VarTypeError(\"RawBytes isn't allowed on Row()\")\n\t\t}\n\t}\n\n\tif !r.Next() {\n\t\tif err := r.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sql.ErrNoRows\n\t}\n\tif err := r.Scan(a...); err != nil {\n\t\treturn err\n\t}\n\n\treturn r.Close()\n}",
"func ConvertRecord(s string) (r record) {\n // Drop the last char pf the string (it's a ' ')\n s = s[:len(s) - 1]\n\n // Split the string in the various fields\n var fields []string = strings.Split(s, \" \")\n\n // Update the fields of the record based on the various fields\n for _, f := range fields {\n switch f[:3] {\n case \"byr\": r.byr = f[4:]\n case \"iyr\": r.iyr = f[4:]\n case \"eyr\": r.eyr = f[4:]\n case \"hgt\": r.hgt = f[4:]\n case \"hcl\": r.hcl = f[4:]\n case \"ecl\": r.ecl = f[4:]\n case \"pid\": r.pid = f[4:]\n }\n }\n\n return\n}",
"func (m *MySQL) ToRecord(cs dbchangeset) *changeset.Record {\n\ttag := \"\"\n\tif cs.Tag != nil {\n\t\ttag = *cs.Tag\n\t}\n\n\treturn &changeset.Record{\n\t\tID: cs.ID,\n\t\tAuthor: cs.Author,\n\t\tFilename: cs.Filename,\n\t\tDateExecuted: cs.DateExecuted,\n\t\tOrderExecuted: cs.OrderExecuted,\n\t\tChecksum: cs.Checksum,\n\t\tDescription: cs.Description,\n\t\tTag: tag,\n\t\tVersion: cs.Version,\n\t}\n}",
"func RowToArr(rows *sql.Rows) (records [][]string, err error) {\n\tfmt.Printf(\"RowToArr start at %s\", time.Now())\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\t//records = make([]interface{}, 0)\n\trecords = append(records, columns) //append row header as 1st row\n\n\t// var resultCols []string\n\tfor rows.Next() {\n\t\t// resultCols = make([]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\terr = rows.Scan(readCols...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresultCols := assertTypeArray(columns, rawCols)\n\t\trecords = append(records, resultCols)\n\t}\n\n\tfmt.Printf(\"RowToArr end at %s\", time.Now())\n\treturn records, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
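One detail of rowToRecord above worth isolating: the database stores an absolute, nullable expiry timestamp, and the record exposes it as a relative TTL via time.Until. A minimal sketch of just that conversion; record here is a stand-in, not the real store.Record:

package main

import (
	"fmt"
	"time"
)

type record struct{ Expiry time.Duration }

// withExpiry converts a nullable absolute expiry into a TTL. A nil pointer
// means the record never expires (zero duration), matching the guard in
// rowToRecord.
func withExpiry(expiry *time.Time) record {
	var r record
	if expiry != nil {
		r.Expiry = time.Until(*expiry) // negative if already expired
	}
	return r
}

func main() {
	t := time.Now().Add(90 * time.Second)
	fmt.Println(withExpiry(&t).Expiry.Round(time.Second)) // ~1m30s
	fmt.Println(withExpiry(nil).Expiry)                   // 0s, i.e. no expiry
}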
rowsToRecords converts from pgx.Rows to []store.Record | func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {
var records []*store.Record
for rows.Next() {
var expiry *time.Time
record := &store.Record{}
metadata := make(Metadata)
if err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {
return records, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if expiry != nil {
record.Expiry = time.Until(*expiry)
}
records = append(records, record)
}
return records, nil
} | [
"func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {\n\tvar expiry *time.Time\n\trecord := &store.Record{}\n\tmetadata := make(Metadata)\n\n\tif err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn record, store.ErrNotFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t// set the metadata\n\trecord.Metadata = toMetadata(&metadata)\n\tif expiry != nil {\n\t\trecord.Expiry = time.Until(*expiry)\n\t}\n\n\treturn record, nil\n}",
"func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {\n\tvar rs [][]Value\n\tfor _, r := range rows {\n\t\trow, err := convertRow(r, schema)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trs = append(rs, row)\n\t}\n\treturn rs, nil\n}",
"func RowToArr(rows *sql.Rows) (records [][]string, err error) {\n\tfmt.Printf(\"RowToArr start at %s\", time.Now())\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\t//records = make([]interface{}, 0)\n\trecords = append(records, columns) //append row header as 1st row\n\n\t// var resultCols []string\n\tfor rows.Next() {\n\t\t// resultCols = make([]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\terr = rows.Scan(readCols...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresultCols := assertTypeArray(columns, rawCols)\n\t\trecords = append(records, resultCols)\n\t}\n\n\tfmt.Printf(\"RowToArr end at %s\", time.Now())\n\treturn records, nil\n}",
"func records(rows *sql.Rows) (Records, error) {\n\tvar res Records\n\tfor rows.Next() {\n\t\tvar streamID string\n\t\tvar streamIndex uint64\n\t\tvar originStreamID string\n\t\tvar originStreamIndex uint64\n\t\tvar id string\n\t\tvar typ string\n\t\tvar recordedOn string\n\t\tvar data []byte\n\t\tvar metadata []byte\n\t\terr := rows.Scan(&streamID, &streamIndex, &originStreamID, &originStreamIndex, &recordedOn, &id, &typ, &data, &metadata)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr := Record{\n\t\t\tStreamID: streamID,\n\t\t\tStreamIndex: streamIndex,\n\t\t\tOriginStreamID: originStreamID,\n\t\t\tOriginStreamIndex: originStreamIndex,\n\t\t\tRecordedOn: parseTime(recordedOn),\n\t\t\tID: id,\n\t\t\tType: typ,\n\t\t\tData: json.RawMessage(data),\n\t\t\tMetadata: json.RawMessage(metadata),\n\t\t}\n\t\tres = append(res, r)\n\t}\n\treturn res, nil\n}",
"func RowToRawData(rows *sql.Rows) (r RawData) {\n\trecord, _ := RowToArr(rows)\n\tr.Header = record[0]\n\tr.Rows = append(r.Rows, record[1:])\n\treturn\n}",
"func (a *kinesisFirehoseWriter) toRecords(msg message.Batch) ([]*firehose.Record, error) {\n\tentries := make([]*firehose.Record, msg.Len())\n\n\terr := msg.Iter(func(i int, p *message.Part) error {\n\t\tentry := firehose.Record{\n\t\t\tData: p.AsBytes(),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis Firehose payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}",
"func (r RowIdentifiers) ToRows(callback func(*proto.RowResponse) error) error {\n\tif len(r.Keys) > 0 {\n\t\tci := []*proto.ColumnInfo{{Name: r.Field, Datatype: \"string\"}}\n\t\tfor _, key := range r.Keys {\n\t\t\tif err := callback(&proto.RowResponse{\n\t\t\t\tHeaders: ci,\n\t\t\t\tColumns: []*proto.ColumnResponse{\n\t\t\t\t\t{ColumnVal: &proto.ColumnResponse_StringVal{StringVal: key}},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling callback\")\n\t\t\t}\n\t\t\tci = nil\n\t\t}\n\t} else {\n\t\tci := []*proto.ColumnInfo{{Name: r.Field, Datatype: \"uint64\"}}\n\t\tfor _, id := range r.Rows {\n\t\t\tif err := callback(&proto.RowResponse{\n\t\t\t\tHeaders: ci,\n\t\t\t\tColumns: []*proto.ColumnResponse{\n\t\t\t\t\t{ColumnVal: &proto.ColumnResponse_Uint64Val{Uint64Val: uint64(id)}},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling callback\")\n\t\t\t}\n\t\t\tci = nil\n\t\t}\n\t}\n\treturn nil\n}",
"func rowsToThings(rows *sql.Rows) Things {\n\tvar (\n\t\tt Thing\n\t\tresult Things\n\t\terr error\n\t)\n\n\tcheckRows(\"Things\", rows)\n\n\tfor i := 0; rows.Next(); i++ {\n\t\terr := rows.Scan(&t.ckey, &t.cval, &t.url, &t.data, &t.clockid, &t.tsn)\n\t\tcheckErr(\"scan things\", err)\n\n\t\tresult = append(result, t)\n\t}\n\terr = rows.Err()\n\tcheckErr(\"end reading things loop\", err)\n\n\tfmt.Printf(\"returning things: %d rows\\n\", len(result))\n\treturn result\n}",
"func recordToSlice(record Record) []string {\n\tvar recordSlice []string\n\n\trecordSlice = []string{\n\t\tfmt.Sprintf(\"%d\",record.CheeseId), record.CheeseName, record.ManufacturerName, record.ManufacturerProvCode,\n\t\trecord.ManufacturingType, record.WebSite, fmt.Sprintf(\"%.2f\", record.FatContentPercent), \n\t\tfmt.Sprintf(\"%.2f\", record.MoisturePercent), record.Particularities, record.Flavour, \n\t\trecord.Characteristics, record.Ripening, fmt.Sprintf(\"%t\", record.Organic),\n\t\trecord.CategoryType, record.MilkType, record.MilkTreatmentType, record.RindType, record.LastUpdateDate,\n\t}\n\n\treturn recordSlice\n}",
"func convertToMap(rows *sql.Rows) ([]map[string]interface{}, error) {\n\tvar response []map[string]interface{}\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tm := make(map[string]interface{})\n\t\tfor i, colName := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\tm[colName] = *val\n\t\t}\n\t\tresponse = append(response, m)\n\t}\n\treturn response, nil\n}",
"func databaseRowsToPaginationDataList(rows *sql.Rows, dtFields []dtColumn) ([]map[string]string, error) {\n\tvar dataList []map[string]string\n\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get row.Columns %w\", err)\n\t}\n\n\tvalues := make([]sql.RawBytes, len(columns))\n\t// rows.Scan wants '[]interface{}' as an argument, so we must copy the\n\t// references into such a slice\n\t// See http://code.google.com/p/go-wiki/wiki/InterfaceSlice for details\n\tscanArgs := make([]interface{}, len(values))\n\tfor i := range values {\n\t\tscanArgs[i] = &values[i]\n\t}\n\n\tfor rows.Next() {\n\t\t// get RawBytes from data\n\t\terr = rows.Scan(scanArgs...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not scan rows to 'scanArgs...' %w\", err)\n\t\t}\n\n\t\tvar value string\n\n\t\tfor i, col := range values {\n\t\t\t// Here we can check if the value is nil (NULL value)\n\t\t\tif col == nil {\n\t\t\t\tvalue = \"NULL\"\n\t\t\t} else {\n\t\t\t\tvalue = string(col)\n\t\t\t}\n\n\t\t\tfor _, dtField := range dtFields {\n\t\t\t\tif dtField.dbColumnName == columns[i] {\n\t\t\t\t\tdtObject := map[string]string{dtField.dtColumnName: value}\n\t\t\t\t\tdataList = append(dataList, dtObject)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn dataList, nil\n}",
"func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {\n\tdefer rows.Close()\n\n\tslice := []T{}\n\n\tfor rows.Next() {\n\t\tvalue, err := fn(rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tslice = append(slice, value)\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slice, nil\n}",
"func ConvertRows(rows Rows) (sql.Table, error) {\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn sql.Table{}, err\n\t}\n\n\tcolumnTypes, err := rows.ColumnTypes()\n\tif err != nil {\n\t\treturn sql.Table{}, err\n\t}\n\n\tresult := sql.Table{Columns: cols}\n\n\tfor rows.Next() {\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tfor i := range columns {\n\t\t\t// Populating with pointers to concrete types.\n\t\t\tcolumns[i] = reflect.New(columnTypes[i].ScanType()).Interface()\n\t\t}\n\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columns...); err != nil {\n\t\t\treturn sql.Table{}, err\n\t\t}\n\n\t\tm := make(map[string]interface{})\n\t\tfor i, c := range cols {\n\t\t\t// Getting values from pointers to types.\n\t\t\tm[c] = reflect.ValueOf(columns[i]).Elem().Interface()\n\t\t}\n\n\t\tresult.Rows = append(result.Rows, m)\n\t}\n\n\treturn result, nil\n}",
"func rowsToPruebas(rows *sql.Rows) ([]*Prueba, error) {\n\tvar pruebas []*Prueba\n\tfor rows.Next() {\n\t\tvar t Prueba\n\t\terr := rows.Scan(&t.ID, &t.Preguntaid, &t.Entrada, &t.Salida, &t.Visible, &t.PostEntrega, &t.Valor)\n\t\tif err != nil {\n\t\t\treturn pruebas, err\n\t\t}\n\t\tpruebas = append(pruebas, &t)\n\t}\n\treturn pruebas, nil\n}",
"func RowToDrivers(row *sql.Rows) []Driver {\n result := []Driver{}\n for row.Next() {\n var driverName string\n var driverTelephoneNumber string\n row.Scan(&driverName, &driverTelephoneNumber)\n result = append(result, Driver{\n DriverName: driverName,\n DriverTelephoneNumber: driverTelephoneNumber,\n })\n }\n return result\n}",
"func (a *kinesisWriter) toRecords(msg message.Batch) ([]*kinesis.PutRecordsRequestEntry, error) {\n\tentries := make([]*kinesis.PutRecordsRequestEntry, msg.Len())\n\n\terr := msg.Iter(func(i int, p *message.Part) error {\n\t\tpartKey, err := a.partitionKey.String(i, msg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"partition key interpolation error: %w\", err)\n\t\t}\n\t\tentry := kinesis.PutRecordsRequestEntry{\n\t\t\tData: p.AsBytes(),\n\t\t\tPartitionKey: aws.String(partKey),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\thashKey, err := a.hashKey.String(i, msg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"hash key interpolation error: %w\", err)\n\t\t}\n\t\tif hashKey != \"\" {\n\t\t\tentry.ExplicitHashKey = aws.String(hashKey)\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}",
"func RowToTrips(row *sql.Rows) []Trip {\n trips := []Trip{}\n for row.Next() {\n var tripNumber int\n var startLocationName string\n var destinationName string\n row.Scan(&tripNumber, &startLocationName, &destinationName)\n trips = append(trips, Trip{\n TripNumber: tripNumber,\n StartLocationName: startLocationName,\n DestinationName: destinationName,\n })\n }\n return trips\n}",
"func Row2Bytes() func([]interface{}) ([]byte, error) {\n\t//TODO: test this\n\thandle := new(codec.MsgpackHandle)\n\n\treturn func(row []interface{}) ([]byte, error) {\n\t\tbuffer := new(bytes.Buffer)\n\t\tenc := codec.NewEncoder(buffer, handle)\n\t\terr := enc.Encode(row)\n\t\tif err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\t\treturn buffer.Bytes(), nil\n\t}\n}",
"func convertFromTsRows(tsRows [][]TsCell) []*riak_ts.TsRow {\n\tvar rows []*riak_ts.TsRow\n\tvar cells []*riak_ts.TsCell\n\tfor _, tsRow := range tsRows {\n\t\tcells = make([]*riak_ts.TsCell, 0)\n\n\t\tfor _, tsCell := range tsRow {\n\t\t\tcells = append(cells, tsCell.cell)\n\t\t}\n\n\t\tif len(rows) < 1 {\n\t\t\trows = make([]*riak_ts.TsRow, 0)\n\t\t}\n\n\t\trows = append(rows, &riak_ts.TsRow{Cells: cells})\n\t}\n\n\treturn rows\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
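rowsToRecords above is the usual Next/Scan accumulation loop. The sketch below reproduces that loop against a tiny stub iterator instead of pgx.Rows, purely for illustration; note that with a real pgx.Rows callers conventionally also check rows.Err() once the loop ends, a step the dataset function leaves to its caller:

package main

import "fmt"

// stubRows is a toy iterator standing in for pgx.Rows.
type stubRows struct {
	data []string
	i    int
}

func (r *stubRows) Next() bool       { r.i++; return r.i <= len(r.data) }
func (r *stubRows) Scan(dst *string) { *dst = r.data[r.i-1] }

// collect mirrors the rowsToRecords shape: advance, scan one row into a
// fresh value, append, repeat until the iterator is exhausted.
func collect(rows *stubRows) []string {
	var out []string
	for rows.Next() {
		var v string
		rows.Scan(&v)
		out = append(out, v)
	}
	return out
}

func main() {
	fmt.Println(collect(&stubRows{data: []string{"a", "b"}})) // [a b]
}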
findConflict finds the index of the conflict. It returns the index of the first conflicting entry between the existing entries and the given entries, if there are any. If there are no conflicting entries and the existing entries contain all the given entries, zero will be returned. If there are no conflicting entries but the given entries contain new entries, the index of the first new entry will be returned. An entry is considered conflicting if it has the same index but a different term. The first entry MUST have an index equal to the argument 'from'. The indexes of the given entries MUST be continuously increasing. | func (l *LogStore) findConflict(entries []*pb.Entry) uint64 {
	// TODO: can there be a conflict at index 0?
for _, ne := range entries {
if !l.matchTerm(ne.Index, ne.Term) {
if ne.Index <= l.lastIndex() {
l.logger.Info("log found conflict",
zap.Uint64("conflictIndex", ne.Index),
zap.Uint64("conflictTerm", ne.Term),
zap.Uint64("existTerm", l.termOrPanic(l.term(ne.Index))))
}
return ne.Index
}
}
return 0
} | [
"func (l *raftLog) findConflict(from uint64, ents []pb.Entry) uint64 {\n\t// TODO(xiangli): validate the index of ents\n\tfor i, ne := range ents {\n\t\tif oe := l.at(from + uint64(i)); oe == nil || oe.Term != ne.Term {\n\t\t\treturn from + uint64(i)\n\t\t}\n\t}\n\treturn 0\n}",
"func FindConflictsByUser(entries []*RenderedScheduleEntry) map[string][]*Conflict {\n\tentriesByUser := RenderedScheduleEntries(entries).GroupBy(func(entry *RenderedScheduleEntry) string {\n\t\treturn entry.User.ID\n\t})\n\n\tvar (\n\t\tm sync.Mutex\n\t\twg sync.WaitGroup\n\t\tresults = make(map[string][]*Conflict, len(entriesByUser))\n\t)\n\n\tfor userID, entries := range entriesByUser {\n\t\twg.Add(1)\n\n\t\tgo func(userID string, entries []*RenderedScheduleEntry) {\n\t\t\tdefer wg.Done()\n\n\t\t\tconflicts := []*Conflict{}\n\n\t\t\tsort.Slice(entries, func(i, j int) bool {\n\t\t\t\treturn entries[i].Start.Before(entries[j].Start)\n\t\t\t})\n\n\t\t\tfor i, left := range entries {\n\t\t\t\tfor j := i + 1; j < len(entries); j++ {\n\t\t\t\t\tright := entries[j]\n\n\t\t\t\t\tif !right.Start.Before(left.End) { // if left.End <= right.Start\n\t\t\t\t\t\t// All good, RHS doesn't start until at least after LHS\n\t\t\t\t\t\t// ends. Stop scanning for conflicts related to LHS.\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Printf(\"CONFLICT: %s is in both %q and %q from %s to %s\\n\", left.User.Summary, left.Schedule, right.Schedule, right.Start, left.End)\n\n\t\t\t\t\tconflicts = append(conflicts, &Conflict{Left: left, Right: right})\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm.Lock()\n\t\t\tdefer m.Unlock()\n\n\t\t\tresults[userID] = conflicts\n\t\t}(userID, entries)\n\t}\n\n\twg.Wait()\n\n\treturn results\n}",
"func startIdx[E any](haystack, needle []E) int {\n\tp := &needle[0]\n\tfor i := range haystack {\n\t\tif p == &haystack[i] {\n\t\t\treturn i\n\t\t}\n\t}\n\t// TODO: what if the overlap is by a non-integral number of Es?\n\tpanic(\"needle not found\")\n}",
"func NewCreateMailerEntryConflict() *CreateMailerEntryConflict {\n\n\treturn &CreateMailerEntryConflict{}\n}",
"func FindNonOverlapping(overlaps map[int]bool) int {\n\tfor index, isOverlapping := range overlaps {\n\t\tif !isOverlapping {\n\t\t\treturn index\n\t\t}\n\t}\n\treturn -1\n}",
"func (tr *TransactionRepository) FindBetween(start int64, end int64) ([]*types.Transaction, *rTypes.Error) {\n\tif start > end {\n\t\treturn nil, errors.Errors[errors.StartMustNotBeAfterEnd]\n\t}\n\tvar transactions []transaction\n\ttr.dbClient.Where(whereClauseBetweenConsensus, start, end).Find(&transactions)\n\n\tsameHashMap := make(map[string][]transaction)\n\tfor _, t := range transactions {\n\t\th := t.getHashString()\n\t\tsameHashMap[h] = append(sameHashMap[h], t)\n\t}\n\tres := make([]*types.Transaction, 0, len(sameHashMap))\n\tfor _, sameHashTransactions := range sameHashMap {\n\t\ttransaction, err := tr.constructTransaction(sameHashTransactions)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, transaction)\n\t}\n\treturn res, nil\n}",
"func (s *schedule) getConflicts(timestamp uint32, length uint32) (conflicts uint) {\n\ts.RLock()\n\tdefer s.RUnlock()\n\tfor _, item := range s.items {\n\t\tscheduledFrom := uint64(item.timestamp) % uintmax\n\t\tscheduledTo := scheduledFrom + uint64(item.length)\n\t\tfrom := uint64(timestamp)\n\t\tto := from + uint64(length)\n\n\t\tif scheduledTo > uintmax || to > uintmax {\n\t\t\tif scheduledTo-uintmax <= from || scheduledFrom >= to-uintmax {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if scheduledTo <= from || scheduledFrom >= to {\n\t\t\tcontinue\n\t\t}\n\n\t\tif item.payload == nil {\n\t\t\tconflicts++\n\t\t} else {\n\t\t\tconflicts += 100\n\t\t}\n\t}\n\treturn\n}",
"func (s *server) ResolveConflict(ctx context.Context, in *proto_job.ResultRequest) (*proto_job.ResultReply, error) {\n\tlog.Print(\"ResolveConflict\")\n\treturn s.resultService.ResolveConflict(in)\n}",
"func (ml *messageLog) FromIndex(index int, exclusive bool) defs.MessageFindFunc {\r\n\tif index < 0 {\r\n\t\tindex = len(ml.log.entries) + index\r\n\t\tif index < 0 {\r\n\t\t\tindex = 0\r\n\t\t}\r\n\t}\r\n\tif exclusive {\r\n\t\tindex += 1\r\n\t}\r\n\treturn func() (int, bool) {\r\n\t\tif index < len(ml.log.entries) {\r\n\t\t\treturn index, true\r\n\t\t}\r\n\t\treturn 0, false\r\n\t}\r\n}",
"func ConflictFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (conflict Conflict, err error) {\n\treadStartOffset := marshalUtil.ReadOffset()\n\n\tconflict = Conflict{}\n\tbytesID, err := marshalUtil.ReadBytes(int(ledgerstate.TransactionIDLength))\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse ID from conflict: %w\", err)\n\t\treturn\n\t}\n\tconflict.ID, _, err = ledgerstate.TransactionIDFromBytes(bytesID)\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse ID from bytes: %w\", err)\n\t\treturn\n\t}\n\n\tconflict.Opinion, err = OpinionFromMarshalUtil(marshalUtil)\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse opinion from conflict: %w\", err)\n\t\treturn\n\t}\n\n\t// return the number of bytes we processed\n\tparsedBytes := marshalUtil.ReadOffset() - readStartOffset\n\tif parsedBytes != ConflictLength {\n\t\terr = errors.Errorf(\"parsed bytes (%d) did not match expected size (%d): %w\", parsedBytes, ConflictLength, cerrors.ErrParseBytesFailed)\n\t\treturn\n\t}\n\n\treturn\n}",
"func (re *raftEngine) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) {\r\n\tif len(ents) == 0 {\r\n\t\treturn\r\n\t}\r\n\tfirstIndex := ents[0].Index\r\n\tif firstIndex > re.appliedIndex+1 {\r\n\t\tlog.ZAPSugaredLogger().Errorf(\"Error raised when processing entries to apply, first index of committed entry [%d] should <= appliedIndex [%d].\", firstIndex, re.appliedIndex)\r\n\t\treturn\r\n\t}\r\n\tif re.appliedIndex-firstIndex+1 < uint64(len(ents)) {\r\n\t\tnents = ents[re.appliedIndex-firstIndex+1:]\r\n\t}\r\n\treturn\r\n}",
"func NewGetWaitlistEntryConflict(body *GetWaitlistEntryConflictResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}",
"func (r *Replica) scanConflicts(instances []*Instance, cmds []cmd.Command, start InstanceId, end InstanceId) (InstanceId, bool) {\n\tfor i := start; i > end; i-- {\n\t\tif instances[i] == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// we only need to find the highest instance in conflict\n\t\tif r.StateMac.HaveConflicts(cmds, instances[i].cmds) {\n\t\t\treturn i, true\n\t\t}\n\t}\n\n\treturn conflictNotFound, false\n}",
"func (finder *AmpliconFinder) Locate() ([]int, []int, error) {\n\tif finder.searched {\n\t\tif finder.found {\n\t\t\treturn []int{finder.iBegin, finder.iEnd}, []int{finder.mis5, finder.mis3}, nil\n\t\t}\n\t\treturn nil, nil, nil\n\t}\n\n\tif finder.MaxMismatch <= 0 { // exactly matching\n\t\t// search F\n\t\tvar i int\n\n\t\tif finder.rF == nil {\n\t\t\ti = bytes.Index(finder.Seq, finder.F)\n\t\t\tif i < 0 { // not found\n\t\t\t\tfinder.searched, finder.found = true, false\n\t\t\t\treturn nil, nil, nil\n\t\t\t}\n\t\t} else {\n\t\t\tloc := finder.rF.FindSubmatchIndex(finder.Seq)\n\t\t\tif len(loc) == 0 {\n\t\t\t\tfinder.searched, finder.found = true, false\n\t\t\t\treturn nil, nil, nil\n\t\t\t}\n\t\t\ti = loc[0]\n\t\t}\n\n\t\tif len(finder.R) == 0 { // only forward primer, returns location of F\n\t\t\tfinder.searched, finder.found = true, true\n\t\t\tfinder.iBegin, finder.iEnd = i, i+len(finder.F)-1\n\t\t\tfinder.mis5 = amplicon_mismatches(finder.Seq[i:i+len(finder.F)], finder.F)\n\t\t\tfinder.mis3 = 0\n\t\t\treturn []int{i + 1, i + len(finder.F)},\n\t\t\t\t[]int{finder.mis5, finder.mis3},\n\t\t\t\tnil\n\t\t}\n\n\t\t// two primers given, need to search R\n\t\tvar j int\n\t\tif finder.rR == nil {\n\t\t\tj = bytes.Index(finder.Seq, finder.R)\n\t\t\tif j < 0 {\n\t\t\t\tfinder.searched, finder.found = true, false\n\t\t\t\treturn nil, nil, nil\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tif j+1 >= len(finder.Seq) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tk := bytes.Index(finder.Seq[j+1:], finder.R)\n\t\t\t\tif k < 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tj += k + 1\n\t\t\t}\n\t\t} else {\n\t\t\tloc := finder.rR.FindAllSubmatchIndex(finder.Seq, -1)\n\t\t\tif len(loc) == 0 {\n\t\t\t\tfinder.searched, finder.found = true, false\n\t\t\t\treturn nil, nil, nil\n\t\t\t}\n\t\t\tj = loc[len(loc)-1][0]\n\t\t}\n\n\t\tif j < i { // wrong location of F and R: 5' ---R-----F---- 3'\n\t\t\tfinder.searched, finder.found = true, false\n\t\t\treturn nil, nil, nil\n\t\t}\n\t\tfinder.searched, finder.found = true, true\n\t\tfinder.iBegin, finder.iEnd = i, j+len(finder.R)-1\n\t\tfinder.mis5 = amplicon_mismatches(finder.Seq[i:i+len(finder.F)], finder.F)\n\t\tfinder.mis3 = amplicon_mismatches(finder.Seq[j:j+len(finder.R)], finder.R)\n\t\treturn []int{i + 1, j + len(finder.R)},\n\t\t\t[]int{finder.mis5, finder.mis3},\n\t\t\tnil\n\t}\n\n\t// search F\n\tlocsI, err := finder.FMindex.Locate(finder.F, finder.MaxMismatch)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif len(locsI) == 0 { // F not found\n\t\tfinder.searched, finder.found = true, false\n\t\treturn nil, nil, nil\n\t}\n\tif len(finder.R) == 0 { // returns location of F\n\t\tsort.Ints(locsI) // remain the first location\n\t\tfinder.searched, finder.found = true, true\n\t\tfinder.iBegin, finder.iEnd = locsI[0], locsI[0]+len(finder.F)-1\n\t\tfinder.mis5 = amplicon_mismatches(finder.Seq[locsI[0]:locsI[0]+len(finder.F)], finder.F)\n\t\tfinder.mis3 = 0\n\t\treturn []int{locsI[0] + 1, locsI[0] + len(finder.F)},\n\t\t\t[]int{finder.mis5, finder.mis3},\n\t\t\tnil\n\t}\n\n\t// search R\n\tlocsJ, err := finder.FMindex.Locate(finder.R, finder.MaxMismatch)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif len(locsJ) == 0 {\n\t\tfinder.searched, finder.found = true, false\n\t\treturn nil, nil, nil\n\t}\n\tsort.Ints(locsI) // to remain the FIRST location\n\tsort.Ints(locsJ) // to remain the LAST location\n\tfinder.searched, finder.found = true, true\n\tfinder.iBegin, finder.iEnd = locsI[0], locsJ[len(locsJ)-1]+len(finder.R)-1\n\tfinder.mis5 = 
amplicon_mismatches(finder.Seq[locsI[0]:locsI[0]+len(finder.F)], finder.F)\n\tfinder.mis3 = amplicon_mismatches(finder.Seq[locsJ[len(locsJ)-1]:locsJ[len(locsJ)-1]+len(finder.R)], finder.R)\n\treturn []int{locsI[0] + 1, locsJ[len(locsJ)-1] + len(finder.R)},\n\t\t[]int{finder.mis5, finder.mis3},\n\t\tnil\n}",
"func searchInIndex(r io.ReadSeeker, from, to int, searchKey []byte) (int, bool, error) {\n\tif _, err := r.Seek(int64(from), io.SeekStart); err != nil {\n\t\treturn 0, false, fmt.Errorf(\"failed to seek: %w\", err)\n\t}\n\n\tfor {\n\t\tkey, value, err := decode(r)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn 0, false, fmt.Errorf(\"failed to read: %w\", err)\n\t\t}\n\t\tif err == io.EOF {\n\t\t\treturn 0, false, nil\n\t\t}\n\t\toffset := decodeInt(value)\n\n\t\tif bytes.Equal(key, searchKey) {\n\t\t\treturn offset, true, nil\n\t\t}\n\n\t\tif to > from {\n\t\t\tcurrent, err := r.Seek(0, io.SeekCurrent)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, false, fmt.Errorf(\"failed to seek: %w\", err)\n\t\t\t}\n\n\t\t\tif current > int64(to) {\n\t\t\t\treturn 0, false, nil\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *causality) detectConflict(keys [][]byte) (bool, int) {\n\tif len(keys) == 0 {\n\t\treturn false, 0\n\t}\n\n\tfirstIdx := -1\n\tfor _, key := range keys {\n\t\tif idx, ok := c.relations[string(key)]; ok {\n\t\t\tif firstIdx == -1 {\n\t\t\t\tfirstIdx = idx\n\t\t\t} else if firstIdx != idx {\n\t\t\t\treturn true, -1\n\t\t\t}\n\t\t}\n\t}\n\n\treturn firstIdx != -1, firstIdx\n}",
"func searchRange(nums []int, target int) []int {\n\tresult := []int{-1, -1}\n\tif len(nums) <= 0 {\n\t\treturn result\n\t}\n\n\tfor i := 0; i < len(nums); i++ {\n\t\tif nums[i] == target {\n\t\t\tresult[0] = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor j := len(nums) - 1; j >= 0; j-- {\n\t\tif nums[j] == target {\n\t\t\tresult[1] = j\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn result\n}",
"func NewConflict(parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(nil, DefaultConflict, wparams.NewParamStorer(parameters...))\n}",
"func (gui *Gui) findNewSelectedIdx(prevNodes []*filetree.FileNode, currNodes []*filetree.FileNode) int {\n\tgetPaths := func(node *filetree.FileNode) []string {\n\t\tif node == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif node.File != nil && node.File.IsRename() {\n\t\t\treturn node.File.Names()\n\t\t} else {\n\t\t\treturn []string{node.Path}\n\t\t}\n\t}\n\n\tfor _, prevNode := range prevNodes {\n\t\tselectedPaths := getPaths(prevNode)\n\n\t\tfor idx, node := range currNodes {\n\t\t\tpaths := getPaths(node)\n\n\t\t\t// If you started off with a rename selected, and now it's broken in two, we want you to jump to the new file, not the old file.\n\t\t\t// This is because the new should be in the same position as the rename was meaning less cursor jumping\n\t\t\tfoundOldFileInRename := prevNode.File != nil && prevNode.File.IsRename() && node.Path == prevNode.File.PreviousName\n\t\t\tfoundNode := utils.StringArraysOverlap(paths, selectedPaths) && !foundOldFileInRename\n\t\t\tif foundNode {\n\t\t\t\treturn idx\n\t\t\t}\n\t\t}\n\t}\n\n\treturn -1\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add adds one or more previously unadded URLs to the crawler to visit. source can be nil to indicate root. Returns a list of errors if any occurred. | func (c *Crawler) Add(source *url.URL, uri ...*url.URL) []error {
var errs []error
for _, u := range uri {
var err error
u := u
u.Fragment = "" // reset fragment, we don't want it messing our visited list
if source != nil {
u = source.ResolveReference(u)
}
if u.Scheme != "http" && u.Scheme != "https" {
err = ErrUnsupportedScheme
		} else if c.filter != nil && !c.filter(u) {
err = ErrFilteredOut
}
us := u.String()
// For the already-visited test we need to clean up each URL a bit
vkey := strings.TrimRight(us[strings.Index(us, ":")+1:], "/") // Remove scheme and trailing slash
if err == nil {
c.toVisitMu.RLock()
if _, ok := c.toVisit[vkey]; ok {
err = ErrAlreadyInList
}
c.toVisitMu.RUnlock()
}
if err == nil {
c.logger.Debugf("Add(%v %v): OK", source, us)
atomic.AddUint64(&c.numQueued, 1)
		} else {
//c.logger.Warnf("Add(%v %v): %v", source, us, err)
atomic.AddUint64(&c.numEncountered, 1)
errs = append(errs, errors.Wrapf(err, "Invalid URL %v", u))
continue
}
c.toVisitMu.Lock()
c.toVisit[vkey] = struct{}{}
c.toVisitMu.Unlock()
{
uu := *u
uu.Scheme = ""
if source != nil && source.Host == uu.Host {
uu.Host = ""
}
if source == nil {
c.mapper.Add("<root>", uu.String())
} else {
c.mapper.Add(source.String(), uu.String())
}
}
v := visit{
source: source,
target: u,
}
select {
case c.visitChan <- v:
case <-c.ctx.Done():
return append(errs, c.ctx.Err())
}
}
return errs
} | [
"func add(url string, verbose bool, scrapeURLs *scrapeURL) {\n\tscrapeURLs.AddedURLsCount++\n\tscrapeURLs.AddedURLs = append(scrapeURLs.AddedURLs, url)\n\tif verbose {\n\t\tlog.Println(\"Added: \" + url)\n\t}\n}",
"func (s *Sources) Add(src string) error {\n\tif src == \"\" {\n\t\treturn errors.New(\"src is an empty string\")\n\t}\n\tfor _, v := range *s {\n\t\tif v == src {\n\t\t\treturn errors.New(\"src already exist\")\n\t\t}\n\t}\n\t*s = append(*s, src)\n\treturn nil\n}",
"func (s *Sources) Add(source Source) {\n\ts.sources = append(s.sources, source)\n}",
"func (s *SitemapIndex) Add(u *URL) {\n\ts.URLs = append(s.URLs, u)\n}",
"func (os *OriginChecker) AddRawURLs(urls []string) {\n\tos.Lock()\n\tdefer os.Unlock()\n\n\tfor _, u := range urls {\n\t\tclean, err := cleanOrigin(u)\n\t\tif err == nil {\n\t\t\tos.origins[clean] = true\n\t\t}\n\t}\n}",
"func (search *Search) AddSource(source string) *Search {\n\tvar sources []string\n\tif search.query[SOURCE] == nil {\n\t\tsources = []string{}\n\t} else {\n\t\tsources = search.query[SOURCE].([]string)\n\t}\n\tsources = append(sources, source)\n\tsearch.query[SOURCE] = sources\n\treturn search\n}",
"func (r *RssFeedEmitter) Add(url string) {\n\tfor _, feed := range r.feeds {\n\t\tif feed.Link == url {\n\t\t\treturn\n\t\t}\n\t}\n\tnewFeed, err := r.parser.ParseURL(url)\n\tif err != nil {\n\t\treturn\n\t}\n\tr.feeds = append(r.feeds, *newFeed)\n}",
"func (s *Sources) AddSources(src ...string) error {\n\tfor _, v := range src {\n\t\terr := s.Add(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (h *CrawlHandler) AddCrawl(url string, statusCode int) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\n\th.crawls[url] = statusCode\n}",
"func (f *frontier) Add(uri ...string) {\n\tfor _, i := range uri {\n\t\tu, err := f.filter(f, i)\n\t\tif err != nil {\n\t\t\tcontinue // do nothing\n\t\t}\n\t\tf.lk.Lock()\n\t\tf.nbs = append(f.nbs, &visitable{uri: u})\n\t\tf.lk.Unlock()\n\t}\n}",
"func (s *Launcher) addSource(source *sources.LogSource) {\n\ts.activeSources = append(s.activeSources, source)\n\ts.launchTailers(source)\n}",
"func (r *Repository) AddImages(urls []string) []error {\n\tvar errors []error\n\terrChan := make(chan error, len(urls))\n\tvar wg sync.WaitGroup\n\tfor _, url := range urls {\n\t\turl := url\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := r.addImage(url)\n\t\t\terrChan <- err\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(errChan)\n\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\treturn errors\n}",
"func (m *Manager) addFiles(url string, source string, dir bool) error {\n\n\tif url == \"\" || url[0] != '/' {\n\t\treturn ErrUrl\n\t}\n\n\turl = strings.TrimSuffix(url, \"/\")\n\tif !dir && url == \"\" {\n\t\turl = \"/\"\n\t}\n\tif dir && url == \"\" {\n\t\treturn ErrRootLevel\n\t}\n\n\ts, err := os.Executable()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpath, err := filepath.Abs(path.Dir(s) + \"/\" + source)\n\tif info, errDir := os.Stat(path); err != nil || os.IsNotExist(errDir) || (info != nil && info.IsDir() != dir) {\n\t\tif dir {\n\t\t\treturn fmt.Errorf(ErrPathDoesNotExist.Error(), source)\n\t\t}\n\t\treturn fmt.Errorf(ErrFileDoesNotExist.Error(), source)\n\t}\n\n\tif dir {\n\t\tm.router.AddPublicDir(url, path)\n\t\treturn nil\n\t}\n\n\tm.router.AddPublicFile(url, path)\n\treturn nil\n}",
"func (collector *ErrorCollector) addError(step string, err error) {\n\tcollector.Errors[step] = append(collector.Errors[step], err)\n\n\tcollector.ErrorsNb++\n}",
"func (self *errorList) Add(err error) {\n\tif err != nil {\n\t\tself.list = append(self.list, err.Error())\n\t}\n\t//return err\n}",
"func (e *ErrorsList) Add(err error) {\n\tif err != nil {\n\t\t// Checking against container\n\t\tif container, ok := err.(multipleErrorsContainer); ok {\n\t\t\tfor _, err := range container.List() {\n\t\t\t\te.Add(err)\n\t\t\t}\n\t\t} else {\n\t\t\t*e = append(*e, err)\n\t\t}\n\t}\n}",
"func (lc *linkCollection) Add(url string, path string) {\n\tif lc.links == nil {\n\t\tlc.links = make(linkList)\n\t}\n\n\tl := lc.links[url]\n\n\tif l.Target == \"\" {\n\t\tl = link{Target: url}\n\t}\n\n\tl.Documents = append(l.Documents, path)\n\n\tlc.links[url] = l\n}",
"func (u *URL) Add(host HostPath) {\n\tu.values[host]++\n\tu.total++\n}",
"func (gState *State) ManageNewURLs() {\n\t//decides on whether to add to the directory list, or add to file output\n\tfor {\n\t\tcandidate := <-gState.Chans.newPagesChan\n\t\t//check the candidate is an actual URL\n\t\t//handle that one crazy case where :/ might be at the start because reasons\n\t\tif strings.HasPrefix(candidate.URL, \"://\") {\n\t\t\t//add a garbage scheme to get past the url parse stuff (the scheme will be added from the reference anyway)\n\t\t\tcandidate.URL = \"xxx\" + candidate.URL\n\t\t}\n\t\tu, err := url.Parse(strings.TrimSpace(candidate.URL))\n\n\t\tif err != nil {\n\t\t\tgState.wg.Done()\n\t\t\tgState.PrintOutput(err.Error(), Error, 0)\n\t\t\tcontinue //probably a better way of doing this\n\t\t}\n\n\t\t//links of the form <a href=\"/thing\" ></a> don't have a host portion to the URL\n\t\tif u.Host == \"\" {\n\t\t\tu.Host = candidate.Reference.Host\n\t\t}\n\n\t\t//actualUrl := gState.ParsedURL.Scheme + \"://\" + u.Host\n\t\tactualURL := net.CleanURL(u, (*candidate.Reference).Scheme+\"://\"+u.Host)\n\n\t\tgState.CMut.Lock()\n\t\tif _, ok := gState.Checked[actualURL]; !ok && //must have not checked it before\n\t\t\t(gState.Hosts.HostExists(u.Host) || gState.Whitelist[u.Host]) && //must be within whitelist, or be one of the starting urls\n\t\t\t!gState.Cfg.NoRecursion { //no recursion means we don't care about adding extra paths or content\n\t\t\tgState.Checked[actualURL] = true\n\t\t\tgState.CMut.Unlock()\n\t\t\tgState.wg.Add(1)\n\t\t\tgState.Chans.pagesChan <- SpiderPage{URL: actualURL, Reference: candidate.Reference, Result: candidate.Result}\n\t\t\tgState.PrintOutput(\"URL Added: \"+actualURL, Debug, 3)\n\n\t\t\t//also add any directories in the supplied path to the 'to be hacked' queue\n\t\t\tpath := \"\"\n\t\t\tdirs := strings.Split(u.Path, \"/\")\n\t\t\tfor i, y := range dirs {\n\n\t\t\t\tpath = path + y\n\t\t\t\tif len(path) > 0 && string(path[len(path)-1]) != \"/\" && i != len(dirs)-1 {\n\t\t\t\t\tpath = path + \"/\" //don't add double /'s, and don't add on the last value\n\t\t\t\t}\n\t\t\t\t//prepend / if it doesn't already exist\n\t\t\t\tif len(path) > 0 && string(path[0]) != \"/\" {\n\t\t\t\t\tpath = \"/\" + path\n\t\t\t\t}\n\n\t\t\t\tnewDir := candidate.Reference.Scheme + \"://\" + candidate.Reference.Host + path\n\t\t\t\tnewPage := SpiderPage{}\n\t\t\t\tnewPage.URL = newDir\n\t\t\t\tnewPage.Reference = candidate.Reference\n\t\t\t\tnewPage.Result = candidate.Result\n\t\t\t\tgState.CMut.RLock()\n\t\t\t\tif gState.Checked[newDir] {\n\t\t\t\t\tgState.CMut.RUnlock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tgState.CMut.RUnlock()\n\t\t\t\tgState.wg.Add(1)\n\t\t\t\tgState.Chans.newPagesChan <- newPage\n\t\t\t}\n\t\t} else {\n\t\t\tgState.CMut.Unlock()\n\t\t}\n\n\t\tgState.wg.Done()\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
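The Crawler.Add record above leans on one detail worth illustrating: the visited-set key drops the URL scheme and any trailing slash, so http/https and slash variants collapse to a single entry. Below is a minimal standalone sketch of that normalization using only the Go standard library; the helper name normalizeKey and the example URLs are ours, not taken from the record.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// normalizeKey mirrors the record's visited-set key: drop the fragment,
// resolve the target against its source page, then strip the scheme and
// any trailing slash so scheme and slash variants map to one entry.
func normalizeKey(source, target *url.URL) string {
	u := *target
	u.Fragment = ""
	if source != nil {
		u = *source.ResolveReference(&u)
	}
	s := u.String()
	return strings.TrimRight(s[strings.Index(s, ":")+1:], "/")
}

func main() {
	src, _ := url.Parse("https://example.com/docs/")
	rel, _ := url.Parse("../about#team")
	fmt.Println(normalizeKey(src, rel)) // prints //example.com/about
}
```

This matches what the snippet stores in its toVisit map, which is why a later https link to an already-seen http page is reported as ErrAlreadyInList.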
getSourcegraphVersion queries the Sourcegraph GraphQL API to get the current version of the Sourcegraph instance. | func (svc *Service) getSourcegraphVersion(ctx context.Context) (string, error) {
var result struct {
Site struct {
ProductVersion string
}
}
ok, err := svc.client.NewQuery(sourcegraphVersionQuery).Do(ctx, &result)
if err != nil || !ok {
return "", err
}
return result.Site.ProductVersion, err
} | [
"func GetVersion() string {\n\treturn version\n}",
"func (_ EntityAliases) SensuAgentVersion(p graphql.ResolveParams) (string, error) {\n\tval, err := graphql.DefaultResolver(p.Source, p.Info.FieldName)\n\tret, ok := val.(string)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tif !ok {\n\t\treturn ret, errors.New(\"unable to coerce value for field 'sensuAgentVersion'\")\n\t}\n\treturn ret, err\n}",
"func (a *BaseAggregateSourced) GetVersion() int {\n\treturn a.Version\n}",
"func getVersion(agentInstall DotNetAgentInstall) (result tasks.Result) {\n\n\tagentVersion, err := tasks.GetFileVersion(agentInstall.AgentPath)\n\n\tif err != nil {\n\t\tresult.Status = tasks.Error\n\t\tresult.Summary = \"Error finding .Net Agent version\"\n\t\tlog.Info(\"Error finding .Net Agent version. The error is \", err)\n\t\treturn result\n\t}\n\n\tresult.Status = tasks.Info\n\tresult.Summary = agentVersion\n\tresult.Payload = agentVersion\n\treturn result\n\n}",
"func SourceVersion() string {\n\treturn fmt.Sprintf(\"%s commit: %s / nearest-git-\"+\n\t\t\"tag: %s / branch: %s / %s\\n\",\n\t\tProgramName, LAST_GIT_COMMIT_HASH,\n\t\tNEAREST_GIT_TAG, GIT_BRANCH, GO_VERSION)\n}",
"func CurrentSourceVersion() string {\n\tif environ.HasValue(\"SOURCE_VERSION_OVERRIDE\") {\n\t\treturn environ.GetValueStr(\"SOURCE_VERSION_OVERRIDE\")\n\t}\n\n\tmanifestPath := path.Join(RootDir(), \"src\", \"appengine\", \"resources\", \"clusterfuzz-source.manifest\")\n\tresult, err := ioutil.ReadFile(manifestPath)\n\n\tif err != nil {\n\t\tlogs.Panicf(\"Failed to get current source version: %v\", err)\n\t}\n\n\treturn string(result)\n}",
"func (_Bridge *BridgeCaller) GetVersion(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Bridge.contract.Call(opts, out, \"getVersion\")\n\treturn *ret0, err\n}",
"func (o ContentSourceOutput) Version() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ContentSource) *string { return v.Version }).(pulumi.StringPtrOutput)\n}",
"func Version() string {\n\treturn C.GoString(C.gfal2_version())\n}",
"func (m *SynchronizationSchema) GetVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"version\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (c *Client) GetVersion() (string, error) {\n\tvar version string\n\tvar statusResponse internal.StatusResponse\n\n\treq, err := http.NewRequest(http.MethodGet, c.baseURL+statusEndpoint, nil)\n\tif err != nil {\n\t\treturn version, fmt.Errorf(\"failed to build request for status endpoint - %s\", err.Error())\n\t}\n\treq.Header.Set(\"Accept\", \"application/xml\")\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn version, fmt.Errorf(\"failed to fetch backend version - %s\", err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\terr = json.NewDecoder(resp.Body).Decode(&statusResponse)\n\tif err != nil {\n\t\treturn version, fmt.Errorf(\"failed to fetch backend version - %s\", err.Error())\n\t}\n\n\treturn statusResponse.Version.Backend, nil\n}",
"func getVersion(driver *neo4j.Driver) (Version, error) {\n\tversion := Version{}\n\tsession := (*driver).NewSession(neo4j.SessionConfig{})\n\tdefer session.Close()\n\n\tresult, err := session.Run(VERSION_QUERY, nil)\n\tif err != nil {\n\t\treturn version, nil\n\t}\n\n\trecord, err := result.Single()\n\tif err != nil {\n\t\treturn version, nil\n\t}\n\n\tval, found := record.Get(\"version\")\n\tif !found {\n\t\treturn version, errors.New(\"couldn't find 'version' in query results\")\n\t}\n\tdata, ok := val.([]interface{})\n\tif !ok {\n\t\treturn version, errors.New(\"'version' isn't an array\")\n\t}\n\tif len(data) < 2 {\n\t\treturn version, errors.New(\"'version' array is empty or too small\")\n\t}\n\n\tval, found = record.Get(\"extra\")\n\tif !found {\n\t\treturn version, errors.New(\"couldn't find 'extra' version info\")\n\t}\n\textra, ok := val.(string)\n\tif !ok {\n\t\treturn version, errors.New(\"'extra' value isn't a string\")\n\t}\n\n\t// yolo for now\n\tversion.Major = uint8(data[0].(int64))\n\tversion.Minor = uint8(data[1].(int64))\n\n\tif len(data) > 2 {\n\t\tversion.Patch = uint8(data[2].(int64))\n\t}\n\tversion.Extra = extra\n\n\treturn version, nil\n}",
"func (f *Features) getVersion(ctx context.Context, adminDB *mongo.Database) {\n\tcmd := bson.D{\n\t\t{\n\t\t\tKey: \"buildInfo\",\n\t\t\tValue: 1,\n\t\t},\n\t}\n\tvar result buildInfo\n\terr := adminDB.RunCommand(ctx, cmd).Decode(&result)\n\tif err != nil {\n\t\tf.MongoVersion = &semver.Version{}\n\t\treturn\n\t}\n\n\tf.MongoVersion = semver.MustParse(result.Version)\n}",
"func (pr LocalPackageReference) GeneratorVersion() string {\n\treturn pr.generatorVersion\n}",
"func (o *ClusterUpgrade) GetVersion() (value string, ok bool) {\n\tok = o != nil && o.bitmap_&8 != 0\n\tif ok {\n\t\tvalue = o.version\n\t}\n\treturn\n}",
"func (c *Connection) Version(ctx context.Context) (string, error) {\n\tresp, err := c.Request(ctx).\n\t\tSetResult(&api.VersionResponse{}).\n\t\tGet(\"/version\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.Result().(*api.VersionResponse).Version, nil\n}",
"func GetVersion() string {\n\tif len(Version) == 0 {\n\t\treturn \"dev\"\n\t}\n\treturn Version\n}",
"func GetVersion() string {\n\treturn version.VERSIONSTR\n}",
"func GetVersion() string {\n\treturn \"v\" + appVersion\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
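The getSourcegraphVersion record above does not show the sourcegraphVersionQuery constant, but the result struct implies a query of the shape site { productVersion }, which is how Sourcegraph's GraphQL API exposes the instance version. Here is a self-contained sketch that issues that query with plain net/http; the endpoint URL is a placeholder and authentication is omitted, both assumptions rather than details from the record.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// fetchVersion POSTs the version query to a Sourcegraph GraphQL endpoint
// and decodes the same shape the record's result struct expects.
func fetchVersion(endpoint string) (string, error) {
	body, err := json.Marshal(map[string]string{
		"query": `query SourcegraphVersion { site { productVersion } }`,
	})
	if err != nil {
		return "", err
	}
	resp, err := http.Post(endpoint, "application/json", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var out struct {
		Data struct {
			Site struct {
				ProductVersion string `json:"productVersion"`
			} `json:"site"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.Data.Site.ProductVersion, nil
}

func main() {
	// Placeholder host; a real instance would expose /.api/graphql.
	v, err := fetchVersion("https://sourcegraph.example.com/.api/graphql")
	fmt.Println(v, err)
}
```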
DetermineFeatureFlags fetches the version of the configured Sourcegraph instance and then sets flags on the Service itself to use features available in that version, e.g. gzip compression. | func (svc *Service) DetermineFeatureFlags(ctx context.Context) error {
version, err := svc.getSourcegraphVersion(ctx)
if err != nil {
return errors.Wrap(err, "failed to query Sourcegraph version to check for available features")
}
return svc.features.setFromVersion(version)
} | [
"func InitFeatureFlags(flag *pflag.FlagSet) {\n\tflag.Bool(FeatureFlagAccessCode, false, \"Flag (bool) to enable requires-access-code\")\n\tflag.Bool(FeatureFlagRoleBasedAuth, false, \"Flag (bool) to enable role-based-auth\")\n\tflag.Bool(FeatureFlagConvertPPMsToGHC, false, \"Flag (bool) to enable convert-ppms-to-ghc\")\n}",
"func (a *AdminApiService) GetAllFeatureFlags(ctx _context.Context) (FeatureFlag, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue FeatureFlag\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, 
localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}",
"func InitializeFeatures(featuresClient managementv3.FeatureClient, featureArgs string) {\n\t// applies any default values assigned in --features flag to feature map\n\tif err := applyArgumentDefaults(featureArgs); err != nil {\n\t\tlogrus.Errorf(\"failed to apply feature args: %v\", err)\n\t}\n\n\tif featuresClient == nil {\n\t\treturn\n\t}\n\n\t// creates any features in map that do not exist, updates features with new default value\n\tfor key, f := range features {\n\t\tfeatureState, err := featuresClient.Get(key, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\tlogrus.Errorf(\"unable to retrieve feature %s in initialize features: %v\", f.name, err)\n\t\t\t}\n\n\t\t\tif f.install {\n\t\t\t\t// value starts off as nil, that way rancher can determine if value has been manually assigned\n\t\t\t\tnewFeature := &v3.Feature{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: f.name,\n\t\t\t\t\t},\n\t\t\t\t\tSpec: v3.FeatureSpec{\n\t\t\t\t\t\tValue: nil,\n\t\t\t\t\t},\n\t\t\t\t\tStatus: v3.FeatureStatus{\n\t\t\t\t\t\tDefault: f.def,\n\t\t\t\t\t\tDynamic: f.dynamic,\n\t\t\t\t\t\tDescription: f.description,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tif _, err := featuresClient.Create(newFeature); err != nil {\n\t\t\t\t\tlogrus.Errorf(\"unable to create feature %s in initialize features: %v\", f.name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnewFeatureState := featureState.DeepCopy()\n\t\t\t// checks if default value has changed\n\t\t\tif featureState.Status.Default != f.def {\n\t\t\t\tnewFeatureState.Status.Default = f.def\n\t\t\t}\n\n\t\t\t// checks if developer has changed dynamic value from previous rancher version\n\t\t\tif featureState.Status.Dynamic != f.dynamic {\n\t\t\t\tnewFeatureState.Status.Dynamic = f.dynamic\n\t\t\t}\n\n\t\t\t// checks if developer has changed description value from previous rancher version\n\t\t\tif featureState.Status.Description != f.description {\n\t\t\t\tnewFeatureState.Status.Description = f.description\n\t\t\t}\n\n\t\t\tnewFeatureState, err = featuresClient.Update(newFeatureState)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"unable to update feature %s in initialize features: %v\", f.name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif newFeatureState.Status.LockedValue != nil {\n\t\t\t\tf.Set(*newFeatureState.Status.LockedValue)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif featureState.Spec.Value == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif *featureState.Spec.Value == f.val {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tf.Set(*featureState.Spec.Value)\n\t\t}\n\t}\n}",
"func BuildServerFlags(cmd *cobra.Command, srv *server.Command) {\n\tflags := cmd.Flags()\n\tflags.StringVar(&srv.Config.Name, \"name\", srv.Config.Name, \"Name of the node in the cluster.\")\n\tflags.StringVarP(&srv.Config.DataDir, \"data-dir\", \"d\", srv.Config.DataDir, \"Directory to store FeatureBase data files.\")\n\tflags.StringVarP(&srv.Config.Bind, \"bind\", \"b\", srv.Config.Bind, \"Default URI on which FeatureBase should listen.\")\n\tflags.StringVar(&srv.Config.BindGRPC, \"bind-grpc\", srv.Config.BindGRPC, \"URI on which FeatureBase should listen for gRPC requests.\")\n\tflags.StringVar(&srv.Config.Advertise, \"advertise\", srv.Config.Advertise, \"Address to advertise externally.\")\n\tflags.StringVar(&srv.Config.AdvertiseGRPC, \"advertise-grpc\", srv.Config.AdvertiseGRPC, \"Address to advertise externally for gRPC.\")\n\tflags.IntVar(&srv.Config.MaxWritesPerRequest, \"max-writes-per-request\", srv.Config.MaxWritesPerRequest, \"Number of write commands per request.\")\n\tflags.StringVar(&srv.Config.LogPath, \"log-path\", srv.Config.LogPath, \"Log path\")\n\tflags.BoolVar(&srv.Config.Verbose, \"verbose\", srv.Config.Verbose, \"Enable verbose logging\")\n\tflags.Uint64Var(&srv.Config.MaxMapCount, \"max-map-count\", srv.Config.MaxMapCount, \"Limits the maximum number of active mmaps. FeatureBase will fall back to reading files once this is exhausted. Set below your system's vm.max_map_count.\")\n\tflags.Uint64Var(&srv.Config.MaxFileCount, \"max-file-count\", srv.Config.MaxFileCount, \"Soft limit on the maximum number of fragment files FeatureBase keeps open simultaneously.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.LongQueryTime), \"long-query-time\", time.Duration(srv.Config.LongQueryTime), \"Duration that will trigger log and stat messages for slow queries. Zero to disable.\")\n\tflags.IntVar(&srv.Config.QueryHistoryLength, \"query-history-length\", srv.Config.QueryHistoryLength, \"Number of queries to remember in history.\")\n\tflags.Int64Var(&srv.Config.MaxQueryMemory, \"max-query-memory\", srv.Config.MaxQueryMemory, \"Maximum memory allowed per Extract() or SELECT query.\")\n\n\t// TLS\n\tSetTLSConfig(flags, \"\", &srv.Config.TLS.CertificatePath, &srv.Config.TLS.CertificateKeyPath, &srv.Config.TLS.CACertPath, &srv.Config.TLS.SkipVerify, &srv.Config.TLS.EnableClientVerification)\n\n\t// Handler\n\tflags.StringSliceVar(&srv.Config.Handler.AllowedOrigins, \"handler.allowed-origins\", []string{}, \"Comma separated list of allowed origin URIs (for CORS/Web UI).\")\n\n\t// Cluster\n\tflags.IntVar(&srv.Config.Cluster.ReplicaN, \"cluster.replicas\", 1, \"Number of hosts each piece of data should be stored on.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.Cluster.LongQueryTime), \"cluster.long-query-time\", time.Duration(srv.Config.Cluster.LongQueryTime), \"RENAMED TO 'long-query-time': Duration that will trigger log and stat messages for slow queries.\") // negative duration indicates invalid value because 0 is meaningful\n\tflags.StringVar(&srv.Config.Cluster.Name, \"cluster.name\", srv.Config.Cluster.Name, \"Human-readable name for the cluster.\")\n\tflags.StringVar(&srv.Config.Cluster.PartitionToNodeAssignment, \"cluster.partition-to-node-assignment\", srv.Config.Cluster.PartitionToNodeAssignment, \"How to assign partitions to nodes. 
jmp-hash or modulus\")\n\n\t// Translation\n\tflags.StringVar(&srv.Config.Translation.PrimaryURL, \"translation.primary-url\", srv.Config.Translation.PrimaryURL, \"DEPRECATED: URL for primary translation node for replication.\")\n\tflags.IntVar(&srv.Config.Translation.MapSize, \"translation.map-size\", srv.Config.Translation.MapSize, \"Size in bytes of mmap to allocate for key translation.\")\n\n\t// Etcd\n\t// Etcd.Name used Config.Name for its value.\n\tflags.StringVar(&srv.Config.Etcd.Dir, \"etcd.dir\", srv.Config.Etcd.Dir, \"Directory to store etcd data files. If not provided, a directory will be created under the main data-dir directory.\")\n\t// Etcd.ClusterName uses Cluster.Name for its value\n\tflags.StringVar(&srv.Config.Etcd.LClientURL, \"etcd.listen-client-address\", srv.Config.Etcd.LClientURL, \"Listen client address.\")\n\tflags.StringVar(&srv.Config.Etcd.AClientURL, \"etcd.advertise-client-address\", srv.Config.Etcd.AClientURL, \"Advertise client address. If not provided, uses the listen client address.\")\n\tflags.StringVar(&srv.Config.Etcd.LPeerURL, \"etcd.listen-peer-address\", srv.Config.Etcd.LPeerURL, \"Listen peer address.\")\n\tflags.StringVar(&srv.Config.Etcd.APeerURL, \"etcd.advertise-peer-address\", srv.Config.Etcd.APeerURL, \"Advertise peer address. If not provided, uses the listen peer address.\")\n\tflags.StringVar(&srv.Config.Etcd.ClusterURL, \"etcd.cluster-url\", srv.Config.Etcd.ClusterURL, \"Cluster URL to join.\")\n\tflags.StringVar(&srv.Config.Etcd.InitCluster, \"etcd.initial-cluster\", srv.Config.Etcd.InitCluster, \"Initial cluster name1=apurl1,name2=apurl2\")\n\tflags.Int64Var(&srv.Config.Etcd.HeartbeatTTL, \"etcd.heartbeat-ttl\", srv.Config.Etcd.HeartbeatTTL, \"Timeout used to determine cluster status\")\n\n\tflags.StringVar(&srv.Config.Etcd.Cluster, \"etcd.static-cluster\", srv.Config.Etcd.Cluster, \"EXPERIMENTAL static featurebase cluster name1=apurl1,name2=apurl2\")\n\t_ = flags.MarkHidden(\"etcd.static-cluster\")\n\tflags.StringVar(&srv.Config.Etcd.EtcdHosts, \"etcd.etcd-hosts\", srv.Config.Etcd.EtcdHosts, \"EXPERIMENTAL etcd server host:port comma separated list\")\n\t_ = flags.MarkHidden(\"etcd.etcd-hosts\") // TODO (twg) expose when ready for public consumption\n\n\t// External postgres database for ExternalLookup\n\tflags.StringVar(&srv.Config.LookupDBDSN, \"lookup-db-dsn\", \"\", \"external (postgres) database DSN to use for ExternalLookup calls\")\n\n\t// AntiEntropy\n\tflags.DurationVar((*time.Duration)(&srv.Config.AntiEntropy.Interval), \"anti-entropy.interval\", (time.Duration)(srv.Config.AntiEntropy.Interval), \"Interval at which to run anti-entropy routine.\")\n\n\t// Metric\n\tflags.StringVar(&srv.Config.Metric.Service, \"metric.service\", srv.Config.Metric.Service, \"Where to send stats: can be expvar (in-memory served at /debug/vars), prometheus, statsd or none.\")\n\tflags.StringVar(&srv.Config.Metric.Host, \"metric.host\", srv.Config.Metric.Host, \"URI to send metrics when metric.service is statsd.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.Metric.PollInterval), \"metric.poll-interval\", (time.Duration)(srv.Config.Metric.PollInterval), \"Polling interval metrics.\")\n\tflags.BoolVar((&srv.Config.Metric.Diagnostics), \"metric.diagnostics\", srv.Config.Metric.Diagnostics, \"Enabled diagnostics reporting.\")\n\n\t// Tracing\n\tflags.StringVar(&srv.Config.Tracing.AgentHostPort, \"tracing.agent-host-port\", srv.Config.Tracing.AgentHostPort, \"Jaeger agent host:port.\")\n\tflags.StringVar(&srv.Config.Tracing.SamplerType, 
\"tracing.sampler-type\", srv.Config.Tracing.SamplerType, \"Jaeger sampler type (remote, const, probabilistic, ratelimiting) or 'off' to disable tracing completely.\")\n\tflags.Float64Var(&srv.Config.Tracing.SamplerParam, \"tracing.sampler-param\", srv.Config.Tracing.SamplerParam, \"Jaeger sampler parameter.\")\n\n\t// Profiling\n\tflags.IntVar(&srv.Config.Profile.BlockRate, \"profile.block-rate\", srv.Config.Profile.BlockRate, \"Sampling rate for goroutine blocking profiler. One sample per <rate> ns.\")\n\tflags.IntVar(&srv.Config.Profile.MutexFraction, \"profile.mutex-fraction\", srv.Config.Profile.MutexFraction, \"Sampling fraction for mutex contention profiling. Sample 1/<rate> of events.\")\n\n\tflags.StringVar(&srv.Config.Storage.Backend, \"storage.backend\", storage.DefaultBackend, \"Storage backend to use: 'rbf' is only supported value.\")\n\tflags.BoolVar(&srv.Config.Storage.FsyncEnabled, \"storage.fsync\", true, \"enable fsync fully safe flush-to-disk\")\n\n\t// RBF specific flags. See pilosa/rbf/cfg/cfg.go for definitions.\n\tsrv.Config.RBFConfig.DefineFlags(flags)\n\n\tflags.BoolVar(&srv.Config.SQL.EndpointEnabled, \"sql.endpoint-enabled\", srv.Config.SQL.EndpointEnabled, \"Enable FeatureBase SQL /sql endpoint (default false)\")\n\n\t// Future flags.\n\tflags.BoolVar(&srv.Config.Future.Rename, \"future.rename\", false, \"Present application name as FeatureBase. Defaults to false, will default to true in an upcoming release.\")\n\n\t// OAuth2.0 identity provider configuration\n\tflags.BoolVar(&srv.Config.Auth.Enable, \"auth.enable\", false, \"Enable AuthN/AuthZ of featurebase, disabled by default.\")\n\tflags.StringVar(&srv.Config.Auth.ClientId, \"auth.client-id\", srv.Config.Auth.ClientId, \"Identity Provider's Application/Client ID.\")\n\tflags.StringVar(&srv.Config.Auth.ClientSecret, \"auth.client-secret\", srv.Config.Auth.ClientSecret, \"Identity Provider's Client Secret.\")\n\tflags.StringVar(&srv.Config.Auth.AuthorizeURL, \"auth.authorize-url\", srv.Config.Auth.AuthorizeURL, \"Identity Provider's Authorize URL.\")\n\tflags.StringVar(&srv.Config.Auth.RedirectBaseURL, \"auth.redirect-base-url\", srv.Config.Auth.RedirectBaseURL, \"Base URL of the featurebase instance used to redirect IDP.\")\n\tflags.StringVar(&srv.Config.Auth.TokenURL, \"auth.token-url\", srv.Config.Auth.TokenURL, \"Identity Provider's Token URL.\")\n\tflags.StringVar(&srv.Config.Auth.GroupEndpointURL, \"auth.group-endpoint-url\", srv.Config.Auth.GroupEndpointURL, \"Identity Provider's Group endpoint URL.\")\n\tflags.StringVar(&srv.Config.Auth.LogoutURL, \"auth.logout-url\", srv.Config.Auth.LogoutURL, \"Identity Provider's Logout URL.\")\n\tflags.StringSliceVar(&srv.Config.Auth.Scopes, \"auth.scopes\", srv.Config.Auth.Scopes, \"Comma separated list of scopes obtained from IdP\")\n\tflags.StringVar(&srv.Config.Auth.SecretKey, \"auth.secret-key\", srv.Config.Auth.SecretKey, \"Secret key used for auth.\")\n\tflags.StringVar(&srv.Config.Auth.PermissionsFile, \"auth.permissions\", srv.Config.Auth.PermissionsFile, \"Permissions' file with group authorization.\")\n\tflags.StringVar(&srv.Config.Auth.QueryLogPath, \"auth.query-log-path\", srv.Config.Auth.QueryLogPath, \"Path to log user queries\")\n\tflags.StringSliceVar(&srv.Config.Auth.ConfiguredIPs, \"auth.configured-ips\", srv.Config.Auth.ConfiguredIPs, \"List of configured IPs allowed for ingest\")\n\n\tflags.BoolVar(&srv.Config.DataDog.Enable, \"datadog.enable\", false, \"enable continuous profiling with DataDog cloud service, Note you must have DataDog agent 
installed\")\n\tflags.StringVar(&srv.Config.DataDog.Service, \"datadog.service\", \"default-service\", \"The Datadog service name, for example my-web-app\")\n\tflags.StringVar(&srv.Config.DataDog.Env, \"datadog.env\", \"default-env\", \"The Datadog environment name, for example, production\")\n\tflags.StringVar(&srv.Config.DataDog.Version, \"datadog.version\", \"default-version\", \"The version of your application\")\n\tflags.StringVar(&srv.Config.DataDog.Tags, \"datadog.tags\", \"molecula\", \"The tags to apply to an uploaded profile. Must be a list of in the format <KEY1>:<VALUE1>,<KEY2>:<VALUE2>\")\n\tflags.BoolVar(&srv.Config.DataDog.CPUProfile, \"datadog.cpu-profile\", true, \"golang pprof cpu profile \")\n\tflags.BoolVar(&srv.Config.DataDog.HeapProfile, \"datadog.heap-profile\", true, \"golang pprof heap profile\")\n\tflags.BoolVar(&srv.Config.DataDog.MutexProfile, \"datadog.mutex-profile\", false, \"golang pprof mutex profile\")\n\tflags.BoolVar(&srv.Config.DataDog.GoroutineProfile, \"datadog.goroutine-profile\", false, \"golang pprof goroutine profile\")\n\tflags.BoolVar(&srv.Config.DataDog.BlockProfile, \"datadog.block-profile\", false, \"golang pprof goroutine \")\n}",
"func (b *AdapterBase) InstallFlags() {\n\tb.initFlagSet()\n\tb.flagOnce.Do(func() {\n\t\tif b.CustomMetricsAdapterServerOptions == nil {\n\t\t\tb.CustomMetricsAdapterServerOptions = server.NewCustomMetricsAdapterServerOptions()\n\t\t\tb.CustomMetricsAdapterServerOptions.OpenAPIConfig = b.OpenAPIConfig\n\t\t}\n\n\t\tb.SecureServing.AddFlags(b.FlagSet)\n\t\tb.Authentication.AddFlags(b.FlagSet)\n\t\tb.Authorization.AddFlags(b.FlagSet)\n\t\tb.Audit.AddFlags(b.FlagSet)\n\t\tb.Features.AddFlags(b.FlagSet)\n\n\t\tb.FlagSet.StringVar(&b.RemoteKubeConfigFile, \"lister-kubeconfig\", b.RemoteKubeConfigFile,\n\t\t\t\"kubeconfig file pointing at the 'core' kubernetes server with enough rights to list \"+\n\t\t\t\t\"any described objects\")\n\t\tb.FlagSet.DurationVar(&b.DiscoveryInterval, \"discovery-interval\", b.DiscoveryInterval,\n\t\t\t\"interval at which to refresh API discovery information\")\n\t})\n}",
"func (s *VMTServer) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&s.ClusterKeyInjected, \"cluster-key-injected\", \"\", \"Injected cluster key to enable pod move across cluster\")\n\tfs.IntVar(&s.Port, \"port\", s.Port, \"The port that kubeturbo's http service runs on.\")\n\tfs.StringVar(&s.Address, \"ip\", s.Address, \"the ip address that kubeturbo's http service runs on.\")\n\t// TODO: The flagset that is included by vendoring k8s uses the same names i.e. \"master\" and \"kubeconfig\".\n\t// This for some reason conflicts with the names introduced by kubeturbo after upgrading the k8s vendored code\n\t// to version 1.19.1. Right now we have changed the names of kubeturbo flags as a quick fix. These flags are\n\t// not user facing and are useful only when running kubeturbo outside the cluster. Find a better solution\n\t// when need be.\n\tfs.StringVar(&s.Master, \"k8s-master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig).\")\n\tfs.StringVar(&s.K8sTAPSpec, \"turboconfig\", s.K8sTAPSpec, \"Path to the config file.\")\n\tfs.StringVar(&s.TestingFlagPath, \"testingflag\", s.TestingFlagPath, \"Path to the testing flag.\")\n\tfs.StringVar(&s.KubeConfig, \"k8s-kubeconfig\", s.KubeConfig, \"Path to kubeconfig file with authorization and master location information.\")\n\tfs.BoolVar(&s.EnableProfiling, \"profiling\", false, \"Enable profiling via web interface host:port/debug/pprof/.\")\n\tfs.BoolVar(&s.UseUUID, \"stitch-uuid\", true, \"Use VirtualMachine's UUID to do stitching, otherwise IP is used.\")\n\tfs.IntVar(&s.KubeletPort, \"kubelet-port\", DefaultKubeletPort, \"The port of the kubelet runs on.\")\n\tfs.BoolVar(&s.EnableKubeletHttps, \"kubelet-https\", DefaultKubeletHttps, \"Indicate if Kubelet is running on https server.\")\n\tfs.BoolVar(&s.UseNodeProxyEndpoint, \"use-node-proxy-endpoint\", false, \"Indicate if Kubelet queries should be routed through APIServer node proxy endpoint.\")\n\tfs.BoolVar(&s.ForceSelfSignedCerts, \"kubelet-force-selfsigned-cert\", true, \"Indicate if we must use self-signed cert.\")\n\tfs.BoolVar(&s.FailVolumePodMoves, \"fail-volume-pod-moves\", true, \"Indicate if kubeturbo should fail to move pods which have volumes attached. Default is set to true.\")\n\tfs.BoolVar(&s.UpdateQuotaToAllowMoves, \"update-quota-to-allow-moves\", true, \"Indicate if kubeturbo should try to update namespace quotas to allow pod moves when quota(s) is/are full. 
Default is set to true.\")\n\tfs.StringVar(&k8sVersion, \"k8sVersion\", k8sVersion, \"[deprecated] the kubernetes server version; for openshift, it is the underlying Kubernetes' version.\")\n\tfs.StringVar(&noneSchedulerName, \"noneSchedulerName\", noneSchedulerName, \"[deprecated] a none-exist scheduler name, to prevent controller to create Running pods during move Action.\")\n\tfs.IntVar(&s.DiscoveryIntervalSec, \"discovery-interval-sec\", defaultDiscoveryIntervalSec, \"The discovery interval in seconds.\")\n\tfs.IntVar(&s.ValidationWorkers, \"validation-workers\", DefaultValidationWorkers, \"The validation workers\")\n\tfs.IntVar(&s.ValidationTimeout, \"validation-timeout-sec\", DefaultValidationTimeout, \"The validation timeout in seconds.\")\n\tfs.IntVar(&s.DiscoveryWorkers, \"discovery-workers\", DefaultDiscoveryWorkers, \"The number of discovery workers.\")\n\tfs.IntVar(&s.DiscoveryTimeoutSec, \"discovery-timeout-sec\", DefaultDiscoveryTimeoutSec, \"The discovery timeout in seconds for each discovery worker.\")\n\tfs.IntVar(&s.DiscoverySamples, \"discovery-samples\", DefaultDiscoverySamples, \"The number of resource usage data samples to be collected from kubelet in each full discovery cycle. This should be no larger than 60.\")\n\tfs.IntVar(&s.DiscoverySampleIntervalSec, \"discovery-sample-interval\", DefaultDiscoverySampleIntervalSec, \"The discovery interval in seconds to collect additional resource usage data samples from kubelet. This should be no smaller than 10 seconds.\")\n\tfs.IntVar(&s.GCIntervalMin, \"garbage-collection-interval\", DefaultGCIntervalMin, \"The garbage collection interval in minutes for possible leaked pods from actions failed because of kubeturbo restarts. Default value is 20 mins.\")\n\tfs.IntVar(&s.ItemsPerListQuery, \"items-per-list-query\", 0, \"Number of workload controller items the list api call should request for.\")\n\tfs.StringSliceVar(&s.sccSupport, \"scc-support\", defaultSccSupport, \"The SCC list allowed for executing pod actions, e.g., --scc-support=restricted,anyuid or --scc-support=* to allow all. 
Default allowed scc is [*].\")\n\t// So far we have noticed cluster api support only in openshift clusters and our implementation works only for openshift\n\t// It thus makes sense to have openshifts machine api namespace as our default cluster api namespace\n\tfs.StringVar(&s.ClusterAPINamespace, \"cluster-api-namespace\", \"openshift-machine-api\", \"The Cluster API namespace.\")\n\tfs.StringVar(&s.BusyboxImage, \"busybox-image\", \"busybox\", \"The complete image uri used for fallback node cpu frequency getter job.\")\n\tfs.StringVar(&s.BusyboxImagePullSecret, \"busybox-image-pull-secret\", \"\", \"The name of the secret that stores the image pull credentials for busybox image.\")\n\tfs.StringVar(&s.CpufreqJobExcludeNodeLabels, \"cpufreq-job-exclude-node-labels\", \"\", \"The comma separated list of key=value node label pairs for the nodes (for example windows nodes) to be excluded from running job based cpufrequency getter.\")\n\tfs.StringVar(&s.containerUtilizationDataAggStrategy, \"cnt-utilization-data-agg-strategy\", agg.DefaultContainerUtilizationDataAggStrategy, \"Container utilization data aggregation strategy.\")\n\tfs.StringVar(&s.containerUsageDataAggStrategy, \"cnt-usage-data-agg-strategy\", agg.DefaultContainerUsageDataAggStrategy, \"Container usage data aggregation strategy.\")\n\tfs.IntVar(&s.readinessRetryThreshold, \"readiness-retry-threshold\", DefaultReadinessRetryThreshold, \"When the pod readiness check fails, Kubeturbo will try readinessRetryThreshold times before giving up. Defaults to 60.\")\n\t// Flags for gitops based action execution\n\tfs.StringVar(&s.gitConfig.GitSecretNamespace, \"git-secret-namespace\", \"\", \"The namespace of the secret which holds the git credentials.\")\n\tfs.StringVar(&s.gitConfig.GitSecretName, \"git-secret-name\", \"\", \"The name of the secret which holds the git credentials.\")\n\tfs.StringVar(&s.gitConfig.GitUsername, \"git-username\", \"\", \"The user name to be used to push changes to git.\")\n\tfs.StringVar(&s.gitConfig.GitEmail, \"git-email\", \"\", \"The email to be used to push changes to git.\")\n\tfs.StringVar(&s.gitConfig.CommitMode, \"git-commit-mode\", \"direct\", \"The commit mode that should be used for git action executions. One of request|direct. Defaults to direct.\")\n\t// CpuFreqGetter image and secret\n\tfs.StringVar(&s.CpuFrequencyGetterImage, \"cpufreqgetter-image\", \"icr.io/cpopen/turbonomic/cpufreqgetter\", \"The complete cpufreqgetter image uri used for fallback node cpu frequency getter job.\")\n\tfs.StringVar(&s.CpuFrequencyGetterPullSecret, \"cpufreqgetter-image-pull-secret\", \"\", \"The name of the secret that stores the image pull credentials for cpufreqgetter image.\")\n\tfs.BoolVar(&s.CleanupSccRelatedResources, \"cleanup-scc-impersonation-resources\", true, \"Enable cleanup the resources for scc impersonation.\")\n}",
"func InstallFlags(flags *pflag.FlagSet, o *Opts) {\n\tflags.StringVar(&o.HomeKubeconfig, \"home-kubeconfig\", o.HomeKubeconfig, \"kube config file to use for connecting to the Kubernetes API server\")\n\tflags.StringVar(&o.RemoteKubeconfigSecretName, \"foreign-kubeconfig-secret-name\", o.RemoteKubeconfigSecretName,\n\t\t\"Secret name to use for connecting to the remote Kubernetes API server\")\n\tflags.StringVar(&o.NodeName, \"nodename\", o.NodeName, \"The name of the node registered by the virtual kubelet\")\n\tflags.StringVar(&o.TenantNamespace, \"tenant-namespace\", o.TenantNamespace, \"The tenant namespace associated with the remote cluster\")\n\tflags.DurationVar(&o.InformerResyncPeriod, \"resync-period\", o.InformerResyncPeriod, \"The resync period for the informers\")\n\n\tflags.StringVar(&o.HomeCluster.ClusterID, \"home-cluster-id\", o.HomeCluster.ClusterID, \"The ID of the home cluster\")\n\tflags.StringVar(&o.HomeCluster.ClusterName, \"home-cluster-name\", o.HomeCluster.ClusterName, \"The name of the home cluster\")\n\tflags.StringVar(&o.ForeignCluster.ClusterID, \"foreign-cluster-id\", o.ForeignCluster.ClusterID, \"The ID of the foreign cluster\")\n\tflags.StringVar(&o.ForeignCluster.ClusterName, \"foreign-cluster-name\", o.ForeignCluster.ClusterName, \"The name of the foreign cluster\")\n\tflags.StringVar(&o.LiqoIpamServer, \"ipam-server\", o.LiqoIpamServer,\n\t\t\"The address to contact the IPAM module (leave it empty to disable the IPAM module)\")\n\tflags.BoolVar(&o.DisableIPReflection, \"disable-ip-reflection\", o.DisableIPReflection,\n\t\t\"Disable the IP reflection for the offloaded pods\")\n\n\tflags.StringVar(&o.NodeIP, \"node-ip\", o.NodeIP, \"The IP address of the virtual kubelet pod, and assigned to the virtual node as internal address\")\n\tflags.Var(o.CertificateType, \"certificate-type\", \"The type of virtual kubelet server certificate to generate, among kubelet, aws, self-signed\")\n\tflags.Uint16Var(&o.ListenPort, \"listen-port\", o.ListenPort, \"The port to listen to for requests from the Kubernetes API server\")\n\tflags.BoolVar(&o.EnableProfiling, \"enable-profiling\", o.EnableProfiling, \"Enable pprof profiling\")\n\n\tflags.UintVar(&o.PodWorkers, \"pod-reflection-workers\", o.PodWorkers, \"The number of pod reflection workers\")\n\tflags.UintVar(&o.ServiceWorkers, \"service-reflection-workers\", o.ServiceWorkers, \"The number of service reflection workers\")\n\tflags.UintVar(&o.EndpointSliceWorkers, \"endpointslice-reflection-workers\", o.EndpointSliceWorkers,\n\t\t\"The number of endpointslice reflection workers\")\n\tflags.UintVar(&o.IngressWorkers, \"ingress-reflection-workers\", o.IngressWorkers, \"The number of ingress reflection workers\")\n\tflags.UintVar(&o.ConfigMapWorkers, \"configmap-reflection-workers\", o.ConfigMapWorkers, \"The number of configmap reflection workers\")\n\tflags.UintVar(&o.SecretWorkers, \"secret-reflection-workers\", o.SecretWorkers, \"The number of secret reflection workers\")\n\tflags.UintVar(&o.ServiceAccountWorkers, \"service-account-reflection-workers\", o.ServiceAccountWorkers,\n\t\t\"The number of service account reflection workers (applies only if API server support is enabled in token API mode)\")\n\tflags.UintVar(&o.PersistentVolumeClaimWorkers, \"persistentvolumeclaim-reflection-workers\", o.PersistentVolumeClaimWorkers,\n\t\t\"The number of persistentvolumeclaim reflection workers\")\n\tflags.UintVar(&o.EventWorkers, \"event-reflection-workers\", o.EventWorkers, \"The number of event reflection 
workers\")\n\n\tflags.DurationVar(&o.NodeLeaseDuration, \"node-lease-duration\", o.NodeLeaseDuration, \"The duration of the node leases\")\n\tflags.DurationVar(&o.NodePingInterval, \"node-ping-interval\", o.NodePingInterval,\n\t\t\"The interval the reachability of the remote API server is verified to assess node readiness, 0 to disable\")\n\tflags.DurationVar(&o.NodePingTimeout, \"node-ping-timeout\", o.NodePingTimeout,\n\t\t\"The timeout of the remote API server reachability check\")\n\tflags.BoolVar(&o.NodeCheckNetwork, \"node-check-network\", o.NodeCheckNetwork,\n\t\t\"Check the network connectivity to set the node status to Ready\")\n\n\tflags.Var(&o.NodeExtraAnnotations, \"node-extra-annotations\", \"Extra annotations to add to the Virtual Node\")\n\tflags.Var(&o.NodeExtraLabels, \"node-extra-labels\", \"Extra labels to add to the Virtual Node\")\n\n\tflags.Var(&o.LabelsNotReflected, \"labels-not-reflected\", \"List of labels (key) that must not be reflected\")\n\tflags.Var(&o.AnnotationsNotReflected, \"annotations-not-reflected\", \"List of annotations (key) that must not be reflected\")\n\n\tflags.BoolVar(&o.EnableAPIServerSupport, \"enable-apiserver-support\", false,\n\t\t\"Enable offloaded pods to interact back with the local Kubernetes API server\")\n\tflags.BoolVar(&o.EnableStorage, \"enable-storage\", false, \"Enable the Liqo storage reflection\")\n\tflags.StringVar(&o.VirtualStorageClassName, \"virtual-storage-class-name\", \"liqo\", \"Name of the virtual storage class\")\n\tflags.StringVar(&o.RemoteRealStorageClassName, \"remote-real-storage-class-name\", \"\", \"Name of the real storage class to use for the actual volumes\")\n\tflags.BoolVar(&o.EnableMetrics, \"metrics-enabled\", false, \"Enable the metrics server\")\n\tflags.StringVar(&o.MetricsAddress, \"metrics-address\", \":8080\", \"The address to listen to for metrics requests\")\n\tflags.StringVar(&o.HomeAPIServerHost, \"home-api-server-host\", \"\",\n\t\t\"Home cluster API server HOST, this parameter is optional and required only to override the default values\")\n\tflags.StringVar(&o.HomeAPIServerPort, \"home-api-server-port\", \"\",\n\t\t\"Home cluster API server PORT, this parameter is optional and required only to override the default values\")\n\tflags.BoolVar(&o.CreateNode, \"create-node\", true, \"Create the virtual node in the home cluster\")\n\n\tflags.BoolVar(&o.VirtualKubeletLeaseEnabled, \"vk-lease-enabled\", true, \"Enable the virtual kubelet lease\")\n\tflags.DurationVar(&o.VirtualKubeletLeaseLeaseDuration, \"vk-lease-duration\", 15*time.Second,\n\t\t\" The duration that non-leader candidates will wait to force acquire leadership.\")\n\tflags.DurationVar(&o.VirtualKubeletLeaseRenewDeadline, \"vk-lease-renew-interval\", 10*time.Second,\n\t\t\"The duration that the acting master will retry refreshing leadership before giving up.\")\n\tflags.DurationVar(&o.VirtualKubeletLeaseRetryPeriod, \"vk-lease-retry-period\", 5*time.Second,\n\t\t\"the duration the LeaderElector clients should wait between tries of actions.\")\n\n\tflagset := flag.NewFlagSet(\"klog\", flag.PanicOnError)\n\tklog.InitFlags(flagset)\n\tflagset.VisitAll(func(f *flag.Flag) {\n\t\tf.Name = \"klog.\" + f.Name\n\t\tflags.AddGoFlag(f)\n\t})\n\n\tflagset = flag.NewFlagSet(\"restcfg\", flag.PanicOnError)\n\trestcfg.InitFlags(flagset)\n\tflags.AddGoFlagSet(flagset)\n}",
"func (s *VMTServer) AddFlags(fs *pflag.FlagSet) {\n\tfs.IntVar(&s.Port, \"port\", s.Port, \"The port that kubeturbo's http service runs on\")\n\tfs.StringVar(&s.Address, \"ip\", s.Address, \"the ip address that kubeturbo's http service runs on\")\n\tfs.IntVar(&s.CAdvisorPort, \"cadvisor-port\", K8sCadvisorPort, \"The port of the cadvisor service runs on\")\n\tfs.StringVar(&s.Master, \"master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig)\")\n\tfs.StringVar(&s.K8sTAPSpec, \"turboconfig\", s.K8sTAPSpec, \"Path to the config file.\")\n\tfs.StringVar(&s.TestingFlagPath, \"testingflag\", s.TestingFlagPath, \"Path to the testing flag.\")\n\tfs.StringVar(&s.KubeConfig, \"kubeconfig\", s.KubeConfig, \"Path to kubeconfig file with authorization and master location information.\")\n\tfs.BoolVar(&s.EnableProfiling, \"profiling\", false, \"Enable profiling via web interface host:port/debug/pprof/.\")\n\tfs.BoolVar(&s.UseVMWare, \"usevmware\", false, \"If the underlying infrastructure is VMWare.\")\n\tfs.IntVar(&s.KubeletPort, \"kubelet-port\", kubelet.DefaultKubeletPort, \"The port of the kubelet runs on\")\n\tfs.BoolVar(&s.EnableKubeletHttps, \"kubelet-https\", kubelet.DefaultKubeletHttps, \"Indicate if Kubelet is running on https server\")\n\tfs.StringVar(&s.K8sVersion, \"k8sVersion\", executor.HigherK8sVersion, \"the kubernetes server version; for openshift, it is the underlying Kubernetes' version.\")\n\tfs.StringVar(&s.NoneSchedulerName, \"noneSchedulerName\", executor.DefaultNoneExistSchedulerName, \"a none-exist scheduler name, to prevent controller to create Running pods during move Action.\")\n\n\t//leaderelection.BindFlags(&s.LeaderElection, fs)\n}",
"func TestHostFeatureFlags(t *testing.T) {\n\t// Extract the kernel version.\n\tversion, err := hostos.KernelVersion()\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to parse kernel version: %v\", err)\n\t}\n\n\t// Extract all cpuinfo flags.\n\tcpuinfoBytes, _ := ioutil.ReadFile(\"/proc/cpuinfo\")\n\tcpuinfo := string(cpuinfoBytes)\n\tre := regexp.MustCompile(`(?m)^flags\\s+: (.*)$`)\n\tm := re.FindStringSubmatch(cpuinfo)\n\tif len(m) != 2 {\n\t\tt.Fatalf(\"Unable to extract flags from %q\", cpuinfo)\n\t}\n\tcpuinfoFlags := make(map[string]struct{})\n\tfor _, f := range strings.Split(m[1], \" \") {\n\t\tcpuinfoFlags[f] = struct{}{}\n\t}\n\n\t// Check against host flags.\n\tfs := HostFeatureSet()\n\tfor feature, info := range allFeatures {\n\t\t// Special cases not consistently visible. We don't mind if\n\t\t// they are exposed in earlier versions.\n\t\tif archSkipFeature(feature, version) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check against the flags.\n\t\t_, ok := cpuinfoFlags[feature.String()]\n\t\tif !info.shouldAppear && ok {\n\t\t\tt.Errorf(\"Unexpected flag: %v\", feature)\n\t\t} else if info.shouldAppear && fs.HasFeature(feature) && !ok {\n\t\t\tt.Errorf(\"Missing flag: %v\", feature)\n\t\t}\n\t}\n}",
"func TestHostFeatureFlags(t *testing.T) {\n\tcpuinfoBytes, _ := ioutil.ReadFile(\"/proc/cpuinfo\")\n\tcpuinfo := string(cpuinfoBytes)\n\tt.Logf(\"Host cpu info:\\n%s\", cpuinfo)\n\n\tfor f := range HostFeatureSet().Set {\n\t\tif f.flagString(false) == \"\" {\n\t\t\tt.Errorf(\"Non-parsable feature: %v\", f)\n\t\t}\n\t\tif s := f.flagString(true); !strings.Contains(cpuinfo, s) {\n\t\t\tt.Errorf(\"Non-native flag: %v\", f)\n\t\t}\n\t}\n}",
"func ParseFeatures(queryString string) error {\n\tfeatureMutex.Lock()\n\tdefer featureMutex.Unlock()\n\n\tfeatures := map[Feature]bool{}\n\t// copy the defaults into this map\n\tfor k, v := range featureDefaults {\n\t\tfeatures[k] = v\n\t}\n\n\tvalues, err := url.ParseQuery(queryString)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error parsing query string for feature gates\")\n\t}\n\n\tfor k := range values {\n\t\tf := Feature(k)\n\n\t\tif _, ok := featureDefaults[f]; !ok {\n\t\t\treturn errors.Errorf(\"Feature Gate %q is not a valid Feature Gate\", f)\n\t\t}\n\n\t\tb, err := strconv.ParseBool(values.Get(k))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error parsing bool value from flag %s \", k)\n\t\t}\n\t\tfeatures[f] = b\n\t}\n\n\tfeatureGates = features\n\treturn nil\n}",
"func (o *Options) InitFlags(fs *flag.FlagSet) {\n\tif fs == nil {\n\t\tfs = flag.CommandLine\n\t}\n\n\tflag.StringVar(\n\t\t&o.MetricsAddr,\n\t\t\"metrics-addr\",\n\t\t\":8080\",\n\t\t\"The address the metric endpoint binds to.\")\n\tflag.BoolVar(\n\t\t&o.LeaderElectionEnabled,\n\t\t\"enable-leader-election\",\n\t\ttrue,\n\t\t\"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.\")\n\tflag.StringVar(\n\t\t&o.LeaderElectionID,\n\t\t\"leader-election-id\",\n\t\t\"\",\n\t\t\"Name of the config map to use as the locking resource when configuring leader election.\")\n\tflag.StringVar(\n\t\t&o.LeaderElectionNamespace,\n\t\t\"leader-election-namespace\",\n\t\t\"\",\n\t\t\"Name of the namespace to use for the configmap locking resource when configuring leader election.\")\n\tflag.StringVar(\n\t\t&o.WatchNamespace,\n\t\t\"namespace\",\n\t\t\"\",\n\t\t\"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.\")\n\tflag.DurationVar(\n\t\t&o.SyncPeriod,\n\t\t\"sync-period\",\n\t\tDefaultSyncPeriod,\n\t\t\"The interval at which cluster-api objects are synchronized\")\n\tflag.IntVar(\n\t\t&o.MaxConcurrentReconciles,\n\t\t\"max-concurrent-reconciles\",\n\t\t10,\n\t\t\"The maximum number of allowed, concurrent reconciles.\")\n\tflag.StringVar(\n\t\t&o.PodNameSuffix,\n\t\t\"pod-name-suffix\",\n\t\t\"controller-manager\",\n\t\t\"The suffix name of the pod running the controller manager.\")\n\tflag.StringVar(\n\t\t&o.PodNamespaceSuffix,\n\t\t\"pod-namespace-suffix\",\n\t\t\"controller-manager\",\n\t\t\"The suffix name of the pod namespace running the controller manager.\")\n\tflag.IntVar(\n\t\t&o.WebhookPort,\n\t\t\"webhook-port\",\n\t\tDefaultWebhookServiceContainerPort,\n\t\t\"Webhook Server port (set to 0 to disable)\")\n\tflag.StringVar(\n\t\t&o.HealthAddr,\n\t\t\"health-addr\",\n\t\t\":9440\",\n\t\t\"The address the health endpoint binds to.\",\n\t)\n}",
"func (s *ServerOption) AddFlags(fs *flag.FlagSet) {\n\n\tfs.StringVar(&s.Config, \"config\", \"https://raw.githubusercontent.com/kubeflow/kubeflow/master/bootstrap/config/kfctl_gcp_iap.yaml\", \"URI of a YAML file containing a KfDef object.\")\n\tfs.StringVar(&s.Name, \"name\", \"\", \"Name for the deployment.\")\n\tfs.StringVar(&s.Project, \"project\", \"\", \"Project.\")\n\tfs.StringVar(&s.Endpoint, \"endpoint\", \"\", \"The endpoint e.g. http://localhost:8080.\")\n\tfs.StringVar(&s.Zone, \"zone\", \"\", \"Zone.\")\n\n}",
"func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) {\n\tfs.Var(&s.RuntimeConfig, \"runtime-config\", \"\"+\n\t\t\"A set of key=value pairs that describe runtime configuration that may be passed \"+\n\t\t\"to apiserver. apis/<groupVersion> key can be used to turn on/off specific api versions. \"+\n\t\t\"apis/<groupVersion>/<resource> can be used to turn on/off specific resources. api/all and \"+\n\t\t\"api/legacy are special keys to control all and legacy api versions respectively.\")\n}",
"func (t *T) AddFlags(fs *flag.FlagSet) {\n\tt.RequirementLevels.AddFlags(fs)\n\tt.FeatureStates.AddFlags(fs)\n}",
"func (a *AdminApiService) GetFeatureFlag(ctx _context.Context, id string) (FeatureFlag, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue FeatureFlag\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = 
err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}",
"func (a *AgentBase) ProcessAgentSpecificCLIFlags(_ *flag.FlagSet) {}",
"func collectFeaturesInfo(gates []corev1alpha2.FeatureGate, features []corev1alpha2.Feature) map[string]*FeatureInfo {\n\tinfos := map[string]*FeatureInfo{}\n\n\tfor i := range features {\n\t\tpolicy := corev1alpha2.GetPolicyForStabilityLevel(features[i].Spec.Stability)\n\n\t\tinfos[features[i].Name] = &FeatureInfo{\n\t\t\tName: features[i].Name,\n\t\t\tDescription: features[i].Spec.Description,\n\t\t\tStability: features[i].Spec.Stability,\n\t\t\tActivated: features[i].Status.Activated,\n\t\t\tImmutable: policy.Immutable,\n\t\t\tDiscoverable: policy.Discoverable,\n\t\t\tFeatureGate: \"--\",\n\t\t}\n\t}\n\n\tfor i := range gates {\n\t\tfor _, featRef := range gates[i].Spec.Features {\n\t\t\tinfo, ok := infos[featRef.Name]\n\t\t\tif ok {\n\t\t\t\t// FeatureGate referenced Feature is in cluster.\n\t\t\t\tinfo.FeatureGate = gates[i].Name\n\t\t\t}\n\n\t\t\tif !ok {\n\t\t\t\t// FeatureGate referenced Feature is not in cluster. Since the Discoverable policy\n\t\t\t\t// cannot be known until the Feature shows up in cluster, set it to true for now.\n\t\t\t\tinfos[featRef.Name] = &FeatureInfo{\n\t\t\t\t\tName: featRef.Name,\n\t\t\t\t\tDiscoverable: true,\n\t\t\t\t\tFeatureGate: gates[i].Name,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn infos\n}",
"func WithFeatureFlags() metadata.MD {\n\treturn metadata.Pairs(\"bigtable-features\", featureFlags)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ZeroLengthSectionAsEOF sets whether to allow the CARv1 decoder to treat a zero-length section as the end of the input CAR file. For example, this can be useful to allow "null padding" after a CARv1 without knowing where the padding begins. | func ZeroLengthSectionAsEOF(enable bool) Option {
return func(o *Options) {
o.ZeroLengthSectionAsEOF = enable
}
} | [
"func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}",
"func (h Header) LastFourZero() bool {\n\tfor i := 1; i < len(h.Padding); i++ {\n\t\tif h.Padding[i] != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func IsEOF(c rune, n int) bool {\n\treturn n == 0\n}",
"func (h *Header) IsZero() bool {\n\treturn !h.HasC && !h.HasEnd && !h.HasType && !h.HasSeq && !h.HasAck && (!h.HasMiss || len(h.Miss) == 0) && len(h.Extra) == 0 && len(h.Bytes) == 0\n}",
"func (f *File) zeroPad(plainSize uint64) syscall.Errno {\n\tlastBlockLen := plainSize % f.contentEnc.PlainBS()\n\tif lastBlockLen == 0 {\n\t\t// Already block-aligned\n\t\treturn 0\n\t}\n\tmissing := f.contentEnc.PlainBS() - lastBlockLen\n\tpad := make([]byte, missing)\n\ttlog.Debug.Printf(\"zeroPad: Writing %d bytes\\n\", missing)\n\t_, errno := f.doWrite(pad, int64(plainSize))\n\treturn errno\n}",
"func IsZeroFilled(b []byte) bool {\n\thdr := (*reflect.SliceHeader)((unsafe.Pointer)(&b))\n\tdata := unsafe.Pointer(hdr.Data)\n\tlength := hdr.Len\n\tif length == 0 {\n\t\treturn true\n\t}\n\n\tif uintptr(data)&0x07 != 0 {\n\t\t// the data is not aligned, fallback to a simple way\n\t\treturn isZeroFilledSimple(b)\n\t}\n\n\tdataEnd := uintptr(data) + uintptr(length)\n\tdataWordsEnd := uintptr(dataEnd) & ^uintptr(0x07)\n\t// example:\n\t//\n\t// 012345678901234567\n\t// wwwwwwwwWWWWWWWWtt : w -- word 0; W -- word 1; t -- tail\n\t// ^\n\t// |\n\t// +-- dataWordsEnd\n\tfor ; uintptr(data) < dataWordsEnd; data = unsafe.Pointer(uintptr(data) + 8) {\n\t\tif *(*uint64)(data) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor ; uintptr(data) < dataEnd; data = unsafe.Pointer(uintptr(data) + 1) {\n\t\tif *(*uint8)(data) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func TestZeroLength(t *testing.T) {\n\tkey1, err := NewFixedLengthKeyFromReader(os.Stdin, 0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer key1.Wipe()\n\tif key1.data != nil {\n\t\tt.Error(\"Fixed length key from reader contained data\")\n\t}\n\n\tkey2, err := NewKeyFromReader(bytes.NewReader(nil))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer key2.Wipe()\n\tif key2.data != nil {\n\t\tt.Error(\"Key from empty reader contained data\")\n\t}\n}",
"func (r *amlStreamReader) EOF() bool {\n\treturn r.offset == uint32(len(r.data))\n}",
"func (d *Decoder) ZeroEmpty(z bool) {\n\td.zeroEmpty = z\n}",
"func (bs *ByteStream) IsEOF() bool {\n\t// a bytestream wraps the bytereader interface\n\t// the Len method returns the number of unread bytes\n\t// in the underlying stream.\n\treturn bs.r.Len() == 0\n}",
"func (dbf *DBF) EOF() bool {\n\treturn dbf.recpointer >= dbf.header.NumRec\n}",
"func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}",
"func TestIgnoreTruncatedPacketEOF(t *testing.T) {\n\toutputFile, err := ioutil.TempFile(\"\", \"joincap_output_\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\toutputFile.Close()\n\tdefer os.Remove(outputFile.Name())\n\n\terr = joincap([]string{\"joincap\",\n\t\t\"-v\", \"-w\", outputFile.Name(),\n\t\t\"test_pcaps/unexpected_eof_on_second_packet.pcap\"})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestIsOrdered(t, outputFile.Name())\n\n\tif packetCount(t, outputFile.Name()) != 1 {\n\t\tt.Fatal(\"error counting\")\n\t}\n}",
"func (f *Feature) EndZero() uint64 {\n\treturn f.StartZero()\n}",
"func (c *Converter) atEof() bool {\n\treturn c.cursor >= c.inputLength\n}",
"func ReadSection0(reader io.Reader) (section0 Section0, err error) {\n\tsection0.Indicator = 255\n\terr = binary.Read(reader, binary.BigEndian, §ion0)\n\tif err != nil {\n\t\treturn section0, err\n\t}\n\n\tif section0.Indicator == Grib {\n\t\tif section0.Edition != SupportedGribEdition {\n\t\t\treturn section0, fmt.Errorf(\"Unsupported grib edition %d\", section0.Edition)\n\t\t}\n\t} else {\n\t\treturn section0, fmt.Errorf(\"Unsupported grib indicator %d\", section0.Indicator)\n\t}\n\n\treturn\n\n}",
"func isEOFPacket(p []byte) bool {\n\tif len(p) > 4 && p[4] == 0xFE {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (suite *RunePartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart := runePart{runeVal: 'a'}\n\tbuff := make([]byte, 0, 0)\n\tcount, err := part.Read(buff)\n\tsuite.Nil(err)\n\tsuite.Equal(0, count)\n\tsuite.Equal(\"\", string(buff))\n}",
"func ZeroHeader() Header {\n\treturn Header{}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
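A minimal, self-contained sketch of how a functional option like the one above composes. The Options struct here mirrors only the field used by this record; the applyOptions helper is a hypothetical name for illustration, not the library's actual API.

package main

import "fmt"

// Options mirrors only the field set by the option in this record.
type Options struct {
	ZeroLengthSectionAsEOF bool
}

// Option is the functional-option shape used throughout these records.
type Option func(*Options)

func ZeroLengthSectionAsEOF(enable bool) Option {
	return func(o *Options) { o.ZeroLengthSectionAsEOF = enable }
}

// applyOptions (hypothetical helper) folds options over zero-value defaults.
func applyOptions(opts ...Option) Options {
	var o Options
	for _, apply := range opts {
		apply(&o)
	}
	return o
}

func main() {
	o := applyOptions(ZeroLengthSectionAsEOF(true))
	fmt.Println(o.ZeroLengthSectionAsEOF) // prints: true
}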
UseDataPadding sets the padding to be added between CARv2 header and its data payload on Finalize. | func UseDataPadding(p uint64) Option {
return func(o *Options) {
o.DataPadding = p
}
} | [
"func PaddingData(method string, params ...string) string {\n\tvar res string\n\tif !strings.HasPrefix(method, HexPrefix) {\n\t\tres = HexPrefix + method\n\t}\n\tfor _, item := range params {\n\t\tif strings.HasPrefix(item, HexPrefix) {\n\t\t\titem = item[2:]\n\t\t}\n\t\tpaddingString := paddingstr[:64-len(item)]\n\t\ttmp := string(paddingString) + item\n\t\tres += tmp\n\t}\n\treturn res\n}",
"func (d *DataPacket) SetData(data []byte) {\n\tif len(data) > 512 {\n\t\tdata = data[0:512]\n\t}\n\t//make the length a multiply of 2\n\tif len(data)%2 != 0 { //add a 0 to make the length sufficient\n\t\tdata = append(data, 0)\n\t}\n\td.setFAL(uint16(126 + len(data)))\n\td.replace(126, data)\n}",
"func (cd *characterData) ReplaceData(offset, count uint, data string) {\n\tlength := cd.Length()\n\n}",
"func WithPaddingAllowed() ParserOption {\n\treturn func(p *Parser) {\n\t\tp.decodePaddingAllowed = true\n\t}\n}",
"func UseIndexPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.IndexPadding = p\n\t}\n}",
"func (enc Encoding) WithPadding(padding rune) *Encoding {}",
"func NewWithData(data []byte) AlignedBuff {\n\treturn AlignedBuff{data: data}\n}",
"func (f *http2Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {\n\tif !http2validStreamID(streamID) && !f.AllowIllegalWrites {\n\t\treturn http2errStreamID\n\t}\n\tif len(pad) > 0 {\n\t\tif len(pad) > 255 {\n\t\t\treturn http2errPadLength\n\t\t}\n\t\tif !f.AllowIllegalWrites {\n\t\t\tfor _, b := range pad {\n\t\t\t\tif b != 0 {\n\t\t\t\t\t// \"Padding octets MUST be set to zero when sending.\"\n\t\t\t\t\treturn http2errPadBytes\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tvar flags http2Flags\n\tif endStream {\n\t\tflags |= http2FlagDataEndStream\n\t}\n\tif pad != nil {\n\t\tflags |= http2FlagDataPadded\n\t}\n\tf.startWrite(http2FrameData, flags, streamID)\n\tif pad != nil {\n\t\tf.wbuf = append(f.wbuf, byte(len(pad)))\n\t}\n\tf.wbuf = append(f.wbuf, data...)\n\tf.wbuf = append(f.wbuf, pad...)\n\treturn f.endWrite()\n}",
"func (socket *Socket) SetFinalData(data []byte) {\n\tsocket.Lock()\n\tdefer socket.Unlock()\n\tsocket.finalData = data\n}",
"func (padding ISO10126) Pad(data []byte, blockSize int) (output []byte, err error) {\n\tif blockSize < 1 || blockSize >= 256 {\n\t\treturn output, fmt.Errorf(\"blocksize is out of bounds: %v\", blockSize)\n\t}\n\tvar paddingBytes = padSize(len(data), blockSize)\n\tpaddingSlice := make([]byte, paddingBytes-1)\n\t_, err = rand.Read(paddingSlice)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\tpaddingSlice = append(paddingSlice, byte(paddingBytes))\n\toutput = append(data, paddingSlice...)\n\treturn output, nil\n}",
"func (socket *Socket) SetFinalData(data string) {\n\tsocket.Lock()\n\tdefer socket.Unlock()\n\tsocket.finalData = data\n}",
"func (p *Patch) SetPadding(value mat.AABB) {\n\tp.Padding = value\n\tp.SetRegion(p.Region)\n}",
"func append_padding_bytes(data []byte) []byte {\n\tpadLen := 16 - len(data)%16\n\tvar padding = make([]byte, padLen)\n\n\t// \"i\" bytes of value \"i\" are appended to the message\n\tfor i := 0; i < padLen; i++ {\n\t\tpadding[i] = byte(padLen)\n\t}\n\n\treturn append(data, padding[:]...)\n}",
"func (p *Flex) readDataAndCheckPaddingBytes(cnt int) (ok bool) {\n\tb := make([]byte, cnt+3+8) // max 3 more plus head plus possible long count\n\tcopy(b, p.IBuf)\n\tif cnt > 4 {\n\t\tb = append(b[0:4], b[8:]...) // remove long count in copy\n\t}\n\ttt := strings.TrimRight(p.upperCaseTriceType, \"I\")\n\tswitch tt { // for trice* too {\n\tcase \"TRICE0\":\n\t\treturn true\n\tcase \"TRICE32_4\", \"TRICE64_2\":\n\t\tp.d3 = p.ReadU32(b[16:20])\n\t\tfallthrough\n\tcase \"TRICE32_3\":\n\t\tp.d2 = p.ReadU32(b[12:16])\n\t\tfallthrough\n\tcase \"TRICE8_8\", \"TRICE16_4\", \"TRICE32_2\", \"TRICE64_1\":\n\t\tp.d1 = p.ReadU32(b[8:12])\n\t\tfallthrough\n\tcase \"TRICE8_4\", \"TRICE16_2\", \"TRICE32_1\":\n\t\tp.d0 = p.ReadU32(b[4:8])\n\t\treturn true // no padding bytes\n\tcase \"TRICE_S\":\n\t\tx := 3 & cnt\n\t\tswitch x {\n\t\tcase 0:\n\t\t\treturn true // no padding bytes\n\t\tcase 3:\n\t\t\tok = 0 == b[4+cnt]\n\t\tcase 2:\n\t\t\tok = 0 == b[4+cnt] && 0 == b[1+4+cnt]\n\t\tcase 1:\n\t\t\tok = 0 == b[4+cnt] && 0 == b[1+4+cnt] && 0 == b[2+4+cnt]\n\t\t}\n\tdefault:\n\t\tp.d0 = p.ReadU32(b[4:8])\n\t\tswitch tt {\n\t\tcase \"TRICE8_1\":\n\t\t\tok = p.d0 < (1 << 8)\n\t\tcase \"TRICE8_2\", \"TRICE16_1\":\n\t\t\tok = p.d0 < (1 << 16)\n\t\tcase \"TRICE8_3\":\n\t\t\tok = p.d0 < (1 << 24)\n\t\tdefault:\n\t\t\tp.d1 = p.ReadU32(b[8:12])\n\t\t\tswitch tt {\n\t\t\tcase \"TRICE8_5\":\n\t\t\t\tok = p.d1 < (1 << 8)\n\t\t\tcase \"TRICE8_6\", \"TRICE16_3\":\n\t\t\t\tok = p.d1 < (1 << 16)\n\t\t\tcase \"TRICE8_7\":\n\t\t\t\tok = p.d1 < (1 << 24)\n\t\t\t}\n\t\t}\n\t}\n\tif true == ok {\n\t\treturn\n\t}\n\treturn false\n}",
"func (t *Transport) RequiresPadding() bool {\n\treturn true\n}",
"func (o *ExportArchiveParams) SetData(data bool) {\n\to.Data = data\n}",
"func (t DNSOverTCP) RequiresPadding() bool {\n\treturn t.requiresPadding\n}",
"func DataChangesWithFinality(finalaity string) DataChangesOption {\n\treturn func(qr *itypes.ChangesRequest) {\n\t\tqr.Finality = finalaity\n\t}\n}",
"func (p *Packet) SetData(data []byte) {\n\tp.Data = data\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
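A sketch of how the data padding affects byte offsets on finalize. The 11-byte pragma and 40-byte header sizes are assumptions based on the CARv2 format, and dataOffset is a hypothetical helper; the record itself only establishes that the option stores the value in o.DataPadding.

package main

import "fmt"

type Options struct{ DataPadding uint64 }

type Option func(*Options)

func UseDataPadding(p uint64) Option {
	return func(o *Options) { o.DataPadding = p }
}

// dataOffset is a hypothetical helper: the payload is assumed to start
// after an 11-byte pragma, a 40-byte v2 header, and the configured padding.
func dataOffset(o Options) uint64 {
	const pragma, header = 11, 40
	return pragma + header + o.DataPadding
}

func main() {
	var o Options
	UseDataPadding(4096)(&o) // an Option can also be applied directly
	fmt.Println("payload starts at byte", dataOffset(o)) // 4147
}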
UseIndexPadding sets the padding between data payload and its index on Finalize. | func UseIndexPadding(p uint64) Option {
return func(o *Options) {
o.IndexPadding = p
}
} | [
"func (s *BasePlSqlParserListener) ExitUsing_index_clause(ctx *Using_index_clauseContext) {}",
"func UseDataPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.DataPadding = p\n\t}\n}",
"func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}",
"func IndexFixer(index int, listSize int) int {\n\tindex = index - 1\n\n\tif index <= 0 {\n\t\tindex = 0\n\t} else if index > listSize-1 {\n\t\tindex = listSize - 1\n\t}\n\n\treturn index\n}",
"func indexTruncateInTxn(\n\tctx context.Context,\n\ttxn *kv.Txn,\n\texecCfg *ExecutorConfig,\n\tevalCtx *tree.EvalContext,\n\ttableDesc catalog.TableDescriptor,\n\tidx *descpb.IndexDescriptor,\n\ttraceKV bool,\n) error {\n\talloc := &rowenc.DatumAlloc{}\n\tvar sp roachpb.Span\n\tfor done := false; !done; done = sp.Key == nil {\n\t\trd := row.MakeDeleter(execCfg.Codec, tableDesc, nil /* requestedCols */)\n\t\ttd := tableDeleter{rd: rd, alloc: alloc}\n\t\tif err := td.init(ctx, txn, evalCtx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar err error\n\t\tsp, err = td.deleteIndex(\n\t\t\tctx, idx, sp, indexTruncateChunkSize, traceKV,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Remove index zone configs.\n\treturn RemoveIndexZoneConfigs(ctx, txn, execCfg, tableDesc, []descpb.IndexDescriptor{*idx})\n}",
"func (i *Index) Write(off uint32, pos uint64) error {\n\tif uint64(len(i.mmap)) < i.size+entWidth {\n\t\treturn lib.Wrap(io.EOF, \"Not enough space to append index data\")\n\t}\n\n\tenc.PutUint32(i.mmap[i.size:i.size+offWidth], off)\n\tenc.PutUint64(i.mmap[i.size+offWidth:i.size+entWidth], pos)\n\n\ti.size += entWidth\n\n\treturn nil\n}",
"func (app *builder) WithIndex(index uint) Builder {\n\tapp.index = index\n\treturn app\n}",
"func IndexWrite(x *suffixarray.Index, w io.Writer) error",
"func (e *mockClient) FlushIndex(ctx context.Context, index string) error {\n\treturn nil\n}",
"func (mc *MockContiv) SetPodAppNsIndex(pod podmodel.ID, nsIndex uint32) {\n\tmc.podAppNs[pod] = nsIndex\n}",
"func (self *SinglePad) SetIndexA(member int) {\n self.Object.Set(\"index\", member)\n}",
"func (s *BaseCymbolListener) ExitIndex(ctx *IndexContext) {}",
"func (s *Service) DeleteIdx(c context.Context, nwMsg []byte) (err error) {\n\tvar opinion *model.Opinion\n\tif err = json.Unmarshal(nwMsg, &opinion); err != nil {\n\t\tlog.Error(\"json.Unmarshal(%s) error(%v)\", string(nwMsg), err)\n\t\treturn\n\t}\n\ts.dao.DelOpinionCache(c, opinion.Vid)\n\ts.dao.DelCaseIdx(c, opinion.Cid)\n\ts.dao.DelVoteIdx(c, opinion.Cid)\n\treturn\n}",
"func _1731resizeIndexObject(tls *crt.TLS, _db uintptr /* *Tsqlite3 */, _pIdx uintptr /* *TIndex */, _N int32) (r int32) {\n\tvar (\n\t\t_zExtra uintptr // *int8\n\t\t_nByte int32\n\t)\n\tif int32(*(*Tu16)(unsafe.Pointer(_pIdx + 52))) < _N {\n\t\tgoto _1\n\t}\n\n\treturn int32(0)\n\n_1:\n\t_nByte = int32(uint32(7) * uint32(_N))\n\t_zExtra = _297sqlite3DbMallocZero(tls, _db, uint64(_nByte))\n\tif _zExtra != 0 {\n\t\tgoto _2\n\t}\n\n\treturn int32(7)\n\n_2:\n\tcrt.Xmemcpy(tls, _zExtra, *(*uintptr)(unsafe.Pointer(_pIdx + 32)), uint32(4)*uint32(*(*Tu16)(unsafe.Pointer(_pIdx + 52))))\n\t*(*uintptr)(unsafe.Pointer(_pIdx + 32)) = _zExtra\n\t_zExtra += uintptr(uint32(4) * uint32(_N))\n\tcrt.Xmemcpy(tls, _zExtra, *(*uintptr)(unsafe.Pointer(_pIdx + 4)), uint32(2)*uint32(*(*Tu16)(unsafe.Pointer(_pIdx + 52))))\n\t*(*uintptr)(unsafe.Pointer(_pIdx + 4)) = _zExtra\n\t_zExtra += uintptr(uint32(2) * uint32(_N))\n\tcrt.Xmemcpy(tls, _zExtra, *(*uintptr)(unsafe.Pointer(_pIdx + 28)), uint32(*(*Tu16)(unsafe.Pointer(_pIdx + 52))))\n\t*(*uintptr)(unsafe.Pointer(_pIdx + 28)) = _zExtra\n\t*(*Tu16)(unsafe.Pointer(_pIdx + 52)) = Tu16(_N)\n\tset221b(&(*(*uint8)(unsafe.Pointer(_pIdx + 55))), uint32(1))\n\treturn int32(0)\n}",
"func (s *BaseLuaParserListener) ExitIndex(ctx *IndexContext) {}",
"func WriteIndex(index common.Index) error {\n\tbytes, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(indexCachePath, bytes, 0600)\n\treturn err\n}",
"func (s *BasevhdlListener) ExitIndex_specification(ctx *Index_specificationContext) {}",
"func (h *Hasher) WriteChainIndex(index ChainIndex) {\n\tbinary.LittleEndian.PutUint64(h.buf[:8], index.Height)\n\tcopy(h.buf[8:], index.ID[:])\n\th.h.Write(h.buf[:40])\n}",
"func dataframeResetIndex(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tif err := starlark.UnpackArgs(\"reset_index\", args, kwargs); err != nil {\n\t\treturn nil, err\n\t}\n\tself := b.Receiver().(*DataFrame)\n\n\tif self.index == nil {\n\t\treturn self, nil\n\t}\n\n\tnewColumns := append([]string{\"index\"}, self.columns.texts...)\n\tnewBody := make([]Series, 0, self.numCols())\n\n\tnewBody = append(newBody, Series{which: typeObj, valObjs: self.index.texts})\n\tfor _, col := range self.body {\n\t\tnewBody = append(newBody, col)\n\t}\n\n\treturn &DataFrame{\n\t\tcolumns: NewIndex(newColumns, \"\"),\n\t\tbody: newBody,\n\t}, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
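The companion arithmetic for index placement, again as a hedged sketch: the index is assumed to begin after the data payload plus the configured padding, and payloadEnd is a made-up input, since the record only shows the option storing o.IndexPadding.

package main

import "fmt"

type Options struct{ IndexPadding uint64 }

type Option func(*Options)

func UseIndexPadding(p uint64) Option {
	return func(o *Options) { o.IndexPadding = p }
}

// indexOffset assumes the index begins right after the data payload
// plus the configured padding; payloadEnd is an illustrative input.
func indexOffset(payloadEnd uint64, o Options) uint64 {
	return payloadEnd + o.IndexPadding
}

func main() {
	var o Options
	UseIndexPadding(512)(&o)
	fmt.Println("index starts at byte", indexOffset(10_000, o)) // 10512
}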
UseIndexCodec sets the codec used for index generation. | func UseIndexCodec(c multicodec.Code) Option {
return func(o *Options) {
o.IndexCodec = c
}
} | [
"func encodeIndex(d *Index) *internal.Index {\n\treturn &internal.Index{\n\t\tName: d.name,\n\t\tMeta: &internal.IndexMeta{\n\t\t\tColumnLabel: d.columnLabel,\n\t\t\tTimeQuantum: string(d.timeQuantum),\n\t\t},\n\t\tMaxSlice: d.MaxSlice(),\n\t\tFrames: encodeFrames(d.Frames()),\n\t}\n}",
"func UseIndex(designDocument, name string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tif name == \"\" {\n\t\t\tpa.SetParameter(\"use_index\", designDocument)\n\t\t} else {\n\t\t\tpa.SetParameter(\"use_index\", []string{designDocument, name})\n\t\t}\n\t}\n}",
"func (o *BlockBasedTableOptions) SetIndexType(value IndexType) {\n\tC.rocksdb_block_based_options_set_index_type(o.c, C.int(value))\n}",
"func NewIndexDriver(root string) sql.IndexDriver {\n\treturn NewDriver(root, pilosa.DefaultClient())\n}",
"func UseIndex() *ishell.Cmd {\n\n\treturn &ishell.Cmd{\n\t\tName: \"use\",\n\t\tHelp: \"Select index to use for subsequent document operations\",\n\t\tFunc: func(c *ishell.Context) {\n\t\t\tif context == nil {\n\t\t\t\terrorMsg(c, errNotConnected)\n\t\t\t} else {\n\t\t\t\tdefer restorePrompt(c)\n\t\t\t\tif len(c.Args) < 1 {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Using index \", cy(context.ActiveIndex))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.Args[0] == \"--\" {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Index \", cy(context.ActiveIndex), \" is no longer in use\")\n\t\t\t\t\t\tcontext.ActiveIndex = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts, err := context.ResolveAndValidateIndex(c.Args[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorMsg(c, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontext.ActiveIndex = s\n\t\t\t\tif s != c.Args[0] {\n\t\t\t\t\tcprintlist(c, \"For alias \", cyb(c.Args[0]), \" selected index \", cy(s))\n\t\t\t\t} else {\n\t\t\t\t\tcprintlist(c, \"Selected index \", cy(s))\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n}",
"func WithoutIndex() Option {\n\treturn func(o *Options) {\n\t\to.IndexCodec = index.CarIndexNone\n\t}\n}",
"func (s *Store) SetCodec(codec types.Codec) {\n\ts.codec = codec\n}",
"func (idx *IndexMap) SetIndexType(indtype string) *IndexMap {\n\tidx.IndexType = indtype\n\treturn idx\n}",
"func (c *Chip8) SetIndex() {\n\tc.index = c.inst & 0x0FFF\n}",
"func WithIndexCtx(ctx context.Context, indexCtx IndexCtx) context.Context {\n\treturn context.WithValue(ctx, indexCtxKey{}, indexCtx)\n}",
"func (o *NearestUsingGET1Params) SetIndexType(indexType *string) {\n\to.IndexType = indexType\n}",
"func Index(collectionName, key, indexType string) error {\n\tconnection := shared.Connection()\n\tif connection == nil {\n\t\treturn errors.New(\"No connection to database server\")\n\t}\n\tcollection := connection.DB(variables.DBName).C(collectionName)\n\n\tsetIndex := fmt.Sprintf(\"$%s:%s\", indexType, key)\n\n\t// Define the index\n\tindex := mgo.Index{\n\t\tKey: []string{setIndex},\n\t}\n\n\t// Apply the index\n\treturn collection.EnsureIndex(index)\n}",
"func (u UserConfig) IndexType() string {\n\treturn \"hnsw\"\n}",
"func GenerateIndex(index int, nameLen uint32, ensType uint32) int {\n\tdatatype := BytesInFloat\n\n\tswitch ensType {\n\tcase dataTypeByte:\n\t\tdatatype = BytesInInt8\n\t\tbreak\n\tcase dataTypeInt:\n\t\tdatatype = BytesInInt32\n\t\tbreak\n\tcase dataTypeFloat:\n\t\tdatatype = BytesInFloat\n\t\tbreak\n\tdefault:\n\t\tdatatype = BytesInFloat\n\t\tbreak\n\t}\n\n\treturn getHeaderSize(nameLen) + (index * datatype)\n}",
"func setIndex(resp http.ResponseWriter, index uint64) {\n\t// If we ever return X-Consul-Index of 0 blocking clients will go into a busy\n\t// loop and hammer us since ?index=0 will never block. It's always safe to\n\t// return index=1 since the very first Raft write is always an internal one\n\t// writing the raft config for the cluster so no user-facing blocking query\n\t// will ever legitimately have an X-Consul-Index of 1.\n\tif index == 0 {\n\t\tindex = 1\n\t}\n\tresp.Header().Set(\"X-Consul-Index\", strconv.FormatUint(index, 10))\n}",
"func (g *GenOpts) BlobIndex() (string, error) {\n\tbp, err := g.blobIndexPrefix()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tjk, err := g.jsonKey()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts := bp + jk\n\treturn s, nil\n}",
"func EncodingIndexer(encoding string) Indexer {\n\treturn func(r *http.Request) interface{} {\n\t\tp := r.Method\n\t\tif strings.Contains(r.Header.Get(header.AcceptEncoding), encoding) {\n\t\t\tp += \":\" + encoding\n\t\t}\n\t\tp += \":\" + path.Clean(r.URL.Path)\n\t\treturn p\n\t}\n}",
"func FromIndex(log zerolog.Logger, lib dps.ReadLibrary, db *badger.DB, options ...Option) *Index {\n\n\tcfg := DefaultConfig\n\tfor _, option := range options {\n\t\toption(&cfg)\n\t}\n\n\ti := Index{\n\t\tlog: log.With().Str(\"component\", \"index_loader\").Logger(),\n\t\tlib: lib,\n\t\tdb: db,\n\t\tcfg: cfg,\n\t}\n\n\treturn &i\n}",
"func (a *Allocator) SetIndex(index uint64) {\n\ta.Lock()\n\ta.dbIndex = index\n\ta.dbExists = true\n\ta.Unlock()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
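A sketch of codec selection with a locally defined Code type standing in for multicodec.Code. The 0x0400/0x0401 values follow the public multicodec table for sorted CAR indexes, but treat the values and constant names as assumptions rather than this library's API.

package main

import "fmt"

// Code stands in for multicodec.Code; the values below follow the
// public multicodec table (assumed): 0x0400 car-index-sorted,
// 0x0401 car-multihash-index-sorted.
type Code uint64

const (
	CarIndexSorted          Code = 0x0400
	CarMultihashIndexSorted Code = 0x0401
)

type Options struct{ IndexCodec Code }

type Option func(*Options)

func UseIndexCodec(c Code) Option {
	return func(o *Options) { o.IndexCodec = c }
}

func main() {
	var o Options
	UseIndexCodec(CarMultihashIndexSorted)(&o)
	fmt.Printf("index codec: %#x\n", uint64(o.IndexCodec)) // 0x401
}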
WithoutIndex flags that no index should be included in generation. | func WithoutIndex() Option {
return func(o *Options) {
o.IndexCodec = index.CarIndexNone
}
} | [
"func IndexOptionsNone() IndexOptions {\n\tresult := IndexOptions{}\n\n\treturn result\n}",
"func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}",
"func (_m *DirectRepositoryWriter) DisableIndexRefresh() {\n\t_m.Called()\n}",
"func (r *Search) AllowNoIndices(allownoindices bool) *Search {\n\tr.values.Set(\"allow_no_indices\", strconv.FormatBool(allownoindices))\n\n\treturn r\n}",
"func (_m *DirectRepository) DisableIndexRefresh() {\n\t_m.Called()\n}",
"func WithoutCompression() WriteOption {\n\treturn func(opt *wopt) error {\n\t\topt.compress = 0\n\t\treturn nil\n\t}\n}",
"func NewRebuildIndexNoContent() *RebuildIndexNoContent {\n\treturn &RebuildIndexNoContent{}\n}",
"func (wouo *WorkOrderUpdateOne) ClearIndex() *WorkOrderUpdateOne {\n\twouo.index = nil\n\twouo.clearindex = true\n\treturn wouo\n}",
"func WithoutPosition() OptionFunc {\n\treturn func(opt *Options) {\n\t\topt.ShowFlag = Fnopos\n\t}\n}",
"func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService {\n\ts.allowNoIndices = &allowNoIndices\n\treturn s\n}",
"func WithoutSyncWrites() DBOption {\n\treturn func(opts *bdgrOpts) {\n\t\topts.opts = opts.opts.WithSyncWrites(false)\n\t}\n}",
"func NoValues() Values { return noValues{} }",
"func WithoutTimestamp() Option {\n\treturn func(l LoggerOpts) LoggerOpts {\n\t\tl.IncludeTime = false\n\t\treturn l\n\t}\n}",
"func (o *DatasetEvent) UnsetSourceMapIndex() {\n\to.SourceMapIndex.Unset()\n}",
"func (g *GeneratedFile) Unskip() {\n\tg.skip = false\n}",
"func NewEmptyIndex() *Index {\n\tpostingLists := make(map[string]PostingList)\n\tdocIDToFilePath := make(map[int]string)\n\treturn &Index{\n\t\tpostingLists: postingLists,\n\t\tdocIDToFilePath: docIDToFilePath,\n\t}\n}",
"func WithoutLocation() Option {\n\treturn func(l LoggerOpts) LoggerOpts {\n\t\tl.IncludeLocation = false\n\t\treturn l\n\t}\n}",
"func NoCompact() Option {\n\treturn func(o *encOpts) { o.flags.set(noCompact) }\n}",
"func NoDedup() walkOption {\n\treturn func(wo *walkOptions) {\n\t\two.dedup = nil\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
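A sketch showing why option order matters here: WithoutIndex simply overwrites whatever codec an earlier option set. CarIndexNone is modeled as a local sentinel, since the record only tells us the option assigns index.CarIndexNone to o.IndexCodec.

package main

import "fmt"

type Code uint64

// CarIndexNone is a local sentinel standing in for index.CarIndexNone.
const CarIndexNone Code = 0

type Options struct{ IndexCodec Code }

type Option func(*Options)

func UseIndexCodec(c Code) Option { return func(o *Options) { o.IndexCodec = c } }
func WithoutIndex() Option        { return func(o *Options) { o.IndexCodec = CarIndexNone } }

func main() {
	var o Options
	// Later options win: the second call overrides the first.
	for _, apply := range []Option{UseIndexCodec(0x0400), WithoutIndex()} {
		apply(&o)
	}
	fmt.Println(o.IndexCodec == CarIndexNone) // true
}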
StoreIdentityCIDs sets whether to persist sections that are referenced by CIDs with multihash.IDENTITY digest. When writing CAR files with this option, Characteristics.IsFullyIndexed will be set. By default, the blockstore interface will always return true for Has() called with identity CIDs, but when this option is turned on, it will defer to the index. When creating an index (or loading a CARv1 as a blockstore), when this option is on, identity CIDs will be included in the index. This option is disabled by default. | func StoreIdentityCIDs(b bool) Option {
return func(o *Options) {
o.StoreIdentityCIDs = b
}
} | [
"func (cosi *cosiAggregate) StoreIdentities(idents map[string]proto.Message) {\n\tfor k, v := range idents {\n\t\tpoint := suite.G2().Point()\n\t\terr := point.UnmarshalBinary(v.(*BdnIdentity).PublicKey)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcosi.skipchain.identities[k] = point\n\t}\n}",
"func StoreIdentityInFiles(i *security.Identity, keyFile string, crtFile string, csrFile string) error {\n\tvar err error\n\n\tif i.Key != nil {\n\t\tif err = CreatePEM(keyFile, i.Key); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif i.Certificate != nil {\n\t\tif err = CreatePEM(crtFile, i.Certificate); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif i.Request != nil {\n\t\tif err = CreatePEM(csrFile, i.Request); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (b *ReadWrite) PutMany(blks []blocks.Block) error {\n\tb.ronly.mu.Lock()\n\tdefer b.ronly.mu.Unlock()\n\n\tif b.ronly.closed {\n\t\treturn errClosed\n\t}\n\n\tfor _, bl := range blks {\n\t\tc := bl.Cid()\n\n\t\t// If StoreIdentityCIDs option is disabled then treat IDENTITY CIDs like IdStore.\n\t\tif !b.opts.StoreIdentityCIDs {\n\t\t\t// Check for IDENTITY CID. If IDENTITY, ignore and move to the next block.\n\t\t\tif _, ok, err := isIdentity(c); err != nil {\n\t\t\t\treturn err\n\t\t\t} else if ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Check if its size is too big.\n\t\t// If larger than maximum allowed size, return error.\n\t\t// Note, we need to check this regardless of whether we have IDENTITY CID or not.\n\t\t// Since multhihash codes other than IDENTITY can result in large digests.\n\t\tcSize := uint64(len(c.Bytes()))\n\t\tif cSize > b.opts.MaxIndexCidSize {\n\t\t\treturn &carv2.ErrCidTooLarge{MaxSize: b.opts.MaxIndexCidSize, CurrentSize: cSize}\n\t\t}\n\n\t\tif !b.opts.BlockstoreAllowDuplicatePuts {\n\t\t\tif b.ronly.opts.BlockstoreUseWholeCIDs && b.idx.hasExactCID(c) {\n\t\t\t\tcontinue // deduplicated by CID\n\t\t\t}\n\t\t\tif !b.ronly.opts.BlockstoreUseWholeCIDs {\n\t\t\t\t_, err := b.idx.Get(c)\n\t\t\t\tif err == nil {\n\t\t\t\t\tcontinue // deduplicated by hash\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tn := uint64(b.dataWriter.Position())\n\t\tif err := util.LdWrite(b.dataWriter, c.Bytes(), bl.RawData()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb.idx.insertNoReplace(c, n)\n\t}\n\treturn nil\n}",
"func (k Keeper) SetIdentityCount(ctx sdk.Context, count int64) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.IdentityCountKey))\n\tbyteKey := types.KeyPrefix(types.IdentityCountKey)\n\tbz := []byte(strconv.FormatInt(count, 10))\n\tstore.Set(byteKey, bz)\n}",
"func AddIndependentPropertyGeneratorsForIdentityForCmk(gens map[string]gopter.Gen) {\n\tgens[\"UserAssignedIdentity\"] = gen.PtrOf(gen.AlphaString())\n}",
"func (m *cidsMap) Sync(vmis []*virtv1.VirtualMachineInstance) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tfor _, vmi := range vmis {\n\t\tif vmi.Status.VSOCKCID == nil {\n\t\t\tcontinue\n\t\t}\n\t\tkey := controller.VirtualMachineInstanceKey(vmi)\n\t\tm.cids[key] = *vmi.Status.VSOCKCID\n\t\tm.reverse[*vmi.Status.VSOCKCID] = key\n\t}\n}",
"func (m *User) SetIdentities(value []ObjectIdentityable)() {\n m.identities = value\n}",
"func (certManager *identityManager) StoreIdentity(ctx context.Context, remoteCluster discoveryv1alpha1.ClusterIdentity,\n\tnamespace string, key []byte, remoteProxyURL string, identityResponse *auth.CertificateIdentityResponse) error {\n\tsecret := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: identitySecretRoot + \"-\",\n\t\t\tNamespace: namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tlocalIdentitySecretLabel: \"true\",\n\t\t\t\tdiscovery.ClusterIDLabel: remoteCluster.ClusterID,\n\t\t\t\tcertificateAvailableLabel: \"true\",\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t// one year starting from now\n\t\t\t\tcertificateExpireTimeAnnotation: fmt.Sprintf(\"%v\", time.Now().AddDate(1, 0, 0).Unix()),\n\t\t\t},\n\t\t},\n\t\tStringData: map[string]string{\n\t\t\tAPIServerURLSecretKey: identityResponse.APIServerURL,\n\t\t\tnamespaceSecretKey: identityResponse.Namespace,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\tprivateKeySecretKey: key,\n\t\t},\n\t}\n\n\tif identityResponse.HasAWSValues() || certManager.isAwsIdentity(secret) {\n\t\tsecret.StringData[awsAccessKeyIDSecretKey] = identityResponse.AWSIdentityInfo.AccessKeyID\n\t\tsecret.StringData[awsSecretAccessKeySecretKey] = identityResponse.AWSIdentityInfo.SecretAccessKey\n\t\tsecret.StringData[awsRegionSecretKey] = identityResponse.AWSIdentityInfo.Region\n\t\tsecret.StringData[awsEKSClusterIDSecretKey] = identityResponse.AWSIdentityInfo.EKSClusterID\n\t\tsecret.StringData[awsIAMUserArnSecretKey] = identityResponse.AWSIdentityInfo.IAMUserArn\n\t} else {\n\t\tcertificate, err := base64.StdEncoding.DecodeString(identityResponse.Certificate)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to decode certificate: %w\", err)\n\t\t}\n\n\t\tsecret.Data[certificateSecretKey] = certificate\n\t}\n\n\t// ApiServerCA may be empty if the remote cluster exposes the ApiServer with a certificate issued by \"public\" CAs\n\tif identityResponse.APIServerCA != \"\" {\n\t\tapiServerCa, err := base64.StdEncoding.DecodeString(identityResponse.APIServerCA)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to decode certification authority: %w\", err)\n\t\t}\n\n\t\tsecret.Data[apiServerCaSecretKey] = apiServerCa\n\t}\n\n\tif remoteProxyURL != \"\" {\n\t\tsecret.StringData[apiProxyURLSecretKey] = remoteProxyURL\n\t}\n\n\tif _, err := certManager.client.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"failed to create secret: %w\", err)\n\t}\n\treturn nil\n}",
"func (ic *IdentityCache) StoreIdentity(identity Identity) error {\n\tcache := cacheData{\n\t\tIdentity: identity,\n\t}\n\n\treturn ic.writeCache(cache)\n}",
"func migrateIdentities(ctx hive.HookContext, clientset k8sClient.Clientset, shutdowner hive.Shutdowner) error {\n\tdefer shutdowner.Shutdown(nil)\n\n\t// Setup global configuration\n\t// These are defined in cilium/cmd/kvstore.go\n\toption.Config.KVStore = kvStore\n\toption.Config.KVStoreOpt = kvStoreOpts\n\n\t// This allows us to initialize a CRD allocator\n\toption.Config.IdentityAllocationMode = option.IdentityAllocationModeCRD // force CRD mode to make ciliumid\n\n\t// Init Identity backends\n\tinitCtx, initCancel := context.WithTimeout(ctx, opTimeout)\n\tkvstoreBackend := initKVStore(initCtx)\n\n\tcrdBackend, crdAllocator := initK8s(initCtx, clientset)\n\tinitCancel()\n\n\tlog.Info(\"Listing identities in kvstore\")\n\tlistCtx, listCancel := context.WithTimeout(ctx, opTimeout)\n\tkvstoreIDs, err := getKVStoreIdentities(listCtx, kvstoreBackend)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Unable to initialize Identity Allocator with CRD backend to allocate identities with already allocated IDs\")\n\t}\n\tlistCancel()\n\n\tlog.Info(\"Migrating identities to CRD\")\n\talreadyAllocatedKeys := make(map[idpool.ID]allocator.AllocatorKey) // IDs that are already allocated, maybe with different labels\n\n\tfor id, key := range kvstoreIDs {\n\t\tscopedLog := log.WithFields(logrus.Fields{\n\t\t\tlogfields.Identity: id,\n\t\t\tlogfields.IdentityLabels: key.GetKey(),\n\t\t})\n\n\t\tctx, cancel := context.WithTimeout(ctx, opTimeout)\n\t\terr := crdBackend.AllocateID(ctx, id, key)\n\t\tswitch {\n\t\tcase err != nil && k8serrors.IsAlreadyExists(err):\n\t\t\talreadyAllocatedKeys[id] = key\n\n\t\tcase err != nil:\n\t\t\tscopedLog.WithField(logfields.Key, key).WithError(err).Error(\"Cannot allocate CRD ID. This key will be allocated with a new numeric identity\")\n\n\t\tdefault:\n\t\t\tscopedLog.Info(\"Migrated identity\")\n\t\t}\n\t\tcancel()\n\t}\n\n\t// Handle IDs that have conflicts. These can be:\n\t// 1- The same ID -> key (from a previous run). This is a no-op\n\t// 2- The same ID but with different labels. This is not ideal. A new ID is\n\t// allocated as a fallback.\n\tfor id, key := range alreadyAllocatedKeys {\n\t\tscopedLog := log.WithFields(logrus.Fields{\n\t\t\tlogfields.Identity: id,\n\t\t\tlogfields.IdentityLabels: key.GetKey(),\n\t\t})\n\n\t\tgetCtx, getCancel := context.WithTimeout(ctx, opTimeout)\n\t\tupstreamKey, err := crdBackend.GetByID(getCtx, id)\n\t\tgetCancel()\n\t\tscopedLog.Debugf(\"Looking at upstream key with this ID: %+v\", upstreamKey)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\tlog.WithError(err).Error(\"ID already allocated but we cannot verify whether it is the same key. It may not be migrated\")\n\t\t\tcontinue\n\n\t\t// nil returns mean the key doesn't exist. This shouldn't happen, but treat\n\t\t// it like a mismatch and allocate it. The allocator will find it if it has\n\t\t// been re-allocated via master key protection.\n\t\tcase upstreamKey == nil && err == nil:\n\t\t\t// fallthrough\n\n\t\tcase key.GetKey() == upstreamKey.GetKey():\n\t\t\tscopedLog.Info(\"ID was already allocated to this key. It is already migrated\")\n\t\t\tcontinue\n\t\t}\n\n\t\tscopedLog = log.WithFields(logrus.Fields{\n\t\t\tlogfields.OldIdentity: id,\n\t\t\tlogfields.IdentityLabels: key.GetKey(),\n\t\t})\n\t\tscopedLog.Warn(\"ID is allocated to a different key in CRD. 
A new ID will be allocated for the this key\")\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), opTimeout)\n\t\tdefer cancel()\n\t\tnewID, actuallyAllocated, _, err := crdAllocator.Allocate(ctx, key)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\tlog.WithError(err).Errorf(\"Cannot allocate new CRD ID for %v\", key)\n\t\t\tcontinue\n\n\t\tcase !actuallyAllocated:\n\t\t\tscopedLog.Debug(\"Expected to allocate ID but this ID->key mapping re-existed\")\n\t\t}\n\n\t\tlog.WithFields(logrus.Fields{\n\t\t\tlogfields.OldIdentity: id,\n\t\t\tlogfields.Identity: newID,\n\t\t\tlogfields.IdentityLabels: key.GetKey(),\n\t\t}).Info(\"New ID allocated for key in CRD\")\n\t}\n\treturn nil\n}",
"func WithStore(s ...correlationIdStore) Option {\n\treturn withStoreOption(s)\n}",
"func SetIdentity(storageDir string, cid, nid uint64) (err error) {\n\tif cid == 0 {\n\t\treturn errors.New(\"raft: cid is zero\")\n\t}\n\tif nid == 0 {\n\t\treturn errors.New(\"raft: nid is zero\")\n\t}\n\td, err := os.Stat(storageDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !d.IsDir() {\n\t\treturn fmt.Errorf(\"raft: %q is not a diretory\", storageDir)\n\t}\n\tif err := lockDir(storageDir); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\terr = unlockDir(storageDir)\n\t}()\n\tval, err := openValue(storageDir, \".id\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cid == val.v1 && nid == val.v2 {\n\t\treturn nil\n\t}\n\tif val.v1 != 0 && val.v2 != 0 {\n\t\treturn ErrIdentityAlreadySet\n\t}\n\treturn val.set(cid, nid)\n}",
"func getKVStoreIdentities(ctx context.Context, kvstoreBackend allocator.Backend) (identities map[idpool.ID]allocator.AllocatorKey, err error) {\n\tidentities = make(map[idpool.ID]allocator.AllocatorKey)\n\tstopChan := make(chan struct{})\n\n\tgo kvstoreBackend.ListAndWatch(ctx, kvstoreListHandler{\n\t\tonAdd: func(id idpool.ID, key allocator.AllocatorKey) {\n\t\t\tlog.Debugf(\"kvstore listed ID: %+v -> %+v\", id, key)\n\t\t\tidentities[id] = key\n\t\t},\n\t\tonListDone: func() {\n\t\t\tclose(stopChan)\n\t\t},\n\t}, stopChan)\n\t// This makes the ListAndWatch exit after the initial listing or on a timeout\n\t// that exits this function\n\n\t// Wait for the listing to complete\n\tselect {\n\tcase <-stopChan:\n\t\tlog.Debug(\"kvstore ID list complete\")\n\n\tcase <-ctx.Done():\n\t\treturn nil, errors.New(\"Timeout while listing identities\")\n\t}\n\n\treturn identities, nil\n}",
"func (ns *NonCachedStorage) ForEachBlockID(consumer BlockIDConsumer, iteratorOptions ...IteratorOption) {\n\n\tns.storage.blocksStorage.ForEachKeyOnly(func(key []byte) bool {\n\t\tblockID := iotago.BlockID{}\n\t\tcopy(blockID[:], key)\n\n\t\treturn consumer(blockID)\n\t}, append(ObjectStorageIteratorOptions(iteratorOptions...), objectstorage.WithIteratorSkipCache(true))...)\n}",
"func Store(s ...correlationIdStore) Option {\n\treturn storeOption(s)\n}",
"func AddIndependentPropertyGeneratorsForManagedClusterStorageProfileFileCSIDriver(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}",
"func (o GetKubernetesClusterIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetKubernetesClusterIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}",
"func PersistNonces(txs []*pb.InBlockTransaction) (error) {\n\t// Only persist if the security is not enabled, otherwise keys should be encrypted with the chainPublicKey\n\tif !comm.SecurityEnabled() {\n\t\tdbHandle := db.GetDBHandle()\n\t\twriteBatch := gorocksdb.NewWriteBatch()\n\t\tdefer writeBatch.Destroy()\n\t\tfor _, tx := range txs {\n\t\t\tswitch tx.Transaction.(type) {\n\t\t\tcase *pb.InBlockTransaction_TransactionSet:\n\t\t\t\tif tx.ConfidentialityLevel == pb.ConfidentialityLevel_CONFIDENTIAL && tx.Nonce != nil {\n\t\t\t\t\tnonce, err := dbHandle.GetFromNoncesCF(encodeTxID(tx.Txid));\n\t\t\t\t\tif err == nil && nonce != nil && !reflect.DeepEqual(nonce, tx.Nonce) {\n\t\t\t\t\t\treturn fmt.Errorf(\"The transaction with %s id was defined before with a different nonce.\", tx.Txid)\n\t\t\t\t\t}\n\t\t\t\t\t// Note that in case an error occurs the possibly previous nonce is simply deleted\n\t\t\t\t\twriteBatch.PutCF(dbHandle.NoncesCF, encodeTxID(tx.Txid), tx.Nonce)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\topt := gorocksdb.NewDefaultWriteOptions()\n\t\tdefer opt.Destroy()\n\t\tdbErr := dbHandle.DB.Write(opt, writeBatch)\n\t\tif dbErr != nil {\n\t\t\treturn dbErr\n\t\t}\n\t\tfor _, tx := range txs {\n\t\t\ttx.Nonce = nil;\n\t\t}\n\t}\n\treturn nil;\n}",
"func StoreCAUniqueIDToCNMap(c context.Context, mapping map[int64]string) error {\n\tbuf := bytes.Buffer{}\n\tenc := gob.NewEncoder(&buf)\n\tif err := enc.Encode(mapping); err != nil {\n\t\treturn err\n\t}\n\t// Note that in practice 'mapping' is usually very small, so we are not\n\t// concerned about 1MB entity size limit.\n\treturn errors.WrapTransient(datastore.Get(c).Put(&CAUniqueIDToCNMap{\n\t\tGobEncodedMap: buf.Bytes(),\n\t}))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
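A sketch of the Has() behavior the query describes. The identity multihash code 0x00 comes from the multicodec table; the has helper and the index-lookup flag are hypothetical stand-ins for the blockstore's real logic.

package main

import "fmt"

type Options struct{ StoreIdentityCIDs bool }

// isIdentity reports whether a multihash function code is IDENTITY
// (code 0x00 in the multicodec table).
func isIdentity(mhCode uint64) bool { return mhCode == 0x00 }

// has sketches the described behavior: with the option off, identity
// CIDs are always reported present; with it on, defer to the index.
func has(o Options, mhCode uint64, indexHas bool) bool {
	if isIdentity(mhCode) && !o.StoreIdentityCIDs {
		return true
	}
	return indexHas
}

func main() {
	fmt.Println(has(Options{StoreIdentityCIDs: false}, 0x00, false)) // true
	fmt.Println(has(Options{StoreIdentityCIDs: true}, 0x00, false))  // false
}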
MaxIndexCidSize specifies the maximum allowed size for indexed CIDs in bytes. Indexing a CID larger than the allowed size results in an ErrCidTooLarge error. | func MaxIndexCidSize(s uint64) Option {
return func(o *Options) {
o.MaxIndexCidSize = s
}
} | [
"func (r *CustomerResourceCollection) MaximumSize() int {\n\treturn len(r.ids)\n}",
"func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {\n\tstat, err := os.Stat(p.indexFilenameByMessageId(fileId))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tentriesInIndex := uint64(stat.Size() / int64(INDEX_ENTRY_SIZE))\n\n\treturn (entriesInIndex - 1 + fileId), nil\n}",
"func (e *awsElastigroup) MaxSize() int { return fi.IntValue(e.obj.Capacity.Maximum) }",
"func MaxValSize(max int) Option {\n\treturn func(lc cacheWithOpts) error {\n\t\treturn lc.setMaxValSize(max)\n\t}\n}",
"func (o ApplicationSettingsOutput) ElasticsearchIndexedFileSizeLimitKb() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *ApplicationSettings) pulumi.IntOutput { return v.ElasticsearchIndexedFileSizeLimitKb }).(pulumi.IntOutput)\n}",
"func MaxBatchSize(size int) DBConfiguration {\n\treturn func(c *CyclePDB) error {\n\t\tc.maxBatchSize = size\n\t\treturn nil\n\t}\n}",
"func GetMaxIndexKey(shardID uint64, key []byte) []byte {\n\tkey = getKeySlice(key, idKeyLength)\n\treturn getIDKey(maxIndexSuffix, shardID, key)\n}",
"func (r *Redis) MaxSize() int64 {\n\treturn r.maxSize\n}",
"func MaxValSize(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}",
"func (ch *clientSecureChannel) MaxMessageSize() uint32 {\n\treturn ch.maxMessageSize\n}",
"func (cd *ContinueDecompress) MaxMessageSize() int {\n\treturn cd.maxMessageSize\n}",
"func MaxBufferSize(size int) Options {\n\treturn func(c *config) {\n\t\tc.maxBufferSize = size\n\t}\n}",
"func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}",
"func GetMaxIndexes() int {\r\n\treturn converter.StrToInt(SysString(MaxIndexes))\r\n}",
"func (c *Cache) SizeMaxBytes() int {\n\tn := 0\n\tfor _, shard := range c.shards {\n\t\tn += shard.SizeMaxBytes()\n\t}\n\treturn n\n}",
"func RaggedCountSparseOutputMaxlength(value int64) RaggedCountSparseOutputAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"maxlength\"] = value\n\t}\n}",
"func (c *Corpus) Size() int {\n\tsize := atomic.LoadInt64(&c.maxid)\n\treturn int(size)\n}",
"func (group *NodeGroup) MaxSize() int {\n\tdefer group.lk.Unlock()\n\tgroup.lk.Lock()\n\treturn group.maxSize\n}",
"func (o ApplicationSettingsOutput) ElasticsearchIndexedFieldLengthLimit() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *ApplicationSettings) pulumi.IntOutput { return v.ElasticsearchIndexedFieldLengthLimit }).(pulumi.IntOutput)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
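A sketch of the size check, modeled on the PutMany negative above, which compares len(c.Bytes()) against the configured limit; the error message and the raw byte-slice input are simplifications of the real CID handling.

package main

import "fmt"

type Options struct{ MaxIndexCidSize uint64 }

type Option func(*Options)

func MaxIndexCidSize(s uint64) Option {
	return func(o *Options) { o.MaxIndexCidSize = s }
}

// checkCidSize mirrors the check seen in the PutMany negative:
// reject CIDs whose serialized form exceeds the configured maximum.
func checkCidSize(o Options, cidBytes []byte) error {
	if size := uint64(len(cidBytes)); size > o.MaxIndexCidSize {
		return fmt.Errorf("cid too large: %d > %d", size, o.MaxIndexCidSize)
	}
	return nil
}

func main() {
	var o Options
	MaxIndexCidSize(64)(&o)
	fmt.Println(checkCidSize(o, make([]byte, 100))) // cid too large: 100 > 64
}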
WithTraversalPrototypeChooser specifies the prototype chooser that should be used when performing traversals in writes from a linksystem. | func WithTraversalPrototypeChooser(t traversal.LinkTargetNodePrototypeChooser) Option {
return func(o *Options) {
o.TraversalPrototypeChooser = t
}
} | [
"func NewSocketsTraversalExtension() *SocketsTraversalExtension {\n\treturn &SocketsTraversalExtension{\n\t\tSocketsToken: traversalSocketsToken,\n\t}\n}",
"func WithSortingByPathAscAndRevisionDesc() GetImplementationOption {\n\treturn func(options *ListImplementationRevisionsOptions) {\n\t\toptions.sortByPathAscAndRevisionDesc = true\n\t}\n}",
"func (result *Result) WithGraphTraversal(graphTraversals []*GraphTraversal) *Result {\n\tresult.GraphTraversals = graphTraversals\n\treturn result\n}",
"func (shim *QueryDirectClient) Traversal(ctx context.Context, in *GraphQuery, opts ...grpc.CallOption) (Query_TraversalClient, error) {\n md, _ := metadata.FromOutgoingContext(ctx)\n ictx := metadata.NewIncomingContext(ctx, md)\n\n\tw := &directQueryTraversal{ictx, make(chan *QueryResult, 100), in, nil}\n if shim.streamServerInt != nil {\n go func() {\n defer w.close()\n info := grpc.StreamServerInfo{\n FullMethod: \"/gripql.Query/Traversal\",\n IsServerStream: true,\n }\n w.e = shim.streamServerInt(shim.server, w, &info, _Query_Traversal_Handler)\n } ()\n return w, nil\n }\n\tgo func() {\n defer w.close()\n\t\tw.e = shim.server.Traversal(in, w)\n\t}()\n\treturn w, nil\n}",
"func (_options *CreateConfigurationOptions) SetConfigurationPrototype(configurationPrototype ConfigurationPrototypeIntf) *CreateConfigurationOptions {\n\t_options.ConfigurationPrototype = configurationPrototype\n\treturn _options\n}",
"func (d6 *Deep6DB) TraversalWithValue(val string, t Traversal, filterspec FilterSpec) (map[string][]map[string]interface{}, error) {\n\n\tdefer timeTrack(time.Now(), \"TraversalWithValue()\")\n\n\treturn traversalWithValue(val, t.TraversalSpec, filterspec, d6.db, d6.AuditLevel)\n\n}",
"func WithTransferCallback(callback base.TransferCallback) Option {\n\treturn func(node base.Node) {\n\t\tnode.SetTransferCallback(callback)\n\t}\n}",
"func WithVPProvider(vpp types.VPProvider) Option {\n\treturn func(so *serverOptions) {\n\t\tso.vpp = vpp\n\t}\n}",
"func NewTraversal(document interface{}, action Action) *Traversal {\n\treturn &Traversal{\n\t\tdocument,\n\t\taction,\n\t}\n}",
"func WithScheme(scheme string) RoutePathReverserOption {\n\treturn func(ps *RoutePathReverser) {\n\t\tps.vscheme = scheme\n\t}\n}",
"func NewTraversal() (g String) {\n\tg.string = \"g\"\n\tg.buffer = bytes.NewBufferString(\"\")\n\treturn\n}",
"func (_SlowWallet *SlowWalletFilterer) WatchTransferProposed(opts *bind.WatchOpts, sink chan<- *SlowWalletTransferProposed, destination []common.Address) (event.Subscription, error) {\n\n\tvar destinationRule []interface{}\n\tfor _, destinationItem := range destination {\n\t\tdestinationRule = append(destinationRule, destinationItem)\n\t}\n\n\tlogs, sub, err := _SlowWallet.contract.WatchLogs(opts, \"TransferProposed\", destinationRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(SlowWalletTransferProposed)\n\t\t\t\tif err := _SlowWallet.contract.UnpackLog(event, \"TransferProposed\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func GoAddressChooser(env jutil.Env, jChooser jutil.Object) rpc.AddressChooser {\n\t// Reference Java chooser; it will be de-referenced when the go function\n\t// created below is garbage-collected (through the finalizer callback we\n\t// setup just below).\n\tchooser := &jniAddressChooser{\n\t\tjChooser: jutil.NewGlobalRef(env, jChooser),\n\t}\n\truntime.SetFinalizer(chooser, func(chooser *jniAddressChooser) {\n\t\tenv, freeFunc := jutil.GetEnv()\n\t\tdefer freeFunc()\n\t\tjutil.DeleteGlobalRef(env, chooser.jChooser)\n\t})\n\treturn chooser\n}",
"func (plan *PlanSpec) TopologicalWalk(visitFn func(node PlanNode) error) error {\n\ttw := newTopologicalWalk(PlanNode.Predecessors, visitFn)\n\n\troots := make([]PlanNode, 0, len(plan.Roots))\n\tfor root := range plan.Roots {\n\t\troots = append(roots, root)\n\t}\n\n\t// Make sure to sort the roots first otherwise\n\t// an in-consistent walk order is possible.\n\tsort.Slice(roots, func(i, j int) bool {\n\t\treturn roots[i].ID() < roots[j].ID()\n\t})\n\n\tfor _, root := range roots {\n\t\tif err := tw.walk(root); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn tw.visit()\n}",
"func NewNextHopTraversalExtension() *NextHopTraversalExtension {\n\treturn &NextHopTraversalExtension{\n\t\tNextHopToken: traversalNextHopToken,\n\t}\n}",
"func JavaAddressChooser(env jutil.Env, chooser rpc.AddressChooser) (jutil.Object, error) {\n\tref := jutil.GoNewRef(&chooser) // Un-refed when the Java AddressChooser object is finalized.\n\tjAddressChooser, err := jutil.NewObject(env, jAddressChooserImplClass, []jutil.Sign{jutil.LongSign}, int64(ref))\n\tif err != nil {\n\t\tjutil.GoDecRef(ref)\n\t\treturn jutil.NullObject, err\n\t}\n\treturn jAddressChooser, nil\n}",
"func WithPrinter(p io.Writer) Option {\n\treturn func(s *initSpec) {\n\t\ts.Printer = p\n\t}\n}",
"func WithProposePresentation(msg *ProposePresentation) presentproof.Opt {\n\torigin := presentproof.ProposePresentation(*msg)\n\n\treturn presentproof.WithProposePresentation(&origin)\n}",
"func AddIndependentPropertyGeneratorsForVirtualNetworkGateway_Spec(gens map[string]gopter.Gen) {\n\tgens[\"ActiveActive\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AzureName\"] = gen.AlphaString()\n\tgens[\"EnableBgp\"] = gen.PtrOf(gen.Bool())\n\tgens[\"EnableDnsForwarding\"] = gen.PtrOf(gen.Bool())\n\tgens[\"EnablePrivateIpAddress\"] = gen.PtrOf(gen.Bool())\n\tgens[\"GatewayType\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_GatewayType_ExpressRoute, VirtualNetworkGatewayPropertiesFormat_GatewayType_LocalGateway, VirtualNetworkGatewayPropertiesFormat_GatewayType_Vpn))\n\tgens[\"Location\"] = gen.PtrOf(gen.AlphaString())\n\tgens[\"Tags\"] = gen.MapOf(gen.AlphaString(), gen.AlphaString())\n\tgens[\"VpnGatewayGeneration\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_Generation1, VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_Generation2, VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_None))\n\tgens[\"VpnType\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_VpnType_PolicyBased, VirtualNetworkGatewayPropertiesFormat_VpnType_RouteBased))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
WithTrustedCAR specifies whether CIDs match the block data as they are read from the CAR files. | func WithTrustedCAR(t bool) Option {
return func(o *Options) {
o.TrustedCAR = t
}
} | [
"func isSpecTrustedCASet(proxyConfig *configv1.ProxySpec) bool {\n\treturn len(proxyConfig.TrustedCA.Name) > 0\n}",
"func WithTrusted(trusted bool) Option {\n\treturn func(linter *Linter) {\n\t\tlinter.trusted = trusted\n\t}\n}",
"func WithTrustedCertificatesFile(path string) Option {\n\treturn func(c *Client) error {\n\t\tc.trustedCertsFile = path\n\t\treturn nil\n\t}\n}",
"func RequireTrusted(req bool) Opt {\n\treturn func(p *params) { p.requireTrust = req }\n}",
"func DARECiphers() []byte {\n\tif Enabled {\n\t\treturn []byte{sio.AES_256_GCM}\n\t}\n\treturn []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305}\n}",
"func (c *Championship) ValidCarIDs() []string {\n\tcars := make(map[string]bool)\n\n\tfor _, class := range c.Classes {\n\t\tfor _, e := range class.Entrants {\n\t\t\tcars[e.Model] = true\n\t\t}\n\t}\n\n\tvar out []string\n\n\tfor car := range cars {\n\t\tout = append(out, car)\n\t}\n\n\treturn out\n}",
"func (c *Car) CanShow() bool {\n\ts := false\n\tif e := os.Getenv(\"BULLETTRAIN_CAR_OS_SHOW\"); e == \"true\" {\n\t\ts = true\n\t}\n\n\treturn s\n}",
"func TrustedOrigins(origins []string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.TrustedOrigins = origins\n\t}\n}",
"func (c *ChampionshipClass) ValidCarIDs() []string {\n\tcars := make(map[string]bool)\n\n\tfor _, e := range c.Entrants {\n\t\tcars[e.Model] = true\n\t}\n\n\tvar out []string\n\n\tfor car := range cars {\n\t\tout = append(out, car)\n\t}\n\n\treturn out\n}",
"func findTrustedCerts(cfg *Config, objects []*Object) ([]*x509.Certificate, error) {\n\tvar out []*x509.Certificate\n\n\tcerts := filterObjectsByClass(objects, \"CKO_CERTIFICATE\")\n\ttrusts := filterObjectsByClass(objects, \"CKO_NSS_TRUST\")\n\n\tfor _, cert := range certs {\n\t\tderBytes := cert.attrs[\"CKA_VALUE\"].value\n\t\thash := sha1.New()\n\t\thash.Write(derBytes)\n\t\tdigest := hash.Sum(nil)\n\n\t\tx509, err := x509.ParseCertificate(derBytes)\n\t\tif err != nil {\n\t\t\t// This is known to occur because of a broken certificate in NSS.\n\t\t\t// https://bugzilla.mozilla.org/show_bug.cgi?id=707995\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(agl): wtc tells me that Mozilla might get rid of the\n\t\t// SHA1 records in the future and use issuer and serial number\n\t\t// to match trust records to certificates (which is what NSS\n\t\t// currently uses). This needs some changes to the crypto/x509\n\t\t// package to keep the raw names around.\n\n\t\tvar trust *Object\n\t\tfor _, possibleTrust := range trusts {\n\t\t\tif bytes.Equal(digest, possibleTrust.attrs[\"CKA_CERT_SHA1_HASH\"].value) {\n\t\t\t\ttrust = possibleTrust\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttrustType := trust.attrs[\"CKA_TRUST_SERVER_AUTH\"].value\n\n\t\tvar trusted bool\n\t\tswitch string(trustType) {\n\t\tcase \"CKT_NSS_NOT_TRUSTED\":\n\t\t\t// An explicitly distrusted cert\n\t\t\ttrusted = false\n\t\tcase \"CKT_NSS_TRUSTED_DELEGATOR\":\n\t\t\t// A cert trusted for issuing SSL server certs.\n\t\t\ttrusted = true\n\t\tcase \"CKT_NSS_TRUST_UNKNOWN\", \"CKT_NSS_MUST_VERIFY_TRUST\":\n\t\t\t// A cert not trusted for issuing SSL server certs, but is trusted for other purposes.\n\t\t\ttrusted = false\n\t\t}\n\n\t\tif !trusted && !cfg.IncludedUntrustedFlag {\n\t\t\tcontinue\n\t\t}\n\n\t\tout = append(out, x509)\n\t}\n\n\treturn out, nil\n}",
"func (_Casper *CasperCaller) Trusted(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Casper.contract.Call(opts, out, \"trusted\")\n\treturn *ret0, err\n}",
"func newLightClientAttackEvidence(conflicted, trusted, common *types.LightBlock) *types.LightClientAttackEvidence {\n\tev := &types.LightClientAttackEvidence{ConflictingBlock: conflicted}\n\t// if this is an equivocation or amnesia attack, i.e. the validator sets are the same, then we\n\t// return the height of the conflicting block else if it is a lunatic attack and the validator sets\n\t// are not the same then we send the height of the common header.\n\tif ev.ConflictingHeaderIsInvalid(trusted.Header) {\n\t\tev.CommonHeight = common.Height\n\t\tev.Timestamp = common.Time\n\t\tev.TotalVotingPower = common.ValidatorSet.TotalVotingPower()\n\t} else {\n\t\tev.CommonHeight = trusted.Height\n\t\tev.Timestamp = trusted.Time\n\t\tev.TotalVotingPower = trusted.ValidatorSet.TotalVotingPower()\n\t}\n\tev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader)\n\treturn ev\n}",
"func (h *ProxyHeaders) ParseAndAddTrusted(cidrs ...string) error {\n\tfor _, cidr := range cidrs {\n\t\t_, net, err := net.ParseCIDR(cidr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\th.Trusted = append(h.Trusted, net)\n\t}\n\treturn nil\n}",
"func (s SessionData) SafetyCar() string {\n\treturn safetyCar[s.SafetyCarStatus]\n}",
"func (s *session) LoadTrust(cancel <-chan struct{}, id uuid.UUID) (Trust, bool, error) {\n\ttoken, err := s.token(cancel)\n\tif err != nil {\n\t\treturn Trust{}, false, errors.WithStack(err)\n\t}\n\n\ttrust, ok, err := s.net.TrustById(cancel, token, id)\n\treturn trust, ok, errors.WithStack(err)\n}",
"func (m *AppManager) ValidiateSideCar() (bool, error) {\n\tif !m.app.DaprEnabled {\n\t\treturn false, fmt.Errorf(\"dapr is not enabled for this app\")\n\t}\n\n\tpodClient := m.client.Pods(m.namespace)\n\n\t// Filter only 'testapp=appName' labeled Pods\n\tpodList, err := podClient.List(metav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"%s=%s\", TestAppLabelKey, m.app.AppName),\n\t})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif len(podList.Items) != int(m.app.Replicas) {\n\t\treturn false, fmt.Errorf(\"expected number of pods for %s: %d, received: %d\", m.app.AppName, m.app.Replicas, len(podList.Items))\n\t}\n\n\t// Each pod must have daprd sidecar\n\tfor _, pod := range podList.Items {\n\t\tdaprdFound := false\n\t\tfor _, container := range pod.Spec.Containers {\n\t\t\tif container.Name == DaprSideCarName {\n\t\t\t\tdaprdFound = true\n\t\t\t}\n\t\t}\n\t\tif !daprdFound {\n\t\t\treturn false, fmt.Errorf(\"cannot find dapr sidecar in pod %s\", pod.Name)\n\t\t}\n\t}\n\n\treturn true, nil\n}",
"func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader,\n\tcommonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error {\n\t// In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single\n\t// verification jump between the common header and the conflicting one\n\tif commonHeader.Height != e.ConflictingBlock.Height {\n\t\terr := commonVals.VerifyCommitLightTrusting(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"skipping verification of conflicting block failed: %w\", err)\n\t\t}\n\n\t\t// In the case of equivocation and amnesia we expect all header hashes to be correctly derived\n\t} else if e.ConflictingHeaderIsInvalid(trustedHeader.Header) {\n\t\treturn errors.New(\"common height is the same as conflicting block height so expected the conflicting\" +\n\t\t\t\" block to be correctly derived yet it wasn't\")\n\t}\n\n\t// Verify that the 2/3+ commits from the conflicting validator set were for the conflicting header\n\tif err := e.ConflictingBlock.ValidatorSet.VerifyCommitLight(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID,\n\t\te.ConflictingBlock.Height, e.ConflictingBlock.Commit); err != nil {\n\t\treturn fmt.Errorf(\"invalid commit from conflicting block: %w\", err)\n\t}\n\n\t// Assert the correct amount of voting power of the validator set\n\tif evTotal, valsTotal := e.TotalVotingPower, commonVals.TotalVotingPower(); evTotal != valsTotal {\n\t\treturn fmt.Errorf(\"total voting power from the evidence and our validator set does not match (%d != %d)\",\n\t\t\tevTotal, valsTotal)\n\t}\n\n\t// check in the case of a forward lunatic attack that monotonically increasing time has been violated\n\tif e.ConflictingBlock.Height > trustedHeader.Height && e.ConflictingBlock.Time.After(trustedHeader.Time) {\n\t\treturn fmt.Errorf(\"conflicting block doesn't violate monotonically increasing time (%v is after %v)\",\n\t\t\te.ConflictingBlock.Time, trustedHeader.Time,\n\t\t)\n\n\t\t// In all other cases check that the hashes of the conflicting header and the trusted header are different\n\t} else if bytes.Equal(trustedHeader.Hash(), e.ConflictingBlock.Hash()) {\n\t\treturn fmt.Errorf(\"trusted header hash matches the evidence's conflicting header hash: %X\",\n\t\t\ttrustedHeader.Hash())\n\t}\n\n\treturn validateABCIEvidence(e, commonVals, trustedHeader)\n}",
"func (c *ClientWithResponses) GetaspecificTrustedSourceWithResponse(ctx context.Context, id string) (*GetaspecificTrustedSourceResponse, error) {\n\trsp, err := c.GetaspecificTrustedSource(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetaspecificTrustedSourceResponse(rsp)\n}",
"func (c *ClientWithResponses) ChangeaspecificTrustedSourceWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader) (*ChangeaspecificTrustedSourceResponse, error) {\n\trsp, err := c.ChangeaspecificTrustedSourceWithBody(ctx, id, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseChangeaspecificTrustedSourceResponse(rsp)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
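The option constructors in this record and the ones that follow all use Go's functional-options pattern: each returns a closure that mutates a shared Options struct. A minimal, self-contained sketch of how such options compose; the Options field set and the applyOptions helper are illustrative assumptions, not the real go-car API:

```go
package main

import "fmt"

// Options carries the toggle mutated by the option constructor above; the
// field set here is an assumption for illustration only.
type Options struct {
	TrustedCAR bool
}

// Option mutates an Options value, matching the shape used in the record.
type Option func(*Options)

func WithTrustedCAR(t bool) Option {
	return func(o *Options) { o.TrustedCAR = t }
}

// applyOptions folds a variadic list of options over a zero-valued Options.
func applyOptions(opts ...Option) Options {
	var o Options
	for _, apply := range opts {
		apply(&o)
	}
	return o
}

func main() {
	o := applyOptions(WithTrustedCAR(true))
	fmt.Println(o.TrustedCAR) // true
}
```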
MaxAllowedHeaderSize overrides the default maximum size (of 32 MiB) that a CARv1 decode (including within a CARv2 container) will allow a header to be without erroring. | func MaxAllowedHeaderSize(max uint64) Option {
return func(o *Options) {
o.MaxAllowedHeaderSize = max
}
} | [
"func (s *fseEncoder) maxHeaderSize() uint32 {\n\tif s.preDefined {\n\t\treturn 0\n\t}\n\tif s.useRLE {\n\t\treturn 8\n\t}\n\treturn (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8\n}",
"func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}",
"func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn uint16(e.hdrSize)\n}",
"func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}",
"func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}",
"func HeaderFieldsTooLarge(message ...interface{}) Err {\n\treturn Boomify(http.StatusRequestHeaderFieldsTooLarge, message...)\n}",
"func (c Config) MaxHeaderBytesOrDefault() int {\n\tif c.MaxHeaderBytes > 0 {\n\t\treturn c.MaxHeaderBytes\n\t}\n\treturn DefaultMaxHeaderBytes\n}",
"func GetHeaderSize(headers http.Header) uint32 {\n\tvar size uint32\n\tfor k, v := range headers {\n\t\tsize += uint32(len(k))\n\t\tfor _, val := range v {\n\t\t\tsize += uint32(len(val))\n\t\t}\n\t}\n\treturn size\n}",
"func (r *Responder) RequestHeaderFieldsTooLarge() { r.write(http.StatusRequestHeaderFieldsTooLarge) }",
"func (st *Settings) SetMaxHeaderListSize(size uint32) {\n\tst.headerSize = size\n}",
"func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn e.lower.MaxHeaderLength()\n}",
"func (e *Endpoint) MaxHeaderLength() uint16 {\n\treturn e.lower.MaxHeaderLength()\n}",
"func getMaxRequestHeaderParam(serverParams []tc.Parameter) (int, []string) {\n\twarnings := []string{}\n\tglobalRequestHeaderMaxSize := TsDefaultRequestHeaderMaxSize\n\tparams, paramWarns := paramsToMap(filterParams(serverParams, RecordsFileName, \"\", \"\", \"location\"))\n\twarnings = append(warnings, strings.Join(paramWarns, \" \"))\n\tif val, ok := params[\"CONFIG proxy.config.http.request_header_max_size\"]; ok {\n\t\tsize := strings.Fields(val)\n\t\tsizeI, err := strconv.Atoi(size[1])\n\t\tif err != nil {\n\t\t\twarnings = append(warnings, \"Couldn't convert string to int for max_req_header_size\")\n\t\t} else {\n\t\t\tglobalRequestHeaderMaxSize = sizeI\n\t\t}\n\t}\n\treturn globalRequestHeaderMaxSize, warnings\n}",
"func (s *Server) SetMaxHeaderBytes(b int) {\n\ts.config.MaxHeaderBytes = b\n}",
"func (f *frame) headerLength() int {\n\treturn 8 + 1 + 4\n}",
"func MaxEncodedLen(n int) int {}",
"func writeHeaderSize(headerLength int) []byte {\n\ttotalHeaderLen := make([]byte, 4)\n\ttotalLen := uint32(headerLength)\n\tbinary.BigEndian.PutUint32(totalHeaderLen, totalLen)\n\treturn totalHeaderLen\n}",
"func (r Response) RequestHeaderFieldsTooLarge(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.RequestHeaderFieldsTooLarge, payload, header...)\n}",
"func HeaderSize(h http.Header) int {\n\tl := 0\n\tfor field, value := range h {\n\t\tl += len(field)\n\t\tfor _, v := range value {\n\t\t\tl += len(v)\n\t\t}\n\t}\n\n\treturn l\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
MaxAllowedSectionSize overrides the default maximum size (of 8 MiB) that a CARv1 decode (including within a CARv2 container) will allow a section to be without erroring. Typically IPLD blocks should be under 2 MiB (ideally under 1 MiB), so unless atypical data is expected, this should not be a large value. | func MaxAllowedSectionSize(max uint64) Option {
return func(o *Options) {
o.MaxAllowedSectionSize = max
}
} | [
"func MaxAllowedHeaderSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedHeaderSize = max\n\t}\n}",
"func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}",
"func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}",
"func (s *fseEncoder) maxHeaderSize() uint32 {\n\tif s.preDefined {\n\t\treturn 0\n\t}\n\tif s.useRLE {\n\t\treturn 8\n\t}\n\treturn (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8\n}",
"func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn uint16(e.hdrSize)\n}",
"func (f FormatHeader) BlockSize() uint16 {\n\treturn (f.BitsPerSample / 8) * f.NumChannels\n}",
"func (s Section) Size() uint32 { return bytes.ReadUint32(s[6:]) }",
"func MaxBlockLen(ct CompressionType) uint64 {\n\tif ct == Snappy {\n\t\t// https://github.com/golang/snappy/blob/2a8bb927dd31d8daada140a5d09578521ce5c36a/encode.go#L76\n\t\treturn 6 * (0xffffffff - 32) / 7\n\t}\n\treturn math.MaxUint64\n}",
"func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}",
"func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}",
"func (s *SE) PMRLLimitTotalSize() uint64 {\n\treturn 4\n}",
"func (e *awsElastigroup) MaxSize() int { return fi.IntValue(e.obj.Capacity.Maximum) }",
"func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn e.lower.MaxHeaderLength()\n}",
"func (c Config) MaxHeaderBytesOrDefault() int {\n\tif c.MaxHeaderBytes > 0 {\n\t\treturn c.MaxHeaderBytes\n\t}\n\treturn DefaultMaxHeaderBytes\n}",
"func (st *Settings) SetMaxHeaderListSize(size uint32) {\n\tst.headerSize = size\n}",
"func (cd *ContinueDecompress) MaxMessageSize() int {\n\treturn cd.maxMessageSize\n}",
"func MaxPktSize(size int) CodecOpt {\n\treturn func(c *Codec) {\n\t\tif size > maxPktSize || size <= 0 {\n\t\t\tc.maxPktSize = maxPktSize\n\t\t}\n\t\tc.maxPktSize = size\n\t}\n}",
"func readSection(r io.Reader, sSize int, maxSize uint64) (data []byte, nr io.Reader, size int, err error) {\n\t// We are not going to lose data by copying a smaller var into a larger one.\n\tvar sectionSize uint64\n\tswitch sSize {\n\tcase 2:\n\t\t// Read uint16.\n\t\tvar size16 uint16\n\t\terr = binary.Read(r, binary.LittleEndian, &size16)\n\t\tif err != nil {\n\t\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: read size %d bytes - %s\", sSize, err.Error())\n\t\t}\n\t\tsectionSize = uint64(size16)\n\t\t// Add bytes to the start of data []byte.\n\t\tdata = uint16Byte(size16)\n\tcase 4:\n\t\t// Read uint32.\n\t\tvar size32 uint32\n\t\terr = binary.Read(r, binary.LittleEndian, &size32)\n\t\tif err != nil {\n\t\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: read size %d bytes - %s\", sSize, err.Error())\n\t\t}\n\t\tsectionSize = uint64(size32)\n\t\t// Add bytes to the start of data []byte.\n\t\tdata = uint32Byte(size32)\n\tcase 8:\n\t\t// Read uint64 or sectionSize.\n\t\terr = binary.Read(r, binary.LittleEndian, §ionSize)\n\t\tif err != nil {\n\t\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: read size %d bytes - %s\", sSize, err.Error())\n\t\t}\n\t\t// Add bytes to the start of data []byte.\n\t\tdata = uint64Byte(sectionSize)\n\tdefault:\n\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: invalid sSize - got %v\", sSize)\n\t}\n\n\t// Create a []byte of sectionSize-4 and read that many bytes from io.Reader.\n\tcomputedSize := sectionSize - uint64(sSize)\n\tif computedSize > maxSize {\n\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: invalid computed size got %d; expected a size < %d\", computedSize, maxSize)\n\t}\n\n\ttempData := make([]byte, computedSize)\n\terr = binary.Read(r, binary.LittleEndian, &tempData)\n\tif err != nil {\n\t\treturn data, nr, size, fmt.Errorf(\"golnk.readSection: read section %d bytes - %s\", sectionSize-uint64(sSize), err.Error())\n\t}\n\n\t// If this is successful, append it to data []byte.\n\tdata = append(data, tempData...)\n\n\t// Create a reader from the unread bytes.\n\tnr = bytes.NewReader(tempData)\n\n\treturn data, nr, int(sectionSize), nil\n}",
"func MaxSize32(length int) int {\n\tnumControlBytes := (length + 3) / 4\n\tmaxNumDataBytes := 4 * length\n\treturn numControlBytes + maxNumDataBytes\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
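The two size-limit options above (MaxAllowedHeaderSize and MaxAllowedSectionSize) only record a ceiling; a decoder is expected to compare lengths against them before allocating. A hedged sketch of that guard, reusing the Option/Options shape from the earlier example; checkSectionSize is hypothetical and the default values are taken from the query text:

```go
package main

import (
	"errors"
	"fmt"
)

// Options with the two limits described by the records above; the defaults
// (32 MiB header, 8 MiB section) come from the query text, not verified API.
type Options struct {
	MaxAllowedHeaderSize  uint64
	MaxAllowedSectionSize uint64
}

type Option func(*Options)

func MaxAllowedHeaderSize(max uint64) Option {
	return func(o *Options) { o.MaxAllowedHeaderSize = max }
}

func MaxAllowedSectionSize(max uint64) Option {
	return func(o *Options) { o.MaxAllowedSectionSize = max }
}

// checkSectionSize is a hypothetical guard a CARv1 decoder might run
// before reading a section of the given length.
func checkSectionSize(o Options, n uint64) error {
	if n > o.MaxAllowedSectionSize {
		return errors.New("carv1: section exceeds allowed size")
	}
	return nil
}

func main() {
	o := Options{MaxAllowedHeaderSize: 32 << 20, MaxAllowedSectionSize: 8 << 20}
	MaxAllowedSectionSize(2 << 20)(&o)      // IPLD blocks should stay under 2 MiB
	fmt.Println(checkSectionSize(o, 4<<20)) // error: 4 MiB section over the 2 MiB cap
}
```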
WriteAsCarV1 is a write option which makes a CAR interface (blockstore or storage) write the output as a CARv1 only, with no CARv2 header or index. Indexing is used internally during write but is discarded upon finalization. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root go-car/v2 package. | func WriteAsCarV1(asCarV1 bool) Option {
return func(o *Options) {
o.WriteAsCarV1 = asCarV1
}
} | [
"func WriteV1Header(h *tar.Header, w io.Writer) {\n\tfor _, elem := range v1TarHeaderSelect(h) {\n\t\tw.Write([]byte(elem[0] + elem[1]))\n\t}\n}",
"func (r *RAMOutputStream) WriteToV1(bytes []byte) error {\n\terr := r.flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tend := int(r.file.length)\n\tpos, buffer, bytesUpto := 0, 0, 0\n\n\tfor pos < end {\n\t\tlength := r.bufferSize\n\t\tnextPos := pos + length\n\t\tif nextPos > end {\n\t\t\tlength = end - pos\n\t\t}\n\n\t\tsrc := r.file.getBuffer(buffer)[:length]\n\t\tcopy(bytes[bytesUpto:], src)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbuffer++\n\t\tbytesUpto += length\n\t\tpos = nextPos\n\t}\n\treturn nil\n}",
"func ExampleWriteTo() {\n\t// Open the CARv2 file\n\tsrc := \"../testdata/sample-wrapped-v2.car\"\n\tcr, err := carv2.OpenReader(src)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := cr.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\t// Read and unmarshall index within CARv2 file.\n\tir, err := cr.IndexReader()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tidx, err := index.ReadFrom(ir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Store the index alone onto destination file.\n\tf, err := os.CreateTemp(os.TempDir(), \"example-index-*.carindex\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := f.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\t_, err = index.WriteTo(idx, f)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Seek to the beginning of tile to read it back.\n\t_, err = f.Seek(0, io.SeekStart)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Read and unmarshall the destination file as a separate index instance.\n\treReadIdx, err := index.ReadFrom(f)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Expect indices to be equal.\n\tif reflect.DeepEqual(idx, reReadIdx) {\n\t\tfmt.Printf(\"Saved index file matches the index embedded in CARv2 at %v.\\n\", src)\n\t} else {\n\t\tpanic(\"expected to get the same index as the CARv2 file\")\n\t}\n\n\t// Output:\n\t// Saved index file matches the index embedded in CARv2 at ../testdata/sample-wrapped-v2.car.\n}",
"func (cs *CS) Write(path string) {\n\tr1cs := NewR1CS(cs)\n\tif err := gob.Write(path, r1cs); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (crc *CasbinRuleCreate) SetV1(s string) *CasbinRuleCreate {\n\tcrc.mutation.SetV1(s)\n\treturn crc\n}",
"func (c ConfChange) AsV1() (ConfChange, bool) {\n\treturn c, true\n}",
"func (_PBridge *PBridgeTransactor) UpgradeContractS1(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _PBridge.contract.Transact(opts, \"upgradeContractS1\")\n}",
"func (f *rcFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) (reqComp feature.RequiredComponents) {\n\treturn\n}",
"func (d *Encoder) One(v interface{}) error {\n\theader := deriveHeader(v)\n\trecord := makeRecord(v, header)\n\tif !d.headWritten {\n\t\td.Csvwriter.Write(header)\n\t\td.headWritten = true\n\t}\n\n\terr := d.Csvwriter.Write(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func bindV1(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(V1ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func (cc *CarCreate) SetCarNo(s string) *CarCreate {\n\tcc.mutation.SetCarNo(s)\n\treturn cc\n}",
"func (a *AsmBuf) Put1(x byte)",
"func (a *Client) SyncDistroXV1ByName(params *SyncDistroXV1ByNameParams) error {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewSyncDistroXV1ByNameParams()\n\t}\n\n\t_, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"syncDistroXV1ByName\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v1/distrox/name/{name}/sync\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &SyncDistroXV1ByNameReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}",
"func NewV1Encoder(b []byte) *V1Encoder {\n\treturn &V1Encoder{\n\t\tdata: b,\n\t}\n}",
"func (a *Client) PutCredentialV1(params *PutCredentialV1Params) (*PutCredentialV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPutCredentialV1Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"putCredentialV1\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/v1/credentials\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PutCredentialV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PutCredentialV1OK), nil\n\n}",
"func (a *Agency) AddCar(a1 Car){\n\ta.Cars=append(a.Cars, a1)\n}",
"func (w *Writer) Put1(n int) *Writer {\n\tif n < 0 || 1<<8 <= n {\n\t\tpanic(\"stor.Writer.Put1 value outside range\")\n\t}\n\tw.buf = append(w.buf,\n\t\tbyte(n))\n\treturn w\n}",
"func (w *RWWrapper) WriteHeader(statusCode int) {\n\tif w.statusWritten {\n\t\treturn\n\t}\n\n\tw.configureHeader()\n\tw.rw.WriteHeader(statusCode)\n\tw.statusWritten = true\n}",
"func (s *SupportedFieldTypeDetails) SetV1(v *FieldTypeDetails) *SupportedFieldTypeDetails {\n\ts.V1 = v\n\treturn s\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
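WriteAsCarV1 is a plain boolean toggle on the same Options struct; per the query text, a writer branches on it at finalization time to decide whether to emit the CARv2 framing. A sketch of that decision only — finalize is a hypothetical stand-in, not the real storage code:

```go
package main

import "fmt"

// finalize is a hypothetical stand-in for what a CAR storage writer does
// on Finalize; writeAsCarV1 is the value stored by the WriteAsCarV1 option.
func finalize(writeAsCarV1 bool) string {
	if writeAsCarV1 {
		// CARv1 output only: the index built during the write is discarded.
		return "carv1 payload, no header/index"
	}
	// Default CARv2 output: header + CARv1 payload + index.
	return "carv2 header + payload + index"
}

func main() {
	fmt.Println(finalize(true))
	fmt.Println(finalize(false))
}
```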
AllowDuplicatePuts is a write option which makes a CAR interface (blockstore or storage) not deduplicate blocks in Put and PutMany. The default is to deduplicate, which matches the current semantics of go-ipfs-blockstore v1. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root go-car/v2 package. | func AllowDuplicatePuts(allow bool) Option {
return func(o *Options) {
o.BlockstoreAllowDuplicatePuts = allow
}
} | [
"func AllowDuplicatePuts(allow bool) carv2.Option {\n\treturn func(o *carv2.Options) {\n\t\to.BlockstoreAllowDuplicatePuts = allow\n\t}\n}",
"func DisallowDuplicateKey() DecodeOption {\n\treturn func(d *Decoder) error {\n\t\td.disallowDuplicateKey = true\n\t\treturn nil\n\t}\n}",
"func ChangeAllowDuplication(m *KaraokeManager, allowdupes bool) error {\n\tstate, err := db.GetEngineState(m, m.Config.KaraokeConfig.SessionName)\n\tif err != nil {\n\t\tm.Logger.Printf(\"Failed to get session data due to error %q\", err)\n\t}\n\tstate.AllowingDupes = allowdupes\n\terr = db.UpdateEngineState(m, *state)\n\tif err != nil {\n\t\tm.Logger.Printf(\"Failed to update duplicate rule due to error %q\", err)\n\t\treturn err\n\t}\n\tFetchAndUpdateListenersQueue(m, 1)\n\treturn nil\n}",
"func (c *Client) PutDuplicate(oldName, newName upspin.PathName) (*upspin.DirEntry, error) {\n\tconst op errors.Op = \"client.PutDuplicate\"\n\tm, s := newMetric(op)\n\tdefer m.Done()\n\n\treturn c.dupOrRename(op, oldName, newName, false, s)\n}",
"func (blk *Block) duplicate() *Block {\n\tdup := &Block{}\n\n\t// Copy over.\n\t*dup = *blk\n\n\tdupContents := contentstream.ContentStreamOperations{}\n\tfor _, op := range *blk.contents {\n\t\tdupContents = append(dupContents, op)\n\t}\n\tdup.contents = &dupContents\n\n\treturn dup\n}",
"func (b *ReadWrite) PutMany(blks []blocks.Block) error {\n\tb.ronly.mu.Lock()\n\tdefer b.ronly.mu.Unlock()\n\n\tif b.ronly.closed {\n\t\treturn errClosed\n\t}\n\n\tfor _, bl := range blks {\n\t\tc := bl.Cid()\n\n\t\t// If StoreIdentityCIDs option is disabled then treat IDENTITY CIDs like IdStore.\n\t\tif !b.opts.StoreIdentityCIDs {\n\t\t\t// Check for IDENTITY CID. If IDENTITY, ignore and move to the next block.\n\t\t\tif _, ok, err := isIdentity(c); err != nil {\n\t\t\t\treturn err\n\t\t\t} else if ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Check if its size is too big.\n\t\t// If larger than maximum allowed size, return error.\n\t\t// Note, we need to check this regardless of whether we have IDENTITY CID or not.\n\t\t// Since multhihash codes other than IDENTITY can result in large digests.\n\t\tcSize := uint64(len(c.Bytes()))\n\t\tif cSize > b.opts.MaxIndexCidSize {\n\t\t\treturn &carv2.ErrCidTooLarge{MaxSize: b.opts.MaxIndexCidSize, CurrentSize: cSize}\n\t\t}\n\n\t\tif !b.opts.BlockstoreAllowDuplicatePuts {\n\t\t\tif b.ronly.opts.BlockstoreUseWholeCIDs && b.idx.hasExactCID(c) {\n\t\t\t\tcontinue // deduplicated by CID\n\t\t\t}\n\t\t\tif !b.ronly.opts.BlockstoreUseWholeCIDs {\n\t\t\t\t_, err := b.idx.Get(c)\n\t\t\t\tif err == nil {\n\t\t\t\t\tcontinue // deduplicated by hash\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tn := uint64(b.dataWriter.Position())\n\t\tif err := util.LdWrite(b.dataWriter, c.Bytes(), bl.RawData()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb.idx.insertNoReplace(c, n)\n\t}\n\treturn nil\n}",
"func SetDupesAllowed(m *KaraokeManager, newDupePermissions bool) error {\n\tnewstate, err := db.GetEngineState(m, m.GetConfig().KaraokeConfig.SessionName)\n\tif err != nil {\n\t\tm.Logger.Printf(\"Failed to get session data due to error %q\", err)\n\t}\n\tnewstate.AllowingDupes = newDupePermissions\n\terr = db.UpdateEngineState(m, *newstate)\n\tif err != nil {\n\t\tm.Logger.Printf(\"Failed to set duplicate request permissions due to error %q\", err)\n\t\treturn err\n\t} else {\n\t\treturn nil\n\t}\n}",
"func (kv *ShardKV) IsDupPutAppend(args *PutAppendArgs) bool {\n\t_, ok := kv.prevRequests[args.CurrId]\n\n\t// Duplicate RPC request\n\tif ok {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (c Config) Replicate(obj ObjectOpts) bool {\n\n\tfor _, rule := range c.FilterActionableRules(obj) {\n\n\t\tif obj.DeleteMarker {\n\t\t\t// Indicates whether MinIO will remove a delete marker. By default, delete markers\n\t\t\t// are not replicated.\n\t\t\treturn false\n\t\t}\n\t\tif obj.SSEC {\n\t\t\treturn false\n\t\t}\n\t\tif obj.VersionID != \"\" && !obj.IsLatest {\n\t\t\treturn false\n\t\t}\n\t\tif rule.Status == Disabled {\n\t\t\tcontinue\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}",
"func (handle Handle) Duplicate(src, dest Handle, access DuplicateAccess) (Handle, error) {\n\tvar destHandle Handle\n\terrno, _, err := duplicateHandle.Call(\n\t\tuintptr(src),\n\t\tuintptr(handle),\n\t\tuintptr(dest),\n\t\tuintptr(unsafe.Pointer(&destHandle)),\n\t\tuintptr(access),\n\t\t0,\n\t\t0,\n\t)\n\tif winerrno.Errno(errno) != winerrno.Success {\n\t\treturn destHandle, nil\n\t}\n\treturn Handle(0), os.NewSyscallError(\"DuplicateHandle\", err)\n}",
"func AllowOverwrite(existing, new Source) bool {\n\tswitch existing {\n\n\t// KubeAPIServer state can only be overwritten by other kube-apiserver\n\t// state.\n\tcase KubeAPIServer:\n\t\treturn new == KubeAPIServer\n\n\t// Local state can only be overwritten by other local state or\n\t// kube-apiserver state.\n\tcase Local:\n\t\treturn new == Local || new == KubeAPIServer\n\n\t// KVStore can be overwritten by other kvstore, local state, or\n\t// kube-apiserver state.\n\tcase KVStore:\n\t\treturn new == KVStore || new == Local || new == KubeAPIServer\n\n\t// Custom-resource state can be overwritten by everything except\n\t// generated, unspecified and Kubernetes (non-CRD) state\n\tcase CustomResource:\n\t\treturn new != Generated && new != Unspec && new != Kubernetes\n\n\t// Kubernetes state can be overwritten by everything except generated\n\t// and unspecified state\n\tcase Kubernetes:\n\t\treturn new != Generated && new != Unspec\n\n\t// Generated can be overwritten by everything except by Unspecified\n\tcase Generated:\n\t\treturn new != Unspec\n\n\t// Unspecified state can be overwritten by everything\n\tcase Unspec:\n\t\treturn true\n\t}\n\n\treturn true\n}",
"func isDuplicate(m map[item]struct{}, name, addr string, port uint16) bool {\n\tif addr != \"\" {\n\t\t_, ok := m[item{name, 0, addr}]\n\t\tif !ok {\n\t\t\tm[item{name, 0, addr}] = struct{}{}\n\t\t}\n\t\treturn ok\n\t}\n\t_, ok := m[item{name, port, \"\"}]\n\tif !ok {\n\t\tm[item{name, port, \"\"}] = struct{}{}\n\t}\n\treturn ok\n}",
"func duplicateBotConfig(cfgs []*BotConfig) error {\n\tmkts := make(map[string]struct{})\n\n\tfor _, cfg := range cfgs {\n\t\tmkt := dexMarketID(cfg.Host, cfg.BaseAsset, cfg.QuoteAsset)\n\t\tif _, found := mkts[mkt]; found {\n\t\t\treturn fmt.Errorf(\"duplicate bot config for market %s\", mkt)\n\t\t}\n\t\tmkts[mkt] = struct{}{}\n\t}\n\n\treturn nil\n}",
"func mergeDuplicateShares(tripId int64, planId int64, members []Member, sharesWithDuplicates []Share) ([]Share, bool) {\n\n\t// creating new shares out of benefactor\n\tshares := createSharesOutOfBenefactor(planId, members, sharesWithDuplicates)\n\n\t//merging shares by memberid\n\tmemberMap := make(map[string]*Share)\n\tfor i := 0; i < len(shares); i++ {\n\t\tshare := shares[i]\n\t\t// allow zero planid for calculating total shares\n\t\tif isCurrentPlan(share.Planid, planId) {\n\t\t\tif savedShare, ok := memberMap[share.Memberemail]; ok {\n\t\t\t\tif savedShare.Id == 0 { // dynamic share created from createSharesOutOfBenefactor\n\t\t\t\t\tsavedShare.Id = share.Id\n\t\t\t\t\tsavedShare.Membername = share.Membername\n\t\t\t\t\tsavedShare.Memberavatar = share.Memberavatar\n\t\t\t\t}\n\t\t\t\tsavedShare.Paid = savedShare.Paid + share.Paid\n\t\t\t\tsavedShare.Share = savedShare.Share + share.Share\n\t\t\t} else {\n\t\t\t\tmemberMap[share.Memberemail] = &share\n\t\t\t}\n\t\t}\n\t}\n\n\t// In go there is no way get set of values from map.\n\tallShares := make([]Share, 0)\n\t// find whether there are shares already present in the db\n\tsharesPresentAlready := false\n\tfor _, share := range memberMap {\n\t\tif share.Id != 0 {\n\t\t\tsharesPresentAlready = true\n\t\t}\n\t\tallShares = append(allShares, *share)\n\t}\n\treturn allShares, sharesPresentAlready\n}",
"func (o TransferJobTransferSpecTransferOptionsOutput) OverwriteObjectsAlreadyExistingInSink() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v TransferJobTransferSpecTransferOptions) *bool { return v.OverwriteObjectsAlreadyExistingInSink }).(pulumi.BoolPtrOutput)\n}",
"func (dto *DashboardAclInfoDTO) IsDuplicateOf(other *DashboardAclInfoDTO) bool {\n\treturn dto.hasSameRoleAs(other) || dto.hasSameUserAs(other) || dto.hasSameTeamAs(other)\n}",
"func (f *PushFilter) Duplicate() *PushFilter {\n\n\tnf := NewPushFilter()\n\n\tfor id, types := range f.Identities {\n\t\tnf.FilterIdentity(id, types...)\n\t}\n\n\tfor k, v := range f.Params {\n\t\tnf.SetParameter(k, v...)\n\t}\n\n\treturn nf\n}",
"func (o TransferJobTransferSpecTransferOptionsPtrOutput) OverwriteObjectsAlreadyExistingInSink() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobTransferSpecTransferOptions) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.OverwriteObjectsAlreadyExistingInSink\n\t}).(pulumi.BoolPtrOutput)\n}",
"func (cache *diskBlockCacheWrapped) Put(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID, buf []byte,\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf,\n\tcacheType DiskBlockCacheType) error {\n\t// This is a write operation but we are only reading the pointers to the\n\t// caches. So we use a read lock.\n\tcache.mtx.RLock()\n\tdefer cache.mtx.RUnlock()\n\tif cacheType == DiskBlockSyncCache && cache.syncCache != nil {\n\t\tworkingSetCache := cache.workingSetCache\n\t\terr := cache.syncCache.Put(ctx, tlfID, blockID, buf, serverHalf)\n\t\tif err == nil {\n\t\t\tcache.deleteGroup.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer cache.deleteGroup.Done()\n\t\t\t\t// Don't catch the errors -- this is just best effort.\n\t\t\t\t_, _, _ = workingSetCache.Delete(ctx, []kbfsblock.ID{blockID})\n\t\t\t}()\n\t\t\treturn nil\n\t\t}\n\t\t// Otherwise drop through and put it into the working set cache.\n\t}\n\t// No need to put it in the working cache if it's already in the\n\t// sync cache.\n\tif cache.syncCache != nil {\n\t\t_, _, _, err := cache.syncCache.Get(ctx, tlfID, blockID)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn cache.workingSetCache.Put(ctx, tlfID, blockID, buf, serverHalf)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
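The PutMany negative in this record (the ReadWrite blockstore) shows how the flag is consumed: when BlockstoreAllowDuplicatePuts is false, a block whose CID is already indexed is skipped. A reduced sketch of that dedup loop; the seen map stands in for the real index, which also supports lookup by multihash:

```go
package main

import "fmt"

// putMany mimics the dedup branch from the PutMany negative above:
// with allowDuplicates false, a CID already written is skipped.
// Blocks are simplified to CID strings for this sketch.
func putMany(cids []string, allowDuplicates bool) []string {
	seen := make(map[string]bool)
	var written []string
	for _, c := range cids {
		if !allowDuplicates && seen[c] {
			continue // deduplicated by CID
		}
		seen[c] = true
		written = append(written, c)
	}
	return written
}

func main() {
	blocks := []string{"bafyA", "bafyB", "bafyA"}
	fmt.Println(putMany(blocks, false)) // [bafyA bafyB]
	fmt.Println(putMany(blocks, true))  // [bafyA bafyB bafyA]
}
```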
Minutes returns the duration as a floating point number of minutes. | func (d Duration) Minutes() float64 {
return time.Duration(d).Minutes()
} | [
"func (period Period) Minutes() int {\n\treturn int(period.MinutesFloat())\n}",
"func (f *Formatter) Minutes() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d minutes\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Minutes()))\n}",
"func GetDurationInMillseconds(start time.Time) float64 {\n\tend := time.Now()\n\tduration := end.Sub(start)\n\tmilliseconds := float64(duration) / float64(time.Millisecond)\n\trounded := float64(int(milliseconds*100+.5)) / 100\n\treturn rounded\n}",
"func (p *pvc) minutes() float64 {\n\tif p == nil {\n\t\treturn 0.0\n\t}\n\n\treturn p.End.Sub(p.Start).Minutes()\n}",
"func (period Period) MinutesFloat() float32 {\n\treturn float32(period.minutes) / 10\n}",
"func (c Clock) minutes() int {\n\treturn c.m - (c.hours() * 60)\n}",
"func getMinutes(time *int) int {\n\treturn getTimeScale(time, 60)\n}",
"func minutesToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Minute)\n}",
"func Ms(duration time.Duration) float64 {\n\treturn float64(duration / time.Millisecond)\n}",
"func roundDuration(d time.Duration) time.Duration {\n\trd := time.Duration(d.Minutes()) * time.Minute\n\tif rd < d {\n\t\trd += time.Minute\n\t}\n\treturn rd\n}",
"func (s *UpdateInstantBooking) SetDurationInMinutes(v int64) *UpdateInstantBooking {\n\ts.DurationInMinutes = &v\n\treturn s\n}",
"func (s *CreateInstantBooking) SetDurationInMinutes(v int64) *CreateInstantBooking {\n\ts.DurationInMinutes = &v\n\treturn s\n}",
"func DaytoMinutes(day float32) float32 {\n\treturn day * 1440\n}",
"func (s *GetChannelScheduleInput) SetDurationMinutes(v string) *GetChannelScheduleInput {\n\ts.DurationMinutes = &v\n\treturn s\n}",
"func (o TransferJobScheduleStartTimeOfDayOutput) Minutes() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TransferJobScheduleStartTimeOfDay) int { return v.Minutes }).(pulumi.IntOutput)\n}",
"func durToMsec(dur time.Duration) string {\n\treturn fmt.Sprintf(\"%dms\", dur/time.Millisecond)\n}",
"func (cvr Converter) SecondsToMinutes(s Seconds) Minutes {\r\n\tvar bes = Minutes(s / 60)\r\n\treturn bes\r\n}",
"func (d Dispatcher) ExecDurationMinutes(id string, hash string) (float64, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.GetDuration().Minutes(), nil\n}",
"func (fn *formulaFuncs) MDURATION(argsList *list.List) formulaArg {\n\targs := fn.prepareDurationArgs(\"MDURATION\", argsList)\n\tif args.Type != ArgList {\n\t\treturn args\n\t}\n\tduration := fn.duration(args.List[0], args.List[1], args.List[2], args.List[3], args.List[4], args.List[5])\n\tif duration.Type != ArgNumber {\n\t\treturn duration\n\t}\n\treturn newNumberFormulaArg(duration.Number / (1 + args.List[3].Number/args.List[4].Number))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Nanoseconds returns the duration as an integer nanosecond count. | func (d Duration) Nanoseconds() int64 {
return time.Duration(d).Nanoseconds()
} | [
"func nanoseconds(t uint64) time.Duration {\n\treturn time.Duration(t) * time.Nanosecond\n}",
"func (s Stopwatch) Nanoseconds() int64 {\n\treturn s.acc.Nanoseconds()\n}",
"func (ft *filetime) Nanoseconds() int64 {\n\t// 100-nanosecond intervals since January 1, 1601\n\tnsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)\n\t// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)\n\tnsec -= 116444736000000000\n\t// convert into nanoseconds\n\tnsec *= 100\n\treturn nsec\n}",
"func (f *Formatter) Nanoseconds() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d nanoseconds\\n\"\n\t}\n\treturn fmt.Sprintf(format, f.duration.Nanoseconds())\n}",
"func (t Time) Nanosecond() int {}",
"func nanotime() int64",
"func (i InternationalColorTime) Nanosecond() int {\n\treturn int(i.nanos % 1e9)\n}",
"func (dt DateTime) Nanosecond() int {\n\treturn dt.Time().Nanosecond()\n}",
"func TimeUnitNano(unit string) int64 {\n\tswitch unit {\n\tcase TimeUnitSeconds:\n\t\treturn int64(time.Second)\n\tcase TimeUnitMilliseconds:\n\t\treturn int64(time.Millisecond)\n\tcase TimeUnitMicroseconds:\n\t\treturn int64(time.Microsecond)\n\tdefault:\n\t\treturn int64(time.Nanosecond)\n\t}\n}",
"func ticksToNanoseconds(ticks timeUnit) int64 {\n\t// The following calculation is actually the following, but with both sides\n\t// reduced to reduce the risk of overflow:\n\t// ticks * 1e9 / (390000000 / 50)\n\t// 50 is the CLINT divider and 390000000 is the CPU frequency.\n\treturn int64(ticks) * 5000 / 39\n}",
"func Nanosecond() int64 {\n\treturn time.Now().UnixNano()\n}",
"func (xt XSDTime) Nanosecond() int {\n\treturn xt.innerTime.Nanosecond()\n}",
"func TimevalToNsec(tv Timeval) int64 { return tv.Nano() }",
"func run_timeNano() int64",
"func (t Time) Microseconds() int64 {\n\treturn time.Time(t).UnixNano() / DivideMicroseconds\n}",
"func to_ms(nano int64) int64 {\n\treturn nano / int64(time.Millisecond)\n}",
"func NanoTime() int64",
"func (d Duration) Microseconds() int64 {\n\treturn int64(d)\n}",
"func TimespecToNsec(ts Timespec) int64 { return ts.Nano() }"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Seconds returns the duration as a floating point number of seconds. | func (d Duration) Seconds() float64 {
return time.Duration(d).Seconds()
} | [
"func (d Duration) Seconds() float64 {\n\tsec := d / Second\n\tusec := d % Second\n\treturn float64(sec) + float64(usec)/1e6\n}",
"func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }",
"func (period Period) Seconds() int {\n\treturn int(period.SecondsFloat())\n}",
"func (s *Sample) Seconds() float64 {\n\treturn s.Duration().Seconds()\n}",
"func (p *Process) Seconds() uint64 {\n\treturn uint64(time.Since(p.StartedAt) / time.Second)\n}",
"func (s Stopwatch) Seconds() float64 {\n\treturn s.acc.Seconds()\n}",
"func ToFloat(d time.Duration) (seconds float64) {\n\treturn float64(d) / float64(time.Second)\n}",
"func (f *Formatter) Seconds() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d seconds\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Seconds()))\n}",
"func (period Period) SecondsFloat() float32 {\n\treturn float32(period.seconds) / 10\n}",
"func durationInSeconds(d time.Duration) int64 {\n\t// converting a floating-point number to an integer discards\n\t// the fraction (truncation towards zero)\n\treturn int64(d.Seconds())\n}",
"func getSeconds(data *speedTestData) float64 {\n\treturn float64(data.Milliseconds) / 1000\n}",
"func (i ISODuration) GetSeconds() int {\r\n\treturn i.duration.Seconds\r\n}",
"func (sw Stopwatch) ElapsedSeconds() float64 {\n\tduration := time.Since(sw.startTime)\n\treturn duration.Seconds()\n}",
"func FloatSecondsDur(v float64) time.Duration {\n\treturn time.Duration(v * float64(time.Second))\n}",
"func (m *Metadata) DurationSec() float64 {\n\treturn m.Float(\"Duration\")\n}",
"func DurSecondsFloat(d time.Duration) float64 {\n\treturn float64(d) / float64(time.Second)\n}",
"func (s *Stopwatch) ElapsedSeconds() float64 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\treturn s.Elapsed().Seconds()\n}",
"func (p Planet) Seconds() float64 {\n\treturn planetDuration[p]\n}",
"func ConvertSeconds(s string) float64 {\n\tnum, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn num\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
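The three Duration records above (Minutes, Nanoseconds, Seconds) follow the usual pattern for a custom duration type: define the type as an alias of time.Duration and delegate each accessor via a conversion. A self-contained sketch; the Duration definition is inferred from the method receivers, not shown in the records:

```go
package main

import (
	"fmt"
	"time"
)

// Duration is assumed to be a defined type over time.Duration,
// which is what the method receivers in the records above imply.
type Duration time.Duration

func (d Duration) Minutes() float64   { return time.Duration(d).Minutes() }
func (d Duration) Seconds() float64   { return time.Duration(d).Seconds() }
func (d Duration) Nanoseconds() int64 { return time.Duration(d).Nanoseconds() }

func main() {
	d := Duration(90 * time.Second)
	fmt.Println(d.Minutes())     // 1.5
	fmt.Println(d.Seconds())     // 90
	fmt.Println(d.Nanoseconds()) // 90000000000
}
```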
Returns a string representation of the approximate duration in the Russian language | func (d Duration) StringApproximate() (result string) {
var seconds, minutes, hours, days, months, years int
seconds = int(d.Seconds())
if seconds > 60 {
minutes = int(d.Minutes())
}
if minutes > 59 {
hours = int(d.Hours())
minutes = minutes - hours*60
}
if hours > 24 {
days = (hours - hours%24) / 24
hours = hours - days*24
}
if days > 365 {
years = (days - days%365) / 365
days = days - years*365
}
if days > 30 {
months = (days - days%30) / 30
days = days - months*30
}
if years > 0 {
if months < 3 {
result = numberInString(years, false) + " " + yearsTail(years)
} else {
result = "Более"
if years > 1 {
result = " " + strings.ToLower(numberStringInGenitiveCase(years, false))
}
result += " " + strings.ToLower(numberStringInGenitiveCase(years, false)) + " " + strings.ToLower(yearsTailInGenitiveCase(years))
}
} else if months > 0 {
if days < 8 {
result = numberInString(months, false) + " " + monthsTail(months)
} else {
result = "Более"
if months > 1 {
result = " " + strings.ToLower(numberStringInGenitiveCase(months, false))
}
result += " " + strings.ToLower(numberStringInGenitiveCase(months, false)) + " " + strings.ToLower(monthsTailInGenitiveCase(months))
}
} else if days > 0 {
if hours < 5 {
result = numberInString(days, false) + " " + daysTail(days)
} else {
result = "Более "
if days == 1 {
result += "суток"
} else {
result += strings.ToLower(numberStringInGenitiveCase(days, false)) + " суток"
}
}
} else if hours > 0 {
if minutes < 16 {
result = numberInString(hours, false) + " " + hoursTail(hours)
} else {
result = "Более "
if hours == 1 {
result += "часа"
} else {
result += strings.ToLower(numberStringInGenitiveCase(hours, false))
result += " " + strings.ToLower(hoursTailInGenitiveCase(hours))
}
}
} else if minutes > 0 {
if minutes == 1 {
result = "Минуту"
} else {
result = numberInString(minutes, true) + " " + minutesTail(minutes)
}
} else {
result = "Менее минуты"
}
result += " назад"
return
} | [
"func (d *Duration) String() string { return (*time.Duration)(d).String() }",
"func (t Throughput) StringDuration() string {\n\treturn fmt.Sprintf(\"Duration: %v, starting %v\", time.Duration(t.MeasureDurationMillis)*time.Millisecond, t.StartTime.Format(\"15:04:05 MST\"))\n}",
"func (d Duration) String() string {\n\t// Largest time is 2540400h10m10.000000000s\n\tvar buf [32]byte\n\tw := len(buf)\n\n\tu := uint64(d)\n\tneg := d < 0\n\tif neg {\n\t\tu = -u\n\t}\n\n\tif u < uint64(time.Second) {\n\t\t// Special case: if duration is smaller than a second,\n\t\t// use smaller units, like 1.2ms\n\t\tvar prec int\n\t\tw--\n\t\tbuf[w] = 's'\n\t\tw--\n\t\tswitch {\n\t\tcase u == 0:\n\t\t\treturn \"0s\"\n\t\tcase u < uint64(time.Microsecond):\n\t\t\t// print nanoseconds\n\t\t\tprec = 0\n\t\t\tbuf[w] = 'n'\n\t\tcase u < uint64(time.Millisecond):\n\t\t\t// print microseconds\n\t\t\tprec = 3\n\t\t\t// U+00B5 'µ' micro sign == 0xC2 0xB5\n\t\t\tw-- // Need room for two bytes.\n\t\t\tcopy(buf[w:], \"µ\")\n\t\tdefault:\n\t\t\t// print milliseconds\n\t\t\tprec = 6\n\t\t\tbuf[w] = 'm'\n\t\t}\n\t\tw, u = fmtFrac(buf[:w], u, prec)\n\t\tw = fmtInt(buf[:w], u)\n\t} else {\n\t\tw--\n\t\tbuf[w] = 's'\n\n\t\tw, u = fmtFrac(buf[:w], u, 9)\n\n\t\t// u is now integer seconds\n\t\tif u%60 != 0 || w != len(buf)-1 {\n\t\t\tw = fmtInt(buf[:w], u%60)\n\t\t} else {\n\t\t\tw = len(buf)\n\t\t}\n\t\tu /= 60\n\n\t\t// u is now integer minutes\n\t\tif u > 0 {\n\t\t\tif u%60 != 0 {\n\t\t\t\tw--\n\t\t\t\tbuf[w] = 'm'\n\t\t\t\tw = fmtInt(buf[:w], u%60)\n\t\t\t}\n\t\t\tu /= 60\n\n\t\t\t// u is now integer hours\n\t\t\tif u > 0 {\n\t\t\t\tif u%24 != 0 {\n\t\t\t\t\tw--\n\t\t\t\t\tbuf[w] = 'h'\n\t\t\t\t\tw = fmtInt(buf[:w], u%24)\n\t\t\t\t}\n\t\t\t\tu /= 24\n\n\t\t\t\t// u is now integer days\n\t\t\t\tif u > 0 {\n\t\t\t\t\tw--\n\t\t\t\t\tbuf[w] = 'd'\n\t\t\t\t\tw = fmtInt(buf[:w], u)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif neg {\n\t\tw--\n\t\tbuf[w] = '-'\n\t}\n\n\treturn string(buf[w:])\n}",
"func humanDuration(d time.Duration) string {\n\tif seconds := int(d.Seconds()); seconds < 1 {\n\t\treturn \"Less than a second\"\n\t} else if seconds < 60 {\n\t\treturn fmt.Sprintf(\"%d seconds\", seconds)\n\t} else if minutes := int(d.Minutes()); minutes == 1 {\n\t\treturn \"About a minute\"\n\t} else if minutes < 60 {\n\t\treturn fmt.Sprintf(\"%d minutes\", minutes)\n\t} else if hours := int(d.Hours()); hours == 1 {\n\t\treturn \"About an hour\"\n\t} else if hours < 48 {\n\t\treturn fmt.Sprintf(\"%d hours\", hours)\n\t} else if hours < 24*7*2 {\n\t\treturn fmt.Sprintf(\"%d days\", hours/24)\n\t} else if hours < 24*30*3 {\n\t\treturn fmt.Sprintf(\"%d weeks\", hours/24/7)\n\t} else if hours < 24*365*2 {\n\t\treturn fmt.Sprintf(\"%d months\", hours/24/30)\n\t}\n\treturn fmt.Sprintf(\"%f years\", d.Hours()/24/365)\n}",
"func (d *DataGenerator) getDuration() string {\n\t// ISO 8601 format\n\t// P3Y6M4DT12H30M5S = three years, six months, four days, twelve hours, thirty minutes, and five seconds\n\thr := rand.Int31n(12)\n\tmin := rand.Int31n(60)\n\tsec := rand.Int31n(60)\n\treturn fmt.Sprintf(\"P0Y0M0DT%dH%dM%dS\", hr, min, sec)\n}",
"func (india indianTimeZones) Mauritius() string {return \"Indian/Mauritius\" }",
"func humanDuration(d time.Duration) string {\n\tif seconds := int(d.Seconds()); seconds < 1 {\n\t\treturn \"Less than a second ago\"\n\t} else if seconds == 1 {\n\t\treturn \"1 second ago\"\n\t} else if seconds < 60 {\n\t\treturn fmt.Sprintf(\"%d seconds ago\", seconds)\n\t} else if minutes := int(d.Minutes()); minutes == 1 {\n\t\treturn \"About a minute ago\"\n\t} else if minutes < 60 {\n\t\treturn fmt.Sprintf(\"%d minutes ago\", minutes)\n\t} else if hours := int(d.Hours() + 0.5); hours == 1 {\n\t\treturn \"About an hour ago\"\n\t} else if hours < 48 {\n\t\treturn fmt.Sprintf(\"%d hours ago\", hours)\n\t} else if hours < 24*7*2 {\n\t\treturn fmt.Sprintf(\"%d days ago\", hours/24)\n\t} else if hours < 24*30*2 {\n\t\treturn fmt.Sprintf(\"%d weeks ago\", hours/24/7)\n\t} else if hours < 24*365*2 {\n\t\treturn fmt.Sprintf(\"%d months ago\", hours/24/30)\n\t}\n\treturn fmt.Sprintf(\"%d years ago\", int(d.Hours())/24/365)\n}",
"func durationToWord(in Interval) string {\n\tswitch in {\n\tcase FifteenSecond:\n\t\treturn \"fifteensecond\"\n\tcase OneMin:\n\t\treturn \"onemin\"\n\tcase ThreeMin:\n\t\treturn \"threemin\"\n\tcase FiveMin:\n\t\treturn \"fivemin\"\n\tcase TenMin:\n\t\treturn \"tenmin\"\n\tcase FifteenMin:\n\t\treturn \"fifteenmin\"\n\tcase ThirtyMin:\n\t\treturn \"thirtymin\"\n\tcase OneHour:\n\t\treturn \"onehour\"\n\tcase TwoHour:\n\t\treturn \"twohour\"\n\tcase FourHour:\n\t\treturn \"fourhour\"\n\tcase SixHour:\n\t\treturn \"sixhour\"\n\tcase EightHour:\n\t\treturn \"eighthour\"\n\tcase TwelveHour:\n\t\treturn \"twelvehour\"\n\tcase OneDay:\n\t\treturn \"oneday\"\n\tcase ThreeDay:\n\t\treturn \"threeday\"\n\tcase FifteenDay:\n\t\treturn \"fifteenday\"\n\tcase OneWeek:\n\t\treturn \"oneweek\"\n\tcase TwoWeek:\n\t\treturn \"twoweek\"\n\tcase OneMonth:\n\t\treturn \"onemonth\"\n\tcase OneYear:\n\t\treturn \"oneyear\"\n\tdefault:\n\t\treturn \"notfound\"\n\t}\n}",
"func DescDuration(d time.Duration) string {\n\tif d < time.Minute {\n\t\treturn fmt.Sprintf(\"%0.1f sec ago\", d.Seconds())\n\t} else if d < time.Hour {\n\t\treturn fmt.Sprintf(\"%0.1f min ago\", d.Minutes())\n\t} else if d < time.Hour*24 {\n\t\treturn fmt.Sprintf(\"%0.1f hrs ago\", d.Hours())\n\t} else {\n\t\treturn fmt.Sprintf(\"%0.1f days ago\", d.Hours()/24.0)\n\t}\n}",
"func DurationInWords(d time.Duration) string {\n\n\tif d >= time.Second && d <= (time.Second*4) {\n\t\treturn fmt.Sprintf(lssthnd, 5, \"seconds\")\n\t} else if d >= (time.Second*5) && d < (time.Second*10) {\n\t\treturn fmt.Sprintf(lssthnd, 10, \"seconds\")\n\t} else if d >= (time.Second*10) && d < (time.Second*20) {\n\t\treturn fmt.Sprintf(lssthnd, 20, \"seconds\")\n\t} else if d >= (time.Second*20) && d < (time.Second*40) {\n\t\treturn \"half a minute\"\n\t} else if d >= (time.Second*40) && d < (time.Second*60) {\n\t\treturn fmt.Sprintf(lssthns, \"minute\")\n\t} else if d >= (time.Second*60) && d < time.Minute+(time.Second*30) {\n\t\treturn \"1 minute\"\n\t} else if d >= time.Minute+(time.Second*30) && d < (time.Minute*44)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d minutes\", (d / time.Minute))\n\t} else if d >= (time.Minute*44)+(time.Second*30) && d < (time.Minute*89)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, d/time.Hour, \"hour\")\n\t} else if d >= (time.Minute*89)+(time.Second*30) && d < (time.Hour*29)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, (d / time.Hour), \"hours\")\n\t} else if d >= (time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (time.Hour*41)+(time.Minute*59)+(time.Second*30) {\n\t\treturn \"1 day\"\n\t} else if d >= (time.Hour*41)+(time.Minute*59)+(time.Second*30) && d < (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d days\", d/(time.Hour*24))\n\t} else if d >= (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"month\")\n\t} else if d >= (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (year) {\n\t\treturn fmt.Sprintf(aboutnd, d/month+1, \"months\")\n\t} else if d >= year && d < year+(3*month) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"year\")\n\t} else if d >= year+(3*month) && d < year+(9*month) {\n\t\treturn \"over 1 year\"\n\t} else if d >= year+(9*month) && d < (year*2) {\n\t\treturn \"almost 2 years\"\n\t} else {\n\t\treturn fmt.Sprintf(aboutnd, d/year, \"years\")\n\t}\n}",
"func HumanReadableTime(duration time.Duration, unitSep ...string) string {\n\n\tvar (\n\t\tout,\n\t\tsep string\n\t\tstop bool\n\t\tmin = time.Second * 60\n\t\thour = min * 60\n\t\tday = hour * 24\n\t\td int64\n\t)\n\n\tif len(unitSep) > 0 {\n\t\tsep = unitSep[0]\n\t}\n\n\tfor !stop {\n\t\tswitch {\n\n\t\tcase duration >= day: // day\n\t\t\td = int64(duration / day)\n\t\t\tout += fmt.Sprintf(\"%dd%s\", d, sep)\n\t\t\tstop = duration == day\n\t\t\tduration = duration - (time.Duration(d) * (day))\n\n\t\tcase duration >= hour: // hour\n\t\t\td = int64(duration / hour)\n\t\t\tout += fmt.Sprintf(\"%dh%s\", d, sep)\n\t\t\tstop = duration == hour\n\t\t\tduration = duration - (time.Duration(d) * (hour))\n\n\t\tcase duration >= min: // minute\n\t\t\td = int64(duration / min)\n\t\t\tout += fmt.Sprintf(\"%dm%s\", d, sep)\n\t\t\tstop = duration == min\n\t\t\tduration = duration - (time.Duration(d) * (min))\n\n\t\tcase int64(duration) > 0: // seconds\n\t\t\tout += fmt.Sprintf(\"%06.3fs%s\", duration.Seconds(), sep)\n\t\t\tstop = true\n\n\t\tdefault:\n\t\t\tstop = true\n\t\t}\n\t}\n\treturn out\n}",
"func HumanizeDuration(duration time.Duration) string {\n\tif duration.Nanoseconds() < 1000 {\n\t\treturn fmt.Sprintf(\"%d nanoseconds\", int64(duration.Nanoseconds()))\n\t}\n\tif duration.Nanoseconds()/int64(time.Microsecond) < 1000 {\n\t\treturn fmt.Sprintf(\"%d microseconds\", int64(duration.Nanoseconds()/int64(time.Microsecond)))\n\t}\n\tif duration.Nanoseconds()/int64(time.Millisecond) < 1000 {\n\t\treturn fmt.Sprintf(\"%d miliseconds\", int64(duration.Nanoseconds()/int64(time.Millisecond)))\n\t}\n\tif duration.Nanoseconds() < 1000 {\n\t\treturn fmt.Sprintf(\"%d nanoseconds\", int64(duration.Nanoseconds()))\n\t}\n\t//if duration.Seconds() < 60.0 {\n\t//\treturn fmt.Sprintf(\"%d seconds\", int64(duration.Seconds()))\n\t//}\n\n\t// only seconds\n\treturn fmt.Sprintf(\"%d seconds\", int64(duration.Seconds()))\n\n\t// if duration.Minutes() < 60.0 {\n\t// \tremainingSeconds := math.Mod(duration.Seconds(), 60)\n\t// \treturn fmt.Sprintf(\"%d minutes %d seconds\", int64(duration.Minutes()), int64(remainingSeconds))\n\t// }\n\t// if duration.Hours() < 24.0 {\n\t// \tremainingMinutes := math.Mod(duration.Minutes(), 60)\n\t// \tremainingSeconds := math.Mod(duration.Seconds(), 60)\n\t// \treturn fmt.Sprintf(\"%d hours %d minutes %d seconds\",\n\t// \t\tint64(duration.Hours()), int64(remainingMinutes), int64(remainingSeconds))\n\t// }\n\t// remainingHours := math.Mod(duration.Hours(), 24)\n\t// remainingMinutes := math.Mod(duration.Minutes(), 60)\n\t// remainingSeconds := math.Mod(duration.Seconds(), 60)\n\t// return fmt.Sprintf(\"%d days %d hours %d minutes %d seconds\",\n\t// \tint64(duration.Hours()/24), int64(remainingHours),\n\t// \tint64(remainingMinutes), int64(remainingSeconds))\n}",
"func (d Duration) String() string {\n\tvalue := int64(d)\n\tout := \"\"\n\tif value < 0 {\n\t\tout = \"-\"\n\t\tvalue = -value\n\t}\n\tdivmod := func(divisor, dividend int64) (int64, int64) {\n\t\treturn divisor / dividend, divisor % dividend\n\t}\n\textract := func(symbol string, unit int64) {\n\t\tvar units int64\n\t\tunits, value = divmod(value, unit)\n\t\tif units > 0 {\n\t\t\tout += fmt.Sprintf(\"%d%s\", units, symbol)\n\t\t}\n\t}\n\textract(\"y\", Year)\n\textract(\"m\", Month)\n\textract(\"d\", Day)\n\tif value > 0 {\n\t\tout += \"t\"\n\t}\n\textract(\"h\", Hour)\n\textract(\"m\", Minute)\n\textract(\"s\", Second)\n\textract(\"us\", Microsecond)\n\n\tif out == \"\" {\n\t\t// input duration was 0\n\t\tout = \"t0s\" // seconds are the fundamental unit\n\t}\n\n\treturn out\n}",
"func RenderDuration(d time.Duration) string {\n\tif d == math.MaxInt64 {\n\t\treturn \"never\"\n\t}\n\n\tif d == 0 {\n\t\treturn \"forever\"\n\t}\n\n\ttsecs := d / time.Second\n\ttmins := tsecs / 60\n\tthrs := tmins / 60\n\ttdays := thrs / 24\n\ttyrs := tdays / 365\n\n\tif tyrs > 0 {\n\t\treturn fmt.Sprintf(\"%dy%dd%dh%dm%ds\", tyrs, tdays%365, thrs%24, tmins%60, tsecs%60)\n\t}\n\n\tif tdays > 0 {\n\t\treturn fmt.Sprintf(\"%dd%dh%dm%ds\", tdays, thrs%24, tmins%60, tsecs%60)\n\t}\n\n\tif thrs > 0 {\n\t\treturn fmt.Sprintf(\"%dh%dm%ds\", thrs, tmins%60, tsecs%60)\n\t}\n\n\tif tmins > 0 {\n\t\treturn fmt.Sprintf(\"%dm%ds\", tmins, tsecs%60)\n\t}\n\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}",
"func (hms HHMMSS) String() string {\n\treturn time.Duration(hms).String()\n}",
"func (b *Build) HumanDuration() string {\n\td := time.Duration(b.Duration)\n\tif seconds := int(d.Seconds()); seconds < 1 {\n\t\treturn \"Less than a second\"\n\t} else if seconds < 60 {\n\t\treturn fmt.Sprintf(\"%d seconds\", seconds)\n\t} else if minutes := int(d.Minutes()); minutes == 1 {\n\t\treturn \"About a minute\"\n\t} else if minutes < 60 {\n\t\treturn fmt.Sprintf(\"%d minutes\", minutes)\n\t} else if hours := int(d.Hours()); hours == 1 {\n\t\treturn \"About an hour\"\n\t} else if hours < 48 {\n\t\treturn fmt.Sprintf(\"%d hours\", hours)\n\t} else if hours < 24*7*2 {\n\t\treturn fmt.Sprintf(\"%d days\", hours/24)\n\t} else if hours < 24*30*3 {\n\t\treturn fmt.Sprintf(\"%d weeks\", hours/24/7)\n\t} else if hours < 24*365*2 {\n\t\treturn fmt.Sprintf(\"%d months\", hours/24/30)\n\t}\n\treturn fmt.Sprintf(\"%f years\", d.Hours()/24/365)\n}",
"func abbrvToDuration(input string) (dur time.Duration, s string) {\n\tswitch {\n\tcase containsAny(input, hourAbbrvs):\n\t\tdur = time.Since(time.Now().Add(-1 * time.Hour))\n\t\ts = \"hour\"\n\tcase containsAny(input, dayAbbrvs):\n\t\tdur = time.Since(time.Now().AddDate(0, 0, -1))\n\t\ts = \"day\"\n\tcase containsAny(input, weekAbbrvs):\n\t\tdur = time.Since(time.Now().AddDate(0, 0, -7))\n\t\ts = \"week\"\n\tcase containsAny(input, monthAbbrvs):\n\t\tdur = time.Since(time.Now().AddDate(0, -1, 0))\n\t\ts = \"month\"\n\tcase containsAny(input, yearAbbrvs):\n\t\tdur = time.Since(time.Now().AddDate(-1, 0, 0))\n\t\ts = \"year\"\n\tdefault:\n\t\t// Default to one week if we can't figure anything out\n\t\tdur = time.Since(time.Now().AddDate(0, 0, -7))\n\t\ts = \"week\"\n\t}\n\n\treturn dur, s\n}",
"func (channelInfo ChannelInfo) GetStreamDuration() string {\n\n\tif !channelInfo.StreamStatus.Online {\n\t\treturn \"\"\n\t}\n\tminutePrefix := \"минут\"\n\thourPrefix := \"часов\"\n\tduration := time.Now().Sub(channelInfo.StreamStatus.Start)\n\tminutes := float64(int(duration.Minutes() - math.Floor(duration.Minutes()/60)*60))\n\thours := float64(int(duration.Hours()))\n\tif math.Floor(minutes/10) != 1 {\n\t\tswitch int(minutes - math.Floor(minutes/10)*10) {\n\t\tcase 1:\n\t\t\tminutePrefix = \"минуту\"\n\t\t\tbreak\n\t\tcase 2:\n\t\tcase 3:\n\t\tcase 4:\n\t\t\tminutePrefix = \"минуты\"\n\t\t}\n\t}\n\n\tif int(math.Floor(hours/10)) != 1 {\n\t\tswitch int(hours - math.Floor(hours/10)*10) {\n\t\tcase 1:\n\t\t\thourPrefix = \"час\"\n\t\t\tbreak\n\t\tcase 2:\n\t\tcase 3:\n\t\tcase 4:\n\t\t\thourPrefix = \"часа\"\n\t\t}\n\t}\n\tif int(minutes) == 0 {\n\t\treturn fmt.Sprintf(\"%d %s\", int(hours), hourPrefix)\n\n\t}\n\tif int(hours) == 0 {\n\t\treturn fmt.Sprintf(\"%d %s\", int(minutes), minutePrefix)\n\t}\n\treturn fmt.Sprintf(\"%d %s %d %s\", int(hours), hourPrefix, int(minutes), minutePrefix)\n\n}",
"func (d duration) String() string {\n\treturn time.Duration(d).String()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
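The duration formatters collected as negatives above all reduce to the same largest-unit-first pattern: divide by each unit in descending order, print the nonzero counts, and carry the remainder forward. A minimal self-contained sketch of that shared pattern (the unit table is illustrative, not taken from any single entry):

package main

import (
	"fmt"
	"time"
)

// renderUnits walks a unit table from largest to smallest, emitting each
// nonzero count and subtracting it from the remainder.
func renderUnits(d time.Duration) string {
	units := []struct {
		name string
		size time.Duration
	}{
		{"d", 24 * time.Hour},
		{"h", time.Hour},
		{"m", time.Minute},
		{"s", time.Second},
	}
	out := ""
	for _, u := range units {
		if n := d / u.size; n > 0 {
			out += fmt.Sprintf("%d%s", n, u.name)
			d -= n * u.size
		}
	}
	if out == "" {
		out = "0s" // zero input, mirroring the "t0s" fallback seen above
	}
	return out
}

func main() {
	fmt.Println(renderUnits(26*time.Hour + 3*time.Minute)) // 1d2h3m
}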
GetIssueLabels gets the current labels on the specified PR or issue | func (fc *fakeClient) GetIssueLabels(owner, repo string, number int) ([]github.Label, error) {
var la []github.Label
for _, l := range fc.labels {
la = append(la, github.Label{Name: l})
}
return la, nil
} | [
"func (c *client) GetIssueLabels(org, repo string, number int) ([]Label, error) {\n\tdurationLogger := c.log(\"GetIssueLabels\", org, repo, number)\n\tdefer durationLogger()\n\n\treturn c.getLabels(fmt.Sprintf(\"/repos/%s/%s/issues/%d/labels\", org, repo, number), org)\n}",
"func (c *client) GetIssueLabels(owner, repo string, number int) ([]*github.Label, error) {\n\taction := \"GetIssueLabels\"\n\tlogDuration := c.log(action, owner, repo, number)\n\tdefer logDuration()\n\tvar labels []*github.Label\n\terr := doPaginatedRequest(\n\t\tfunc(page, perPage int) (interface{}, *github.Response, error) {\n\t\t\topts := &github.ListOptions{Page: page, PerPage: perPage}\n\t\t\treturn c.Issues.ListLabelsByIssue(context.Background(), owner, repo, number, opts)\n\t\t},\n\t\tfunc(v interface{}) {\n\t\t\tlabels = append(labels, v.([]*github.Label)...)\n\t\t})\n\treturn labels, err\n}",
"func (m *MockRerunClient) GetIssueLabels(org, repo string, number int) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIssueLabels\", org, repo, number)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockClient) GetIssueLabels(org, repo string, number int) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIssueLabels\", org, repo, number)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (a ProblemAdapter) GetLabels() map[string]string {\n\treturn nil\n}",
"func GetLabels(repositoryURL string, token string) ([]Label, error) {\n\tURL := fmt.Sprintf(\"%v/labels\", repositoryURL)\n\n\trequest, err := http.NewRequest(\"GET\", URL, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Couldn't make a new request in GetLabel: %v\", err)\n\t}\n\n\trequest.Header.Add(\"Authorization\", token)\n\trequest.Header.Add(\"Accept\", \"application/vnd.github.v3+json\")\n\n\tresponse, err := http.DefaultClient.Do(request)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Response error in GetLabel: %v\", err)\n\t}\n\n\tif response.Body != nil {\n\t\tdefer response.Body.Close()\n\t}\n\n\tbody, err := ioutil.ReadAll(response.Body)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't convert response body to []byte: %v\", err)\n\t}\n\n\tvar labels []Label\n\n\terr = json.Unmarshal(body, &labels)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"problem unmarshalling the response body: %v\", err)\n\t}\n\n\treturn labels, nil\n}",
"func NewIssueGetLabelParams() *IssueGetLabelParams {\n\tvar ()\n\treturn &IssueGetLabelParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (c *client) GetRepoLabels(org, repo string) ([]Label, error) {\n\tdurationLogger := c.log(\"GetRepoLabels\", org, repo)\n\tdefer durationLogger()\n\n\treturn c.getLabels(fmt.Sprintf(\"/repos/%s/%s/labels\", org, repo), org)\n}",
"func jiraLabels(j *v1alpha1.Jira) map[string]string {\n\tlabels := defaultLabels(j)\n\tfor key, val := range j.ObjectMeta.Labels {\n\t\tlabels[key] = val\n\t}\n\treturn labels\n}",
"func (c *client) getLabels(path, org string) ([]Label, error) {\n\tvar labels []Label\n\tif c.fake {\n\t\treturn labels, nil\n\t}\n\terr := c.readPaginatedResults(\n\t\tpath,\n\t\t\"application/vnd.github.symmetra-preview+json\", // allow the description field -- https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\torg,\n\t\tfunc() interface{} {\n\t\t\treturn &[]Label{}\n\t\t},\n\t\tfunc(obj interface{}) {\n\t\t\tlabels = append(labels, *(obj.(*[]Label))...)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn labels, nil\n}",
"func GetLabels(component, name, identifier string) map[string]string {\n\t// see https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels\n\treturn map[string]string{\n\t\t\"app.kubernetes.io/managed-by\": \"splunk-operator\",\n\t\t\"app.kubernetes.io/component\": component,\n\t\t\"app.kubernetes.io/name\": name,\n\t\t\"app.kubernetes.io/part-of\": fmt.Sprintf(\"splunk-%s-%s\", identifier, component),\n\t\t\"app.kubernetes.io/instance\": fmt.Sprintf(\"splunk-%s-%s\", identifier, name),\n\t}\n}",
"func (mr *MockRerunClientMockRecorder) GetIssueLabels(org, repo, number interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetIssueLabels\", reflect.TypeOf((*MockRerunClient)(nil).GetIssueLabels), org, repo, number)\n}",
"func (m *MockGitClient) GetPullRequestLabels(arg0 context.Context, arg1 git.GetPullRequestLabelsArgs) (*[]core.WebApiTagDefinition, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetPullRequestLabels\", arg0, arg1)\n\tret0, _ := ret[0].(*[]core.WebApiTagDefinition)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func getSigLabelsForIssue(issue Issue) []string {\n\tvar sigs []string = nil\n\n\tvar sizeFactor float64 = 400\n\tissueSize := float64(len(issue.Title) + len(issue.Body))\n\tsizeScaling := 0.75 * issueSize / sizeFactor\n\tif sizeScaling < 1 { // Don't weirdly scale tiny issues\n\t\tsizeScaling = 1\n\t}\n\tfmt.Println(\"size scaling\", sizeScaling)\n\n\tfor sigName, scoreData := range getScoresForSigs(issue) {\n\t\tfmt.Println(\"Debug\", sigName, scoreData.scoreItems)\n\t\tif float64(scoreData.scoreTotal) >= scoreThreshhold*sizeScaling {\n\t\t\tsigs = append(sigs, sigName)\n\t\t}\n\t}\n\n\treturn sigs\n}",
"func GetLabels(ctx context.Context) Labels {\n\tif raw := ctx.Value(labelsKey{}); raw != nil {\n\t\tif typed, ok := raw.(Labels); ok {\n\t\t\t// create a copy\n\t\t\toutput := make(Labels)\n\t\t\tfor key, value := range typed {\n\t\t\t\toutput[key] = value\n\t\t\t}\n\t\t\treturn output\n\t\t}\n\t}\n\treturn make(Labels)\n}",
"func GetLabels(w http.ResponseWriter, r *http.Request) {\n\tret := []byte(\"{}\")\n\tstatus := http.StatusInternalServerError\n\tvar err error\n\n\tif ret, err = common.GetLabels(); err != nil {\n\t\tret = ServerError(err)\n\t\tlog.Error.Printf(err.Error())\n\t} else {\n\t\tstatus = http.StatusOK\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(ret)\n}",
"func checkLabels(repo *github.Repository) {\n\tlabels, _, err := client.Issues.ListLabels(ctx, repo.GetOwner().GetLogin(), repo.GetName(), nil)\n\tif err != nil {\n\t\tprintln(err.Error())\n\t\treturn\n\t}\n\n\tcheckForLabel(repo, labels, \"major\", \"b60205\")\n\tcheckForLabel(repo, labels, \"minor\", \"e8894a\")\n\tcheckForLabel(repo, labels, \"patch\", \"b5d3ff\")\n\n}",
"func (mr *MockClientMockRecorder) GetIssueLabels(org, repo, number interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetIssueLabels\", reflect.TypeOf((*MockClient)(nil).GetIssueLabels), org, repo, number)\n}",
"func GetLabels(component constants.ComponentName, cr_name string) map[string]string {\n\treturn generateComponentLabels(component, cr_name)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
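A sketch of how a fake like the one above is driven in a test. The real fakeClient's fields are unexported in its package, so the types below are local stand-ins with the same shape; Label mirrors github.Label from k8s.io/test-infra/prow/github:

package main

import "fmt"

// Label is a stand-in for the prow github.Label struct.
type Label struct{ Name string }

// fakeClient mirrors the fake above: a flat list of label names that is
// returned regardless of owner, repo, or issue number.
type fakeClient struct{ labels []string }

func (fc *fakeClient) GetIssueLabels(owner, repo string, number int) ([]Label, error) {
	var la []Label
	for _, l := range fc.labels {
		la = append(la, Label{Name: l})
	}
	return la, nil
}

func main() {
	fc := &fakeClient{labels: []string{"lgtm", "approved"}}
	labels, _ := fc.GetIssueLabels("org", "repo", 1) // arguments are ignored by the fake
	fmt.Println(labels)                              // [{lgtm} {approved}]
}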
CreateComment adds and tracks a comment in the client | func (fc *fakeClient) CreateComment(owner, repo string, number int, comment string) error {
fc.commentsAdded[number] = append(fc.commentsAdded[number], comment)
return nil
} | [
"func CreateComment(w http.ResponseWriter, r *http.Request) {\n\tcomment := jsonToComment(w, r)\n\tcomment = CommentDAO.AddComment(comment)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusCreated)\n\tif err := json.NewEncoder(w).Encode(comment); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (b *Service) CommentCreate(ctx context.Context, TeamID string, UserID string, EventValue string) ([]byte, error, bool) {\n\tvar c struct {\n\t\tCheckinId string `json:\"checkinId\"`\n\t\tUserID string `json:\"userId\"`\n\t\tComment string `json:\"comment\"`\n\t}\n\terr := json.Unmarshal([]byte(EventValue), &c)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\terr = b.CheckinService.CheckinComment(ctx, TeamID, c.CheckinId, c.UserID, c.Comment)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\tmsg := createSocketEvent(\"comment_added\", \"\", \"\")\n\n\treturn msg, nil, false\n}",
"func (s *APIClientService) CreateComment(ctx context.Context, id string, new CommentRequest) (Comment, *http.Response, error) {\n\tresource := Comment{} // new(APIClient)\n\n\treq, err := s.client.NewRequest(ctx, http.MethodPost, \"comments/\"+apiClientBasePath+\"/\"+id, new)\n\tif err != nil {\n\t\treturn resource, nil, err\n\t}\n\n\tresp, _, err := s.client.Do(ctx, req, &resource, false)\n\tif err != nil {\n\t\treturn resource, nil, err\n\t}\n\n\treturn resource, resp, nil\n}",
"func (c *Client) CreateComment(owner, repo string, number int, comment string) error {\n\tif c.dry {\n\t\treturn nil\n\t}\n\n\tic := IssueComment{\n\t\tBody: comment,\n\t}\n\tresp, err := c.request(http.MethodPost, fmt.Sprintf(\"%s/repos/%s/%s/issues/%d/comments\", c.base, owner, repo, number), ic)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 201 {\n\t\treturn fmt.Errorf(\"response not 201: %s\", resp.Status)\n\t}\n\treturn nil\n}",
"func (u *User) CreateComment(pid string, content string) {\n\t//getCommentがいらないので、コメント件数にインクリメントするだけ.\n\tpid_int, e := strconv.Atoi(pid)\n\tif e != nil {\n\t\tpanic(e)\n\t}\n\tcnt, ok := commentCount.Load(pid_int)\n\tif ok {}\n\tcommentCount.Store(pid_int, cnt.(int) + 1)\n\n\tproductsWithComments[pid_int - 1].CommentCount += 1\n}",
"func (b *Client) CreateComment(repo models.Repo, pullNum int, comment string, command string) error {\n\t// NOTE: I tried to find the maximum size of a comment for bitbucket.org but\n\t// I got up to 200k chars without issue so for now I'm not going to bother\n\t// to detect this.\n\tbodyBytes, err := json.Marshal(map[string]map[string]string{\"content\": {\n\t\t\"raw\": comment,\n\t}})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"json encoding\")\n\t}\n\tpath := fmt.Sprintf(\"%s/2.0/repositories/%s/pullrequests/%d/comments\", b.BaseURL, repo.FullName, pullNum)\n\t_, err = b.makeRequest(\"POST\", path, bytes.NewBuffer(bodyBytes))\n\treturn err\n}",
"func (cc *Controller) CreateComment(c *gin.Context) {\n\tuserID, ok := utils.RetrieveUserID(c)\n\tif !ok {\n\t\tlog.Fatal(\"This route needs VerifyToken middleware\")\n\t}\n\n\tvar reqBody cmtabledtos.CreateCommentRequest\n\tvar resBody cmtabledtos.CreateCommentResponse\n\n\tif err := c.ShouldBind(&reqBody); err != nil {\n\t\tutils.ResponseWithError(c, http.StatusBadRequest, \"Some required fields missing\", err.Error())\n\t\treturn\n\t}\n\n\tcmtableID, err := utils.StringToUint(c.Param(\"cmtableID\"))\n\tif err != nil {\n\t\tutils.ResponseWithError(c, http.StatusBadRequest, \"Invalid parameter\", err.Error())\n\t\treturn\n\t}\n\n\tif err := cc.checkUserAuthorizationForCommentable(cmtableID, userID); err != nil {\n\t\tutils.ResponseWithError(c, err.StatusCode, err.Message, err.Data)\n\t\treturn\n\t}\n\n\tcomment := models.Comment{Content: reqBody.Content, UserID: userID, CommentableID: cmtableID}\n\tif err := cc.commentService.Save(&comment); err != nil {\n\t\tutils.ResponseWithError(c, http.StatusInternalServerError, \"Error while saving comment\", err.Error())\n\t\treturn\n\t}\n\n\tresBody.Comment = comment.ToDTO()\n\tutils.ResponseWithSuccess(c, http.StatusCreated, \"Save comment successfully\", resBody)\n}",
"func (s *PullRequestsService) CreateComment(ctx context.Context, owner, project, repo string, pullNum int, threadId int, comment *Comment) (*Comment, *http.Response, error) {\n\tURL := fmt.Sprintf(\"%s/%s/_apis/git/repositories/%s/pullrequests/%d/threads/%d/comments?api-version=5.1-preview.1\",\n\t\towner,\n\t\tproject,\n\t\trepo,\n\t\tpullNum,\n\t\tthreadId,\n\t)\n\n\tif comment.GetContent() == \"\" {\n\t\treturn nil, nil, errors.New(\"PullRequests.CreateComment: Nil pointer or empty string in comment.Content field \")\n\t}\n\n\tif comment.GetCommentType() == \"\" {\n\t\tcomment.CommentType = String(\"text\")\n\t}\n\n\treq, err := s.client.NewRequest(\"POST\", URL, comment)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tr := new(Comment)\n\tresp, err := s.client.Execute(ctx, req, r)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn r, resp, err\n}",
"func (v Notes) CreateComment(params NotesCreateCommentParams) (NotesCreateCommentResponse, error) {\n\tr, err := v.API.Request(\"notes.createComment\", params)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar resp NotesCreateCommentResponse\n\n\tvar cnv int\n\tcnv, err = strconv.Atoi(string(r))\n\tresp = NotesCreateCommentResponse(cnv)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn resp, nil\n}",
"func (c *client) CreateComment(owner, repo string, number int, comment string) error {\n\taction := \"CreateComment\"\n\tc.log(action, owner, repo, number, comment)\n\tcmt := &github.IssueComment{Body: github.String(comment)}\n\t_, _, err := c.Issues.CreateComment(context.Background(), owner, repo, number, cmt)\n\treturn c.wrapperError(err, action)\n}",
"func (m *Market) Createcomment(ownerId int, itemId int, message string, attachments []string, fromGroup bool, replyToComment int, stickerId int, guid string) (resp responses.MarketCreatecomment, err error) {\n\tparams := map[string]interface{}{}\n\n\tparams[\"owner_id\"] = ownerId\n\n\tparams[\"item_id\"] = itemId\n\n\tif message != \"\" {\n\t\tparams[\"message\"] = message\n\t}\n\n\tif len(attachments) > 0 {\n\t\tparams[\"attachments\"] = SliceToString(attachments)\n\t}\n\n\tparams[\"from_group\"] = fromGroup\n\n\tif replyToComment > 0 {\n\t\tparams[\"reply_to_comment\"] = replyToComment\n\t}\n\n\tif stickerId > 0 {\n\t\tparams[\"sticker_id\"] = stickerId\n\t}\n\n\tif guid != \"\" {\n\t\tparams[\"guid\"] = guid\n\t}\n\n\terr = m.SendObjRequest(\"market.createComment\", params, &resp)\n\n\treturn\n}",
"func (a *ProblemsApiService) CreateComment(ctx _context.Context, problemId string) ApiCreateCommentRequest {\n\treturn ApiCreateCommentRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tproblemId: problemId,\n\t}\n}",
"func (g *GithubClient) CreateComment(repo models.Repo, pullNum int, comment string, command string) error {\n\tvar sepStart string\n\n\tsepEnd := \"\\n```\\n</details>\" +\n\t\t\"\\n<br>\\n\\n**Warning**: Output length greater than max comment size. Continued in next comment.\"\n\n\tif command != \"\" {\n\t\tsepStart = fmt.Sprintf(\"Continued %s output from previous comment.\\n<details><summary>Show Output</summary>\\n\\n\", command) +\n\t\t\t\"```diff\\n\"\n\t} else {\n\t\tsepStart = \"Continued from previous comment.\\n<details><summary>Show Output</summary>\\n\\n\" +\n\t\t\t\"```diff\\n\"\n\t}\n\n\tcomments := common.SplitComment(comment, maxCommentLength, sepEnd, sepStart)\n\tfor i := range comments {\n\t\tg.logger.Debug(\"POST /repos/%v/%v/issues/%d/comments\", repo.Owner, repo.Name, pullNum)\n\t\t_, _, err := g.client.Issues.CreateComment(g.ctx, repo.Owner, repo.Name, pullNum, &github.IssueComment{Body: &comments[i]})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (dbHandler *Handler) CreateComment(userID uint, entryID uint, text string, ts time.Time) (api.Comment, error) {\n\tcomment := api.Comment{UserID: userID, EntryID: entryID, Text: text}\n\tif !ts.IsZero() {\n\t\tcomment.CreatedAt = ts\n\t\tcomment.UpdatedAt = ts\n\t}\n\n\tdb := dbHandler.DB.Create(&comment)\n\tif db.Error != nil {\n\t\treturn comment, errors.WrapWithDetails(db.Error, \"cannot create comment\", \"userID\", userID, \"entryID\", entryID)\n\t}\n\n\treturn comment, nil\n}",
"func (_article *Article) CommentsCreate(am map[string]interface{}) error {\n\t\t\tam[\"article_id\"] = _article.Id\n\t\t_, err := CreateComment(am)\n\treturn err\n}",
"func (s *Server) createComment() http.HandlerFunc {\n\ttype request struct {\n\t\tBody string `json:\"body\"`\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserToken := getAuthorizationToken(r)\n\t\tif userToken == \"\" {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tctx := r.Context()\n\n\t\tuser, err := s.Accounts.GetUserByToken(ctx, userToken)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tvar req request\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\terr = json.Unmarshal(b, &req)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusUnprocessableEntity)\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tnewsUUID := vars[\"newsuuid\"]\n\n\t\tcomment, err := s.Comments.AddComment(ctx, req.Body, user.UID, newsUUID)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\tjson, err := json.Marshal(*comment)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Write(json)\n\t}\n}",
"func CreateComment(c *gin.Context, in *createCommentIn) (*task.Comment, error) {\n\tmetadata.AddActionMetadata(c, metadata.TaskID, in.TaskID)\n\n\tdbp, err := zesty.NewDBProvider(utask.DBName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt, err := task.LoadFromPublicID(dbp, in.TaskID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttt, err := tasktemplate.LoadFromID(dbp, t.TemplateID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetadata.AddActionMetadata(c, metadata.TemplateName, tt.Name)\n\n\tvar res *resolution.Resolution\n\tif t.Resolution != nil {\n\t\tres, err = resolution.LoadFromPublicID(dbp, *t.Resolution)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmetadata.AddActionMetadata(c, metadata.ResolutionID, res.PublicID)\n\t}\n\n\tadmin := auth.IsAdmin(c) == nil\n\trequester := auth.IsRequester(c, t) == nil\n\twatcher := auth.IsWatcher(c, t) == nil\n\tresolutionManager := auth.IsResolutionManager(c, tt, t, res) == nil\n\n\tif !requester && !watcher && !resolutionManager && !admin {\n\t\treturn nil, errors.Forbiddenf(\"Can't create comment\")\n\t} else if !requester && !watcher && !resolutionManager {\n\t\tmetadata.SetSUDO(c)\n\t}\n\n\treqUsername := auth.GetIdentity(c)\n\n\tcomment, err := task.CreateComment(dbp, t, reqUsername, in.Content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn comment, nil\n}",
"func (g *Github) createComment(pr int, comment string) error {\n\tctx, cancel := context.WithTimeout(context.Background(), timeoutLongRequest)\n\tdefer cancel()\n\n\tc := &github.IssueComment{Body: &comment}\n\n\t_, _, err := g.client.Issues.CreateComment(ctx, g.owner, g.repo, pr, c)\n\n\treturn err\n}",
"func (r *Resolver) CreateComment(ctx context.Context, args struct {\n\tInput createCommentInput\n}) (Comment, error) {\n\tresult := Comment{}\n\tm := dbmodel.Comment{}\n\n\t// Role-based Access Control\n\tif _, err := AssertPermissions(ctx, \"create\", \"Comment\", args, &args.Input); err != nil {\n\t\treturn result, errors.Wrapf(err, \"permission denied\")\n\t}\n\n\tdata, err := json.Marshal(args.Input)\n\tif err != nil {\n\t\treturn result, errors.Wrapf(err, \"json.Marshal(%#v)\", args.Input)\n\t}\n\tif err = json.Unmarshal(data, &m); err != nil {\n\t\treturn result, errors.Wrapf(err, \"json.Unmarshal(%s)\", data)\n\t}\n\n\tif err := m.Insert(r.db(ctx)); err != nil {\n\t\treturn result, errors.Wrapf(err, \"createComment(%#v)\", m)\n\t}\n\treturn Comment{model: m, db: r.db(ctx)}, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
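The fake above leans on Go's append-into-map idiom: indexing a missing key yields a nil slice, and append on a nil slice allocates, so no existence check is needed before recording a comment. The idiom in isolation:

package main

import "fmt"

func main() {
	commentsAdded := map[int][]string{}
	// Key 42 does not exist yet; the nil slice the lookup returns is
	// safe to append to.
	commentsAdded[42] = append(commentsAdded[42], "first")
	commentsAdded[42] = append(commentsAdded[42], "second")
	fmt.Println(commentsAdded[42]) // [first second]
}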
NumComments counts the number of tracked comments | func (fc *fakeClient) NumComments() int {
n := 0
for _, comments := range fc.commentsAdded {
n += len(comments)
}
return n
} | [
"func NumberOfComments(c context.Context, id int64) (int, error) {\n\tcount, err := datastore.NewQuery(\"Post\").Filter(\"Parent =\", id).Count(c)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"NumberOfComments: could not collect posts: %v\", err)\n\t}\n\treturn count, nil\n}",
"func (o IncidentAdditionalDataResponseOutput) CommentsCount() pulumi.IntOutput {\n\treturn o.ApplyT(func(v IncidentAdditionalDataResponse) int { return v.CommentsCount }).(pulumi.IntOutput)\n}",
"func (o IncidentAdditionalDataResponsePtrOutput) CommentsCount() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *IncidentAdditionalDataResponse) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.CommentsCount\n\t}).(pulumi.IntPtrOutput)\n}",
"func (m *ReactionsFacet) GetCommentCount()(*int32) {\n val, err := m.GetBackingStore().Get(\"commentCount\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*int32)\n }\n return nil\n}",
"func (q commentQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count comment rows\")\n\t}\n\n\treturn count, nil\n}",
"func (o *InlineResponse200115) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}",
"func (o *InlineResponse20033Milestones) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}",
"func (o *InlineResponse200115) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}",
"func CountAllCommentsPerPost(postID uint64) uint64 {\n\n\tvar result uint64\n\tDB, err := database.NewOpen()\n\n\tcountedCommentsResult, err := DB.Query(\"SELECT * FROM comment WHERE PostID=?\", postID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfor countedCommentsResult.Next() {\n\t\tresult = result + 1\n\t}\n\n\tDB.Close()\n\n\tfmt.Println(\"Number of comments for u:\", result)\n\n\treturn result\n}",
"func (o *ViewMilestone) SetCommentsCount(v int32) {\n\to.CommentsCount = &v\n}",
"func (t *TeamDiscussion) GetCommentsCount() int {\n\tif t == nil || t.CommentsCount == nil {\n\t\treturn 0\n\t}\n\treturn *t.CommentsCount\n}",
"func (d *decred) cmdGetNumComments(payload string) (string, error) {\n\tlog.Tracef(\"decred cmdGetNumComments\")\n\n\tgnc, err := decredplugin.DecodeGetNumComments([]byte(payload))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Lookup number of comments for provided tokens\n\ttype Result struct {\n\t\tToken string\n\t\tCounts int\n\t}\n\tresults := make([]Result, 0, len(gnc.Tokens))\n\terr = d.recordsdb.\n\t\tTable(\"comments\").\n\t\tSelect(\"count(*) as counts, token\").\n\t\tGroup(\"token\").\n\t\tWhere(\"token IN (?)\", gnc.Tokens).\n\t\tFind(&results).\n\t\tError\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Put results into a map\n\tnumComments := make(map[string]int, len(results)) // [token]numComments\n\tfor _, c := range results {\n\t\tnumComments[c.Token] = c.Counts\n\t}\n\n\t// Encode reply\n\tgncr := decredplugin.GetNumCommentsReply{\n\t\tNumComments: numComments,\n\t}\n\tgncre, err := decredplugin.EncodeGetNumCommentsReply(gncr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(gncre), nil\n}",
"func CommentCountOnCommit(repo *git.Repository, commit string) result.Result {\n\tvar count uint16 = 0\n\treturn gg.CommitCommentRefIterator(repo, commit, func(ref *git.Reference) {\n\t\tcount += 1\n\t}).FlatMap(func(value interface{}) result.Result {\n\t\treturn result.NewSuccess(count)\n\t})\n}",
"func (c Comments) Len() int {\n\treturn c.num\n}",
"func (o *InlineResponse20034Milestone) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *TattooStorage) GetArticleCommentCount(name string) int {\n\tlst_buff, err := s.CommentIndexDB.GetJSON(name)\n\tif err != nil {\n\t\tlog.Printf(\"load comment index failed (%v)!\\n\", err)\n\t\treturn 0\n\t}\n\treturn len(lst_buff.([]interface{}))\n}",
"func (o *ViewMilestone) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *userService) IncrCommentCount(userId int64) int {\n\tt := dao.UserDao.Get(userId)\n\tif t == nil {\n\t\treturn 0\n\t}\n\tcommentCount := t.CommentCount + 1\n\tif err := dao.UserDao.UpdateColumn(userId, \"comment_count\", commentCount); err != nil {\n\t\tlog.Error(err.Error())\n\t} else {\n\t\tcache.UserCache.Invalidate(userId)\n\t}\n\treturn commentCount\n}",
"func (o *ViewMilestone) GetCommentsCountOk() (*int32, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
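Putting this row together with the CreateComment row above: comments are recorded per issue number and NumComments sums across all of them. A reduced stand-in, since the real fields are unexported in the fake's package:

package main

import "fmt"

type fakeClient struct{ commentsAdded map[int][]string }

func (fc *fakeClient) CreateComment(owner, repo string, number int, comment string) error {
	fc.commentsAdded[number] = append(fc.commentsAdded[number], comment)
	return nil
}

func (fc *fakeClient) NumComments() int {
	n := 0
	for _, comments := range fc.commentsAdded {
		n += len(comments)
	}
	return n
}

func main() {
	fc := &fakeClient{commentsAdded: map[int][]string{}}
	fc.CreateComment("org", "repo", 1, "a")
	fc.CreateComment("org", "repo", 1, "b")
	fc.CreateComment("org", "repo", 7, "c")
	fmt.Println(fc.NumComments()) // 3
}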
NewOutput instantiates a new output plugin instance publishing to elasticsearch. | func (f elasticsearchOutputPlugin) NewOutput(
config *outputs.MothershipConfig,
topologyExpire int,
) (outputs.Outputer, error) {
// configure bulk size in config in case it is not set
if config.BulkMaxSize == nil {
bulkSize := defaultBulkSize
config.BulkMaxSize = &bulkSize
}
output := &elasticsearchOutput{}
err := output.init(*config, topologyExpire)
if err != nil {
return nil, err
}
return output, nil
} | [
"func NewOutput(qs url.Values, fragment string) (plogd.OutputWriter, error) {\n\tvar err error\n\tw := defaultWriter\n\tesurl, err := url.Parse(qs.Get(\"url\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdelete(qs, \"url\")\n\n\tif err := w.configure(qs); err != nil {\n\t\treturn nil, err\n\t}\n\n\tesurl.RawQuery = qs.Encode()\n\tw.conf, err = config.Parse(esurl.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif w.conf.Index == \"\" && w.indexTemplate == nil && w.indexTimeLayout == \"\" {\n\t\tw.indexTimeLayout = \"plog-2006.01.02\"\n\t}\n\n\terr = w.initClient()\n\tif err != nil {\n\t\tif !w.nofail {\n\t\t\treturn nil, err\n\t\t}\n\t\tslog.Error(\"Failed initial connection to Elasticsearch\", \"error\", err)\n\t\tw.client = nil\n\t}\n\treturn plogd.NewQueuedWriter(&w), nil\n}",
"func NewOutputPlugin(region, deliveryStream, dataKeys, roleARN, firehoseEndpoint, stsEndpoint, timeKey, timeFmt, logKey, replaceDots string, pluginID int, simpleAggregation bool) (*OutputPlugin, error) {\n\tclient, err := newPutRecordBatcher(roleARN, region, firehoseEndpoint, stsEndpoint, pluginID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trecords := make([]*firehose.Record, 0, maximumRecordsPerPut)\n\n\ttimer, err := plugins.NewTimeout(func(d time.Duration) {\n\t\tlogrus.Errorf(\"[firehose %d] timeout threshold reached: Failed to send logs for %s\\n\", pluginID, d.String())\n\t\tlogrus.Errorf(\"[firehose %d] Quitting Fluent Bit\", pluginID)\n\t\tos.Exit(1)\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar timeFormatter *strftime.Strftime\n\tif timeKey != \"\" {\n\t\tif timeFmt == \"\" {\n\t\t\ttimeFmt = defaultTimeFmt\n\t\t}\n\t\ttimeFormatter, err = strftime.New(timeFmt, strftime.WithMilliseconds('L'), strftime.WithMicroseconds('f'))\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"[firehose %d] Issue with strftime format in 'time_key_format'\", pluginID)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &OutputPlugin{\n\t\tregion: region,\n\t\tdeliveryStream: deliveryStream,\n\t\tclient: client,\n\t\trecords: records,\n\t\tdataKeys: dataKeys,\n\t\ttimer: timer,\n\t\ttimeKey: timeKey,\n\t\tfmtStrftime: timeFormatter,\n\t\tlogKey: logKey,\n\t\tPluginID: pluginID,\n\t\treplaceDots: replaceDots,\n\t\tsimpleAggregation: simpleAggregation,\n\t}, nil\n}",
"func New(topic string, pub Publisher) *Output {\n\treturn &Output{\n\t\ttopic: topic,\n\t\tpub: pub,\n\t}\n}",
"func (m Mock) NewOutput(ctx context.Context, output backend.Output) {}",
"func newOutput(txHash *chainhash.Hash, vout uint32, value uint64, tree int8) *output {\n\treturn &output{\n\t\tpt: outPoint{\n\t\t\ttxHash: *txHash,\n\t\t\tvout: vout,\n\t\t},\n\t\tvalue: value,\n\t\ttree: tree,\n\t}\n}",
"func NewOutput() (Output, func(interface{}), func(error)) {\n\tout := newOutput()\n\n\tresolve := func(v interface{}) {\n\t\tout.s.resolve(v, true)\n\t}\n\treject := func(err error) {\n\t\tout.s.reject(err)\n\t}\n\n\treturn out, resolve, reject\n}",
"func NewOutput() *Output {\n\treturn &Output{\n\t\tConnections: make(map[Connection]bool),\n\t}\n}",
"func (tc *TypedConfig) NewOutput(ctx context.Context) (Output, error) {\n\tif err := tc.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tc.Config.NewOutput(ctx)\n}",
"func NewOutput(t mockConstructorTestingTNewOutput) *Output {\n\tmock := &Output{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewOutput(conf output.MongoDBConfig, mgr bundle.NewManagement) (output.Streamed, error) {\n\tm, err := NewWriter(mgr, conf, mgr.Logger(), mgr.Metrics())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar w output.Streamed\n\tif w, err = output.NewAsyncWriter(\"mongodb\", conf.MaxInFlight, m, mgr); err != nil {\n\t\treturn w, err\n\t}\n\treturn batcher.NewFromConfig(conf.Batching, w, mgr)\n}",
"func NewOutputter(outputFormat string) (Outputter, error) {\n\tif _, exists := registry.Outputs[outputFormat]; !exists {\n\t\treturn nil, ErrorUnknownOutputter\n\t}\n\tfactory, ok := registry.Outputs[outputFormat]\n\tif !ok {\n\t\treturn nil, ErrorInvalidOutputter\n\t}\n\to := factory()\n\treturn o, nil\n}",
"func (a *Agent) StartOutput(ctx context.Context, pluginName string) (string, error) {\n\toutputConfig := models.OutputConfig{\n\t\tName: pluginName,\n\t}\n\n\toutput, err := a.CreateOutput(pluginName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tuniqueId, err := uuid.NewUUID()\n\tif err != nil {\n\t\treturn \"\", errors.New(\"errored while generating UUID for new INPUT\")\n\t}\n\n\tro := models.NewRunningOutput(pluginName, output, &outputConfig,\n\t\ta.Config.Agent.MetricBatchSize, a.Config.Agent.MetricBufferLimit, uniqueId.String())\n\n\terr = ro.Init()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = a.connectOutput(ctx, ro)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = a.RunSingleOutput(ro, ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// add new output to outputunit\n\ta.ou.outputs = append(a.ou.outputs, ro)\n\n\terr = a.Config.UpdateConfig(map[string]interface{}{\"unique_id\": uniqueId.String(), \"name\": pluginName}, uniqueId.String(), \"outputs\", \"START_PLUGIN\")\n\tif err != nil {\n\t\tlog.Printf(\"W! [agent] Unable to save configuration for output %s\", uniqueId.String())\n\t}\n\treturn uniqueId.String(), nil\n}",
"func NewOutput() (Output, func(interface{}), func(error)) {\n\tout := newOutputState(anyType)\n\n\tresolve := func(v interface{}) {\n\t\tout.resolve(v, true, false)\n\t}\n\treject := func(err error) {\n\t\tout.reject(err)\n\t}\n\n\treturn AnyOutput{out}, resolve, reject\n}",
"func NewOutputController(service *goa.Service, om *output.Manager) *OutputController {\n\treturn &OutputController{\n\t\tController: service.NewController(\"OutputController\"),\n\t\tom: om,\n\t}\n}",
"func NewOutput(output *Synapse) *Neuron {\n\treturn &Neuron{\n\t\tInputs: []*Synapse{},\n\t\tOutputs: []*Synapse{output},\n\t\tFunction: func(inputs, outputs []*Synapse) {\n\t\t\tvar sum float64\n\t\t\tfor _, s := range inputs {\n\t\t\t\tsum += (*s.Value * *s.Weight)\n\t\t\t}\n\t\t\toutputs[0].Value = &sum\n\t\t},\n\t}\n}",
"func (pub *Publisher) CreateOutput(nodeHWID string, outputType types.OutputType,\n\tinstance string) *types.OutputDiscoveryMessage {\n\toutput := pub.registeredOutputs.CreateOutput(nodeHWID, outputType, instance)\n\treturn output\n}",
"func (out *elasticsearchOutput) Init(beat string, config outputs.MothershipConfig, topology_expire int) error {\n\n\tif len(config.Protocol) == 0 {\n\t\tconfig.Protocol = \"http\"\n\t}\n\n\tvar urls []string\n\n\tif len(config.Hosts) > 0 {\n\t\t// use hosts setting\n\t\tfor _, host := range config.Hosts {\n\t\t\turl := fmt.Sprintf(\"%s://%s%s\", config.Protocol, host, config.Path)\n\t\t\turls = append(urls, url)\n\t\t}\n\t} else {\n\t\t// use host and port settings\n\t\turl := fmt.Sprintf(\"%s://%s:%d%s\", config.Protocol, config.Host, config.Port, config.Path)\n\t\turls = append(urls, url)\n\t}\n\n\tes := NewElasticsearch(urls, config.Username, config.Password)\n\tout.Conn = es\n\n\tif config.Index != \"\" {\n\t\tout.Index = config.Index\n\t} else {\n\t\tout.Index = beat\n\t}\n\n\tout.TopologyExpire = 15000\n\tif topology_expire != 0 {\n\t\tout.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec\n\t}\n\n\tout.FlushInterval = 1000 * time.Millisecond\n\tif config.Flush_interval != nil {\n\t\tout.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond\n\t}\n\tout.BulkMaxSize = 10000\n\tif config.Bulk_size != nil {\n\t\tout.BulkMaxSize = *config.Bulk_size\n\t}\n\n\tif config.Max_retries != nil {\n\t\tout.Conn.SetMaxRetries(*config.Max_retries)\n\t}\n\n\tlogp.Info(\"[ElasticsearchOutput] Using Elasticsearch %s\", urls)\n\tlogp.Info(\"[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD\", out.Index)\n\tlogp.Info(\"[ElasticsearchOutput] Topology expires after %ds\", out.TopologyExpire/1000)\n\tif out.FlushInterval > 0 {\n\t\tlogp.Info(\"[ElasticsearchOutput] Insert events in batches. Flush interval is %s. Bulk size is %d.\", out.FlushInterval, out.BulkMaxSize)\n\t} else {\n\t\tlogp.Info(\"[ElasticsearchOutput] Insert events one by one. This might affect the performance of the shipper.\")\n\t}\n\n\tif config.Save_topology {\n\t\terr := out.EnableTTL()\n\t\tif err != nil {\n\t\t\tlogp.Err(\"Fail to set _ttl mapping: %s\", err)\n\t\t\t// keep trying in the background\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\terr := out.EnableTTL()\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tlogp.Err(\"Fail to set _ttl mapping: %s\", err)\n\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\n\tout.sendingQueue = make(chan EventMsg, 1000)\n\tgo out.SendMessagesGoroutine()\n\n\treturn nil\n}",
"func New(\n\tconf Config,\n\tmgr interop.Manager,\n\tlog log.Modular,\n\tstats metrics.Type,\n\tpipelines ...iprocessor.PipelineConstructorFunc,\n) (output.Streamed, error) {\n\tif mgrV2, ok := mgr.(interface {\n\t\tNewOutput(Config, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error)\n\t}); ok {\n\t\treturn mgrV2.NewOutput(conf, pipelines...)\n\t}\n\tif c, ok := Constructors[conf.Type]; ok {\n\t\treturn c.constructor(conf, mgr, log, stats, pipelines...)\n\t}\n\treturn nil, component.ErrInvalidType(\"output\", conf.Type)\n}",
"func NewOutput(w io.Writer, configFunc ...func(*Output)) *Output {\n\tout := &Output{\n\t\tStyle: style.DefaultStyle(),\n\t\tindentStyle: DefaultIndentString,\n\t\twriter: w,\n\t\tisTerm: isTerm(w),\n\t}\n\tfor _, f := range configFunc {\n\t\tf(out)\n\t}\n\treturn out\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
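The BulkMaxSize handling above is the pointer-as-optional idiom: a nil pointer means the field was never set, so the constructor writes the default back into the config before use. The idiom by itself; the defaultBulkSize value below is an assumption for illustration, the real constant lives elsewhere in the output package:

package main

import "fmt"

const defaultBulkSize = 50000 // assumed value, for illustration only

type MothershipConfig struct {
	BulkMaxSize *int // nil means "not set in the config file"
}

func applyDefaults(c *MothershipConfig) {
	if c.BulkMaxSize == nil {
		bulkSize := defaultBulkSize
		c.BulkMaxSize = &bulkSize
	}
}

func main() {
	var c MothershipConfig
	applyDefaults(&c)
	fmt.Println(*c.BulkMaxSize) // 50000
}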
New returns a new PagerDuty notifier. | func New(c *config.PagerdutyConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "pagerduty", httpOpts...)
if err != nil {
return nil, err
}
n := &Notifier{conf: c, tmpl: t, logger: l, client: client}
if c.ServiceKey != "" || c.ServiceKeyFile != "" {
n.apiV1 = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
// Retrying can solve the issue on 403 (rate limiting) and 5xx response codes.
// https://v2.developer.pagerduty.com/docs/trigger-events
n.retrier = ¬ify.Retrier{RetryCodes: []int{http.StatusForbidden}, CustomDetailsFunc: errDetails}
} else {
// Retrying can solve the issue on 429 (rate limiting) and 5xx response codes.
// https://v2.developer.pagerduty.com/docs/events-api-v2#api-response-codes--retry-logic
n.retrier = ¬ify.Retrier{RetryCodes: []int{http.StatusTooManyRequests}, CustomDetailsFunc: errDetails}
}
return n, nil
} | [
"func New(c *config.DingTalkConfig, t *template.Template, l log.Logger) (*Notifier, error) {\n\tclient, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, \"dingtalk\", false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{conf: c, tmpl: t, logger: l, client: client}, nil\n}",
"func New(done <-chan bool) *Notifier {\n\tnotifier := Notifier{\n\t\tnotificationMessages: make(chan string),\n\t\tobservers: make(map[chan *model.Notification]bool),\n\t\tdone: done,\n\t}\n\n\tgo notifier.dispatch()\n\n\treturn ¬ifier\n}",
"func New(config json.RawMessage) (Notifier, error) {\n\tvar notifier Notifier\n\terr := json.Unmarshal(config, ¬ifier)\n\treturn notifier, err\n}",
"func New(log *zap.Logger, touchNotifyDelay time.Duration) *Notify {\n\treturn &Notify{\n\t\tlog: log,\n\t\ttouchNotifyDelay: touchNotifyDelay,\n\t}\n}",
"func NewNotifier() *Notifier {\n\treturn &Notifier{\n\t\tc: make(chan struct{}, 1),\n\t}\n}",
"func NewNotifier(cfg Config) (forward.Notifier, error) {\n\terr := cfg.defaults()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %w\", err, internalerrors.ErrInvalidConfiguration)\n\t}\n\n\treturn ¬ifier{\n\t\tcfg: cfg,\n\t\ttplRenderer: cfg.TemplateRenderer,\n\t\tclient: cfg.Client,\n\t\tlogger: cfg.Logger.WithValues(log.KV{\"notifier\": \"telegram\"}),\n\t}, nil\n}",
"func NewNotifier(slack *chat.Slack) (*Notifier, error) {\n\tnotifier := &Notifier{s: slack, db: slack.DB, conf: slack.Conf}\n\treturn notifier, nil\n}",
"func New(d *dut.DUT) *Reporter {\n\treturn &Reporter{d}\n}",
"func NewNotifier(config *config.Config) Notifier {\n\t// webhook URL and template are required\n\tif len(config.WebHookURL) > 0 && len(config.WebHookTemplate) > 0 {\n\t\treturn &baseNotifier{config}\n\t}\n\t// otherwise return noop\n\treturn &noopNotifier{baseNotifier{config}}\n}",
"func New(url string) *SlackNotify {\n\treturn &SlackNotify{\n\t\tURL: url,\n\t\tc: http.Client{\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t}\n}",
"func New(summary, body string) *Notification {\n\tnoti := Notification{}\n\tnoti.summary = summary\n\tnoti.body = body\n\tnoti.urgency = UrgencyNormal\n\tnoti.timeout = ExpiresDefault\n\tnoti.hints = make(map[string]dbus.Variant, 1)\n\treturn ¬i\n}",
"func New(name, summary, body, icon string, timeout time.Duration, urgency NotificationUrgency) *Notification {\n\treturn &Notification{name, summary, body, icon, timeout, urgency}\n}",
"func NewNotifier(token string) *Notifier {\n\treturn &Notifier{\n\t\tToken: token,\n\t\tClient: nil,\n\t}\n}",
"func New(c *config.AliyunSmsConfig, t *template.Template, l log.Logger) (*Notifier, error) {\n\tclient, err := dysmsapi.NewClientWithAccessKey(\"cn-hangzhou\", c.AccessKeyId, c.AccessSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{conf: c, tmpl: t, logger: l, client: client}, nil\n}",
"func NewNotifier() WakeSleepNotifier {\n\treturn new(notifier)\n}",
"func New(dependencies Dependencies) {\n\twriter = dependencies.Writer\n\treader = dependencies.Reader\n\thost = dependencies.Host\n\tnotifierService = dependencies.NotifierService\n}",
"func New() *Prober {\n\treturn newForTest(time.Now, newRealTicker)\n}",
"func New() *Checker {\n\treturn &Checker{\n\t\tHostname: Hostname,\n\t\tMaxTimeInFailure: maxTimeInFailure,\n\t}\n}",
"func NewNotifier() base.Notifier {\n\treturn &webhookNotifier{}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
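The constructor above selects retry codes by API generation: the legacy Events API (service key set) signals rate limiting with 403, while v2 uses 429; 5xx responses are retryable in both cases per the linked docs. The branch in isolation, with a local stand-in for alertmanager's notify.Retrier and keyed on the service key alone for brevity (the real check also accepts a service key file):

package main

import (
	"fmt"
	"net/http"
)

// retrier is a reduced stand-in for notify.Retrier; only the explicit
// retry codes are modeled here.
type retrier struct{ retryCodes []int }

func newRetrier(serviceKey string) retrier {
	if serviceKey != "" { // legacy Events API (v1)
		return retrier{retryCodes: []int{http.StatusForbidden}} // 403
	}
	// Events API v2
	return retrier{retryCodes: []int{http.StatusTooManyRequests}} // 429
}

func main() {
	fmt.Println(newRetrier("key").retryCodes) // [403]
	fmt.Println(newRetrier("").retryCodes)    // [429]
}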
NewEndpoint creates a new endpoint. To keep things simple, the endpoint listens on a fixed port number. | func NewEndpoint() *Endpoint {
// Create a new Endpoint with an empty list of handler funcs.
return &Endpoint{
handler: map[string]HandleFunc{},
}
} | [
"func (*protocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber,\n\twaiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n\treturn newEndpoint(stack, netProto, waiterQueue), nil\n}",
"func NewEndpoint(resource, httpMethod, route string) *Endpoint {\n\treturn &Endpoint{\n\t\tResource: resource,\n\t\tHTTPMethod: httpMethod,\n\t\tRoute: route,\n\t\tBodyParameters: []*Parameter{},\n\t\tRequests: []*Request{},\n\t}\n}",
"func newEndpoint() *testSocket {\n\tp := fmt.Sprintf(\"@%s.sock\", uuid.NewUUID())\n\n\treturn &testSocket{\n\t\tpath: p,\n\t\tendpoint: fmt.Sprintf(\"unix:///%s\", p),\n\t}\n}",
"func NewEndpoint(ctx *pulumi.Context,\n\tname string, args *EndpointArgs, opts ...pulumi.ResourceOption) (*Endpoint, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.EndpointId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'EndpointId'\")\n\t}\n\tif args.Network == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Network'\")\n\t}\n\tif args.Severity == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Severity'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"endpointId\",\n\t\t\"location\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Endpoint\n\terr := ctx.RegisterResource(\"google-native:ids/v1:Endpoint\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewEndpoint(ctx *pulumi.Context,\n\tname string, args *EndpointArgs, opts ...pulumi.ResourceOption) (*Endpoint, error) {\n\tif args == nil || args.EndpointId == nil {\n\t\treturn nil, errors.New(\"missing required argument 'EndpointId'\")\n\t}\n\tif args == nil || args.Service == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Service'\")\n\t}\n\tif args == nil {\n\t\targs = &EndpointArgs{}\n\t}\n\tvar resource Endpoint\n\terr := ctx.RegisterResource(\"gcp:servicedirectory/endpoint:Endpoint\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewEndpoint(conn *websocket.Conn, registry *Registry) *Endpoint {\n\tif registry == nil {\n\t\tregistry = dummyRegistry\n\t}\n\te := &Endpoint{}\n\te.conn = conn\n\te.server.registry = registry\n\te.client.pending = make(map[uint64]*rpc.Call)\n\treturn e\n}",
"func NewEndpoint(ctx *pulumi.Context,\n\tname string, args *EndpointArgs, opts ...pulumi.ResourceOption) (*Endpoint, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.EndpointId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'EndpointId'\")\n\t}\n\tif args.NamespaceId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'NamespaceId'\")\n\t}\n\tif args.ServiceId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ServiceId'\")\n\t}\n\tvar resource Endpoint\n\terr := ctx.RegisterResource(\"google-native:servicedirectory/v1:Endpoint\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func New(bc component.Core) *Endpoint {\n\treturn &Endpoint{\n\t\tCore: bc,\n\t}\n}",
"func NewEndpoint(r io.Reader, w io.Writer, c io.Closer, options ...Option) (*Endpoint, error) {\n\tbw := bufio.NewWriter(w)\n\te := &Endpoint{\n\t\tdone: make(chan struct{}),\n\t\thandlers: make(map[string]*handler),\n\t\tpending: make(map[uint64]*Call),\n\t\tcloser: c,\n\t\tbw: bw,\n\t\tenc: msgpack.NewEncoder(bw),\n\t\tdec: msgpack.NewDecoder(r),\n\t}\n\tfor _, option := range options {\n\t\toption.f(e)\n\t}\n\treturn e, nil\n\n}",
"func New(lower stack.LinkEndpoint) *Endpoint {\n\treturn &Endpoint{\n\t\tlower: lower,\n\t}\n}",
"func newEndpoints() *Endpoints {\n\treturn &Endpoints{\n\t\tBackends: map[string]service.PortConfiguration{},\n\t}\n}",
"func NewEndpoint(prefix string, store streamstore.Storage) http.Handler {\n\tendpoint := &Endpoint{mux.NewRouter(), store, prefix}\n\tendpoint.router.Path(\"/\").Methods(\"POST\").HandlerFunc(endpoint.handlePost)\n\tendpoint.router.Path(\"/\").Methods(\"GET\").HandlerFunc(endpoint.handleList)\n\tendpoint.router.Path(\"/{id}\").Methods(\"GET\").HandlerFunc(endpoint.handleGet)\n\tendpoint.router.Path(\"/{id}\").Methods(\"PUT\").HandlerFunc(endpoint.handlePut)\n\tendpoint.router.Path(\"/{id}\").Methods(\"PATCH\").HandlerFunc(endpoint.handlePatch)\n\tendpoint.router.Path(\"/{id}\").Methods(\"DELETE\").HandlerFunc(endpoint.handleDel)\n\treturn endpoint\n}",
"func newRESTEndpointService(hostPortStr string) endpointService {\n\treturn endpointService(\n\t\tnewRESTDiscoveryService(fmt.Sprintf(edsRestEndpointTemplate, hostPortStr)),\n\t)\n}",
"func NewVirtualEndpoint()(*VirtualEndpoint) {\n m := &VirtualEndpoint{\n Entity: *NewEntity(),\n }\n return m\n}",
"func New(lower tcpip.LinkEndpointID) tcpip.LinkEndpointID {\n\treturn stack.RegisterLinkEndpoint(&endpoint{\n\t\tlower: stack.FindLinkEndpoint(lower),\n\t})\n}",
"func makeEndpoint(hostport, serviceName string) *zipkincore.Endpoint {\n\thost, port, err := net.SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn nil\n\t}\n\taddrs, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tif len(addrs) <= 0 {\n\t\treturn nil\n\t}\n\tportInt, err := strconv.ParseInt(port, 10, 16)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tendpoint := zipkincore.NewEndpoint()\n\tbinary.LittleEndian.PutUint32(addrs[0], (uint32)(endpoint.Ipv4))\n\tendpoint.Port = int16(portInt)\n\tendpoint.ServiceName = serviceName\n\treturn endpoint\n}",
"func NewAddEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\tp := req.(*AddPayload)\n\t\treturn s.Add(ctx, p)\n\t}\n}",
"func NewLocalEndpoint() (*Endpoint, error) {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ip []string\n\tfor _, addr := range addrs {\n\t\tipnet, ok := addr.(*net.IPNet)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif ipnet.IP.IsLoopback() {\n\t\t\tcontinue\n\t\t}\n\t\tif ipnet.IP.To4() != nil {\n\t\t\tip = append(ip, ipnet.IP.String())\n\t\t}\n\t}\n\n\treturn &Endpoint{\n\t\tIP: ip,\n\t\tPort: make(map[string]int),\n\t}, nil\n}",
"func NewAddEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req any) (any, error) {\n\t\tep := req.(*AddEndpointInput)\n\t\treturn nil, s.Add(ctx, ep.Payload, ep.Stream)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AddHandleFunc adds a new function for handling incoming data. | func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) {
e.mutex.Lock()
e.handler[name] = f
e.mutex.Unlock()
} | [
"func (p *ServerParams) AddHandleFunc(handlerFunc GrpcHandler, grpcProxyHandler GrpcProxyHandler) {\n\tif handlerFunc != nil {\n\t\tp.handlersForGrpc = append(p.handlersForGrpc, handlerFunc)\n\t}\n\tif grpcProxyHandler != nil {\n\t\tp.handlersForGrpcProxy = append(p.handlersForGrpcProxy, grpcProxyHandler)\n\t}\n}",
"func (l *logPipe) HandleFunc(hf func(string) error) {\n\tl.handleFunc = hf\n}",
"func (c *CmdRunner) HandleFunc(cmdId string, handler func(cmdMessage CmdMessage)) {\n\tc.Handlers[cmdId] = handler\n}",
"func HandleFunc(path string, pattern string, handle func(\n\thttp.ResponseWriter, *http.Request, map[string]string)) (\n\tr *RouteHandler) {\n\thandler := &wrapHandler{handle: handle}\n\tr = Handle(path, pattern, handler)\n\treturn\n}",
"func (r *Router) HandleFunc(pattern string, fn HandlerFunc) {\n\tcmd := commander.NewCommand(pattern)\n\tr.commands = append(r.commands, command{\n\t\tCommand: cmd,\n\t\tpattern: pattern,\n\t\thandler: fn,\n\t})\n\tlog.WithField(\"pattern\", pattern).Debug(\"added command to router\")\n}",
"func HandleFunc(c Checker, pattern string, h http.HandlerFunc) {\n\thttp.HandleFunc(pattern, HandlerFunc(c, h))\n}",
"func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request))",
"func HandleFunc(method string, path string, h interface{}) {\n\tDefaultMux.HandleFunc(method, path, h)\n}",
"func HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tDefaultServeMux.HandleFunc(pattern, handler)\n}",
"func (s *Simple) HandleFunc(method string, handler kite.HandlerFunc) {\n\ts.Server.Kite.HandleFunc(method, handler)\n}",
"func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {\n\tDefaultServeMux.HandleFunc(pattern, handler)\n}",
"func (r *RouteHandler) AddPatternHandlerFunc(pattern string, handle func(\n\thttp.ResponseWriter, *http.Request, map[string]string)) {\n\thandler := &wrapHandler{handle: handle}\n\tr.AddPatternHandler(pattern, handler)\n}",
"func (e *Exporter) HandleFunc(url string, f func(w http.ResponseWriter, r *http.Request)) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\tif e.name == \"\" {\n\t\tHTTPHandleFunc(url, f)\n\t\treturn\n\t}\n\n\tif hf, ok := e.handleFuncs[url]; ok {\n\t\thf.Set(f)\n\t\treturn\n\t}\n\thf := &handleFunc{f: f}\n\te.handleFuncs[url] = hf\n\n\tHTTPHandleFunc(e.URLPrefix()+url, func(w http.ResponseWriter, r *http.Request) {\n\t\tif f := hf.Get(); f != nil {\n\t\t\tf(w, r)\n\t\t}\n\t})\n}",
"func (k *Kite) HandleHTTPFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tk.muxer.HandleFunc(pattern, handler)\n}",
"func (api *API) RegisterHandleFunction(method string, path string, f func(w http.ResponseWriter, r *http.Request)) {\n\tmethod = strings.ToUpper(method)\n\tswitch method {\n\tcase \"GET\":\n\t\tapi.get(path, f)\n\tcase \"PUT\":\n\t\tapi.put(path, f)\n\tcase \"POST\":\n\t\tapi.post(path, f)\n\tcase \"DELETE\":\n\t\tapi.delete(path, f)\n\tcase \"OPTIONS\":\n\t\tapi.options(path, f)\n\t}\n}",
"func (s *Stub) HandleFunc(fn func(http.ResponseWriter, *http.Request)) {\n\ts.response.handler = fn\n}",
"func (p *spaDevProxy) HandleFunc(w http.ResponseWriter, r *http.Request) {\n\tp.proxy.ServeHTTP(w, r)\n}",
"func (m *ServeMux) HandleFunc(command string, handler func(conn Conn, cmd Command)) {\n\tif handler == nil {\n\t\tpanic(\"redcon: nil handler\")\n\t}\n\tm.Handle(command, HandlerFunc(handler))\n}",
"func (a *Asock) AddHandler(name string, argmode string, df DispatchFunc) error {\n\tif _, ok := a.d[name]; ok {\n\t\treturn fmt.Errorf(\"handler '%v' already exists\", name)\n\t}\n\tif argmode != \"split\" && argmode != \"nosplit\" {\n\t\treturn fmt.Errorf(\"invalid argmode '%v'\", argmode)\n\t}\n\ta.d[name] = &dispatchFunc{df, argmode}\n\ta.help = \"\"\n\tfor cmd := range a.d {\n\t\ta.help = a.help + cmd + \" \"\n\t}\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
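The mutex in AddHandleFunc above matters because Listen (next row) hands each accepted connection to its own goroutine, so the handler map can be read while registrations are still happening. A reduced model of that guarded-registry pattern:

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu sync.Mutex
	m  map[string]func(string)
}

// add registers a handler under the lock, mirroring AddHandleFunc.
func (r *registry) add(name string, f func(string)) {
	r.mu.Lock()
	r.m[name] = f
	r.mu.Unlock()
}

// lookup is the read side a connection goroutine would use.
func (r *registry) lookup(name string) (func(string), bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	f, ok := r.m[name]
	return f, ok
}

func main() {
	r := &registry{m: map[string]func(string){}}
	r.add("STRING", func(s string) { fmt.Println("handled:", s) })
	if f, ok := r.lookup("STRING"); ok {
		f("hello")
	}
}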
Listen starts listening on the endpoint port on all interfaces. At least one handler function must have been added through AddHandleFunc() before. | func (e *Endpoint) Listen() error {
var err error
e.listener, err = net.Listen("tcp", Port)
if err != nil {
return errors.Wrapf(err, "Unable to listen on port %s\n", Port)
}
log.Println("Listen on", e.listener.Addr().String())
for {
log.Println("Accept a connection request.")
conn, err := e.listener.Accept()
if err != nil {
log.Println("Failed accepting a connection request:", err)
continue
}
log.Println("Handle incoming messages.")
go e.handleMessages(conn)
}
} | [
"func (r *EndpointRegistry) Listen(listener Listener) {\n\tif !r.OnCloseAlways(func() {\n\t\tif err := listener.Close(); err != nil {\n\t\t\tr.Log().Debugf(\"EndpointRegistry.Listen: closing listener OnClose: %v\", err)\n\t\t}\n\t}) {\n\t\treturn\n\t}\n\n\t// Start listener and accept all incoming peer connections, writing them to\n\t// the registry.\n\tfor {\n\t\tconn, err := listener.Accept(r.ser)\n\t\tif err != nil {\n\t\t\tr.Log().Debugf(\"EndpointRegistry.Listen: Accept() loop: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tr.Log().Debug(\"EndpointRegistry.Listen: setting up incoming connection\")\n\t\t// setup connection in a separate routine so that new incoming\n\t\t// connections can immediately be handled.\n\t\tgo func() {\n\t\t\tif err := r.setupConn(conn); err != nil {\n\t\t\t\tlog.WithError(err).Error(\"EndpointRegistry could not setup wire/net.Conn\")\n\t\t\t}\n\t\t}()\n\t}\n}",
"func Listen(endpoint string, rcvBufSize uint32) (*Listener, error) {\n\tnetwork, laddr, err := utils.ResolveEndpoint(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlis := &Listener{\n\t\tendpoint: endpoint,\n\t\trcvBufSize: rcvBufSize,\n\t\tsndBufSize: 0xffff,\n\t}\n\tlis.lowerListener, err = net.Listen(network, laddr.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lis, nil\n}",
"func Listen(fullHostOrPort interface{}) error {\n\tcheck()\n\treturn mainIris.Listen(fullHostOrPort)\n}",
"func Listen(env *models.Env) {\n\n\tr := mux.NewRouter().StrictSlash(false)\n\n\tv1 := r.PathPrefix(\"/v1\").Subrouter()\n\n\t// HelloWorld Endpoint\n\taclV1 := v1.PathPrefix(\"/profiles\").Subrouter()\n\taclV1.Handle(\"\", handlers.CustomHandle(env, handlers.AddVerneMQACL)).Methods(\"POST\")\n\taclV1.Handle(\"/mappings\", handlers.CustomHandle(env, handlers.GetMappingForUsers)).Methods(\"POST\")\n\n\tconversationsV1 := v1.PathPrefix(\"/conversations\").Subrouter()\n\tconversationsV1.Handle(\"/group\", handlers.CustomHandle(env, handlers.AddGroupConversation)).Methods(\"POST\")\n\n\tcorsHandler := cors.New(cors.Options{\n\t\tAllowedHeaders: []string{\"X-Requested-With\"},\n\t\tAllowedOrigins: []string{\"*\"},\n\t\tAllowCredentials: true,\n\t\tAllowedMethods: []string{\"GET\", \"HEAD\", \"POST\", \"PUT\", \"OPTIONS\"},\n\t})\n\n\thttp.ListenAndServe(\":\"+fmt.Sprintf(\"%d\", PORT), corsHandler.Handler(r))\n}",
"func (router *routerSocket) Listen(ep string) error {\n\treturn router.sck.Listen(ep)\n}",
"func Listen(addr string, handler HandlerFunc) error {\n\tif addr == \"\" {\n\t\taddr = \"localhost:4573\"\n\t}\n\n\tl, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to bind server\")\n\t}\n\tdefer l.Close() // nolint: errcheck\n\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to accept TCP connection\")\n\t\t}\n\n\t\tgo handler(NewConn(conn))\n\t}\n}",
"func (s *Service) listen(addr string) {\n\t//Create a new listener\n\tlog.Info(\"service.listen\", \"starting the listner at \"+addr)\n\n\tl, err := listener.New(addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl.SetReadTimeout(120 * time.Second)\n\n\t// Configure the protos\n\tif s.config.GrpcListen != \"\" {\n\t\tgrpcList, err := netListener(s.config.GrpcListen)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\ts.grpc.Serve(grpcList)\n\t}\n\tl.ServeCallback(listener.MatchWS(\"GET\"), s.http.Serve)\n\tl.ServeCallback(listener.MatchAny(), s.tcp.Serve)\n\n\tgo l.Serve()\n}",
"func (r *Router) Listen() {\n\tlambda.Start(r.LambdaHandler)\n}",
"func (d *Daemon) Listen() {\n\tdefer d.Socket.Close()\n\trpc.RegisterHandler()\n\n\tfor {\n\t\tconn, err := d.Socket.Accept()\n\t\tif err != nil {\n\t\t\tutils.LogFatalf(\"Socket connection error: %+v\\n\", err)\n\t\t}\n\n\t\tutils.LogInfof(\"New socket connection from %s\\n\", conn.RemoteAddr().String())\n\t\tgo rpc.HandleConnection(conn)\n\t}\n}",
"func Serve(eventHandler EventHandler, addr ...string) error {\n\tvar lns []*listener\n\tdefer func() {\n\t\tfor _, ln := range lns {\n\t\t\tln.close()\n\t\t}\n\t}()\n\tvar stdlib bool\n\tfor _, addr := range addr {\n\t\tvar ln listener\n\t\tvar stdlibt bool\n\t\tln.network, ln.addr, ln.reuseport, stdlibt = parseAddr(addr)\n\t\tif stdlibt {\n\t\t\tstdlib = true\n\t\t}\n\t\tif ln.network == \"unix\" {\n\t\t\tos.RemoveAll(ln.addr)\t//remove existed socket file for sockets' communication\n\t\t}\n\t\tvar err error\n\t\tif ln.network == \"udp\" {\n\t\t\tif ln.reuseport {\n\t\t\t\t//ln.pconn, err = reuse\n\t\t\t} else {\n\t\t\t\tln.pconn, err = net.ListenPacket(ln.network, ln.addr)\n\t\t\t}\n\t\t} else {\n\t\t\tif ln.reuseport {\n\t\t\t\t//operation for reuseport\n\t\t\t} else {\n\t\t\t\tln.ln, err = net.Listen(ln.network, ln.addr)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif ln.pconn != nil {\n\t\t\tln.lnaddr = ln.pconn.LocalAddr()\n\t\t} else {\n\t\t\tln.lnaddr = ln.ln.Addr()\n\t\t}\n\t\tif !stdlib {\n\t\t\tif err := ln.system(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tlns = append(lns, &ln)\n\t}\n\treturn serve(eventHandler, lns)\n}",
"func (g *Goer) listen() {\n\tif g.socketName == \"\" {\n\t\treturn\n\t}\n\n\tif g.mainSocket == nil {\n\t\tswitch g.Transport {\n\t\tcase \"tcp\", \"tcp4\", \"tcp6\", \"unix\", \"unixpacket\", \"ssl\":\n\t\t\tif len(os.Args) > 2 && os.Args[2] == \"graceful\" {\n\t\t\t\tfile := os.NewFile(3, \"\")\n\t\t\t\tlistener, err := net.FileListener(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlib.Fatal(\"Fail to listen tcp: %v\", err)\n\t\t\t\t}\n\t\t\t\tg.mainSocket = listener.(*net.TCPListener)\n\t\t\t} else {\n\t\t\t\taddr, err := net.ResolveTCPAddr(g.Transport, g.socketName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlib.Fatal(\"fail to resolve addr: %v\", err)\n\t\t\t\t}\n\t\t\t\tlistener, err := net.ListenTCP(\"tcp\", addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlib.Fatal(\"fail to listen tcp: %v\", err)\n\t\t\t\t}\n\t\t\t\tg.mainSocket = listener\n\t\t\t}\n\t\tcase \"udp\", \"upd4\", \"udp6\", \"unixgram\":\n\t\t\tlistener, err := net.ListenPacket(g.Transport, g.socketName)\n\t\t\tif err != nil {\n\t\t\t\tlib.Fatal(err.Error())\n\t\t\t}\n\t\t\tg.mainSocket = listener\n\t\tdefault:\n\t\t\tlib.Fatal(\"unknown transport layer protocol\")\n\t\t}\n\n\t\tlib.Info(\"server start success...\")\n\t\tg.status = StatusRunning\n\n\t\tgo g.resumeAccept()\n\t}\n}",
"func Listen(network, addr string) (net.Listener, error) {\n\treturn nil, &ErrNoReusePort{fmt.Errorf(\"Not supported on Windows\")}\n}",
"func (t *TrapListener) Listen(addr string) error {\n\tif t.Params == nil {\n\t\tt.Params = Default\n\t}\n\n\t// TODO TODO returning an error cause the following to hang/break\n\t// TestSendTrapBasic\n\t// TestSendTrapWithoutWaitingOnListen\n\t// TestSendV1Trap\n\t_ = t.Params.validateParameters()\n\n\tif t.OnNewTrap == nil {\n\t\tt.OnNewTrap = t.debugTrapHandler\n\t}\n\n\tsplitted := strings.SplitN(addr, \"://\", 2)\n\tt.proto = udp\n\tif len(splitted) > 1 {\n\t\tt.proto = splitted[0]\n\t\taddr = splitted[1]\n\t}\n\n\tswitch t.proto {\n\tcase tcp:\n\t\treturn t.listenTCP(addr)\n\tcase udp:\n\t\treturn t.listenUDP(addr)\n\tdefault:\n\t\treturn fmt.Errorf(\"not implemented network protocol: %s [use: tcp/udp]\", t.proto)\n\t}\n}",
"func Listen(proto, addr string) (l net.Listener, err error) {\n\treturn NewReusablePortListener(proto, addr)\n}",
"func (s *Server) Listen(address string, port int) {\n\tif port == 0 {\n\t\tport = 8067\n\t}\n\tif address == \"\" {\n\t\taddress = \"0.0.0.0\"\n\t}\n\tserverAddr := fmt.Sprintf(\"%s:%d\", address, port)\n\tfor _, m := range Metrics {\n\t\tprometheus.MustRegister(m)\n\t}\n\thttp.Handle(\"/metrics\", prometheus.Handler())\n\thttp.ListenAndServe(serverAddr, nil)\n}",
"func (z *Zipkin) Listen(ln net.Listener, acc telegraf.Accumulator) {\n\tif err := z.server.Serve(ln); err != nil {\n\t\t// Because of the clean shutdown in `(*Zipkin).Stop()`\n\t\t// We're expecting a server closed error at some point\n\t\t// So we don't want to display it as an error.\n\t\t// This interferes with telegraf's internal data collection,\n\t\t// by making it appear as if a serious error occurred.\n\t\tif err != http.ErrServerClosed {\n\t\t\tacc.AddError(fmt.Errorf(\"error listening: %w\", err))\n\t\t}\n\t}\n}",
"func (s *Server) listen(listener net.Listener) {\n\tfor {\n\t\t// Accept a connection\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tif s.shutdown {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.logger.Printf(\"[ERR] consul.rpc: failed to accept RPC conn: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo s.handleConn(conn, false)\n\t\tmetrics.IncrCounter([]string{\"rpc\", \"accept_conn\"}, 1)\n\t}\n}",
"func (s *Service) Listen() (err error) {\n\tdefer s.Close()\n\ts.hookSignals()\n\n\ts.listen(s.config.Listen)\n\n\tlog.Info(\"service\", \"service started\")\n\tselect {}\n}",
"func (d *DNSResolver) Listen(ctx context.Context) error {\n\tcli := d.cli\n\n\targs := filters.NewArgs()\n\n\tmessages, errC := cli.Events(ctx, types.EventsOptions{Filters: args})\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tcase err := <-errC:\n\t\t\treturn err\n\t\tcase event, ok := <-messages:\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tgo d.lookupIPs(event)\n\t\t}\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
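Listen above references a package-level Port that is not defined anywhere in these rows. net.Listen takes an address string of the form "host:port", so Port must carry the leading colon; the concrete number below is purely illustrative, an assumption rather than a value taken from the dataset.

// Assumed, not shown in the dump: an empty host part means all interfaces.
const Port = ":61000"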
handleMessages reads the connection up to the first newline. Based on this string, it calls the appropriate HandleFunc. | func (e *Endpoint) handleMessages(conn net.Conn) {
// Wrap the connection into a buffered reader for easier reading.
rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
defer conn.Close()
// Read from the connection until EOF. Expect a command name as the
// next input. Call the handler that is registered for this command.
for {
log.Print("Receive command '")
cmd, err := rw.ReadString('\n')
switch {
case err == io.EOF:
log.Println("Reached EOF - close this connection.\n ---")
return
case err != nil:
log.Println("\nError reading command. Got: '"+cmd+"'\n", err)
return
}
// Trim the request string - ReadString does not strip any newlines.
cmd = strings.Trim(cmd, "\n ")
log.Println(cmd + "'")
// Fetch the appropriate handler function from the 'handler' map and call it.
e.mutex.RLock()
handleCommand, ok := e.handler[cmd]
e.mutex.RUnlock()
if !ok {
log.Println("Command '" + cmd + "' is not registered.")
return
}
handleCommand(rw)
}
} | [
"func (cli *Client) HandleIncomingMessages(writeCh chan<- IncomingMessage) {\n\tfor {\n\t\tline, err := cli.r.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[%d] Client error: %s\\n\", cli.id, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tparts := strings.SplitN(line[:len(line)-1], \" \", 2)\n\t\tswitch parts[0] {\n\t\tcase message.RelayType:\n\t\t\tvar size int\n\t\t\tvar sender uint64\n\n\t\t\tif _, err = fmt.Sscanf(parts[1], \"%d %d\", &sender, &size); err != nil {\n\t\t\t\tlog.Printf(\"Message in wrong format: %s\\n\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdata := make([]byte, size)\n\t\t\tif _, err = io.ReadFull(cli.r, data); err != nil {\n\t\t\t\tlog.Printf(\"Cannot read full data: %s\\n\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\twriteCh <- IncomingMessage{SenderID: sender, Body: data}\n\t\tdefault:\n\t\t\tlog.Println(\"Unknown message\")\n\t\t}\n\t}\n}",
"func (srv *MetricReceiver) handleMessage(addr net.Addr, msg []byte) {\n\tbuf := bytes.NewBuffer(msg)\n\tfor {\n\t\tline, readerr := buf.ReadBytes('\\n')\n\n\t\t// protocol does not require line to end in \\n, if EOF use received line if valid\n\t\tif readerr != nil && readerr != io.EOF {\n\t\t\tlog.Printf(\"error reading message from %s: %s\", addr, readerr)\n\t\t\treturn\n\t\t} else if readerr != io.EOF {\n\t\t\t// remove newline, only if not EOF\n\t\t\tif len(line) > 0 {\n\t\t\t\tline = line[:len(line)-1]\n\t\t\t}\n\t\t}\n\n\t\t// Only process lines with more than one character\n\t\tif len(line) > 1 {\n\t\t\tmetric, err := parseLine(line)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error parsing line %q from %s: %s\", line, addr, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo srv.Handler.HandleMetric(metric)\n\t\t}\n\n\t\tif readerr == io.EOF {\n\t\t\t// if was EOF, finished handling\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (r *MetricReceiver) handleMessage(addr net.Addr, msg []byte) {\n\tbuf := bytes.NewBuffer(msg)\n\tfor {\n\t\tline, readerr := buf.ReadBytes('\\n')\n\n\t\t// protocol does not require line to end in \\n, if EOF use received line if valid\n\t\tif readerr != nil && readerr != io.EOF {\n\t\t\tr.handleError(fmt.Errorf(\"error reading message from %s: %s\", addr, readerr))\n\t\t\treturn\n\t\t} else if readerr != io.EOF {\n\t\t\t// remove newline, only if not EOF\n\t\t\tif len(line) > 0 {\n\t\t\t\tline = line[:len(line)-1]\n\t\t\t}\n\t\t}\n\n\t\t// Only process lines with more than one character\n\t\tif len(line) > 1 {\n\t\t\tmetric, err := parseLine(line)\n\t\t\tif err != nil {\n\t\t\t\tr.handleError(fmt.Errorf(\"error parsing line %q from %s: %s\", line, addr, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo r.Handler.HandleMetric(metric)\n\t\t}\n\n\t\tif readerr == io.EOF {\n\t\t\t// if was EOF, finished handling\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (c *Conn) handleMessages() {\n\tfor {\n\t\tselect {\n\t\tcase msg := <-c.outputChan:\n\t\t\t_, err := io.WriteString(c.c, msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error writing to conn %d: %s\\n\", c.id, err)\n\t\t\t}\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (u *Input) goHandleMessages(ctx context.Context) {\n\tu.wg.Add(1)\n\n\tgo func() {\n\t\tdefer u.wg.Done()\n\n\t\tdec := decoder.New(u.encoding)\n\t\tbuf := make([]byte, 0, MaxUDPSize)\n\t\tfor {\n\t\t\tmessage, remoteAddr, err := u.readMessage()\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tu.Errorw(\"Failed reading messages\", zap.Error(err))\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif u.OneLogPerPacket {\n\t\t\t\tlog := truncateMaxLog(message)\n\t\t\t\tu.handleMessage(ctx, remoteAddr, dec, log)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tscanner := bufio.NewScanner(bytes.NewReader(message))\n\t\t\tscanner.Buffer(buf, MaxUDPSize)\n\n\t\t\tscanner.Split(u.splitFunc)\n\n\t\t\tfor scanner.Scan() {\n\t\t\t\tu.handleMessage(ctx, remoteAddr, dec, scanner.Bytes())\n\t\t\t}\n\t\t\tif err := scanner.Err(); err != nil {\n\t\t\t\tu.Errorw(\"Scanner error\", zap.Error(err))\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (bot *Hitbot) MessageHandler() {\n\tfor {\n\t\t_, p, err := bot.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t//log.Printf(\"Message: %v\", string(p)) //debug info\n\t\tif string(p[:3]) == \"2::\" {\n\t\t\tbot.conn.WriteMessage(websocket.TextMessage, []byte(\"2::\"))\n\t\t\t//log.Print(\"Ping!\")\n\t\t\tcontinue\n\t\t} else if string(p[:3]) == \"1::\" {\n\t\t\tlog.Print(\"Connection successful!\")\n\t\t\tfor _, channel := range bot.channels {\n\t\t\t\tbot.joinChannel(channel)\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if string(p[:4]) == \"5:::\" {\n\t\t\tbot.parseMessage(p[4:])\n\t\t}\n\t}\n}",
"func (s *Socket) handleMessagesIn() {\n\tfor {\n\t\tm := <-s.messagesIn\n\t\tfmt.Printf(\"Receiving message: %v\", m)\n\t\tswitch m.MessageType {\n\t\tcase PLACE_ORDER:\n\t\t\ts.placeOrder(m.Payload)\n\t\tcase CANCEL_ORDER:\n\t\t\ts.cancelOrder(m.Payload)\n\t\tcase SIGNED_DATA:\n\t\t\ts.executeOrder(m.Payload)\n\t\tcase DONE:\n\t\tdefault:\n\t\t\tpanic(\"Unknown message type\")\n\t\t}\n\t}\n}",
"func (handler *BotHandler) handleMessages() {\n\thandler.McRunner.WaitGroup.Add(1)\n\tdefer handler.McRunner.WaitGroup.Done()\n\tfor {\n\t\tselect {\n\t\tcase msg := <-handler.McRunner.MessageChannel:\n\t\t\tmessage := message{Timestamp: time.Now().Format(time.RFC3339), Message: msg}\n\t\t\tmessageJSON, _ := json.Marshal(message)\n\t\t\theader := header{Type: \"msg\", Data: messageJSON}\n\t\t\thandler.sock.WriteJSON(header)\n\t\tcase <-handler.killChannel:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (q *Queue) HandleMessages(h MessageHandler) {\n\tfor msg := range q.ch {\n\t\th.HandleMessage(msg.content, msg.sender)\n\t}\n}",
"func (bot *Bot) handleIncomingMessages() {\n\tscan := bufio.NewScanner(bot.con)\n\tfor scan.Scan() {\n\t\t// Disconnect if we have seen absolutely nothing for 300 seconds\n\t\tbot.con.SetDeadline(time.Now().Add(bot.PingTimeout))\n\t\tmsg := ParseTwitchMessage(scan.Text())\n\t\tbot.Debug(\"Incoming\", \"msg.To\", msg.Message.To, \"msg.From\", msg.Message.From, \"msg.Params\", msg.Message.Params, \"msg.Trailing\", msg.Message.Trailing)\n\t\tfor _, t := range bot.triggers {\n\t\t\tif t.Condition(bot, msg) {\n\t\t\t\tgo t.Action(bot, msg)\n\t\t\t}\n\t\t}\n\t\tbot.Incoming <- msg\n\t}\n\tclose(bot.Incoming)\n}",
"func handleMessage(message string, clientConnection *clientTCPConnection) {\n\tclientConnection.messageCount++\n\t//Handle first message\n\tif clientConnection.messageCount == 1 {\n\t\tmessage = message[0 : len(message)-2]\n\t\tbase64PublicKey := base64.StdEncoding.EncodeToString(publicKeyPKIX)\n\t\tfmt.Fprintf(clientConnection.conn, \"RSAKEY::%s\\n\", base64PublicKey)\n\t\treturn\n\t}\n\tunEncodedMessage, err := base64.StdEncoding.DecodeString(message)\n\tif err != nil {\n\t\tfmt.Println(\"Error unencoding message \", err)\n\t\treturn\n\t}\n\tunencryptedMessage, err := rsa.DecryptOAEP(hashInterface, cRand.Reader, privateKey, unEncodedMessage, blankLabel)\n\tif err != nil {\n\t\tfmt.Println(\"Error decrypting message \", err)\n\t\treturn\n\t}\n\tswitch {\n\tcase !clientConnection.authenticated:\n\t\tif string(unencryptedMessage) == \"TESTPASS\" {\n\t\t\tclientConnection.authenticated = true\n\t\t}\n\t\tfmt.Println(\"Authenticated successfully \")\n\tcase clientConnection.authenticated:\n\t\tswitch {\n\t\tcase !clientConnection.started:\n\t\t\tif string(unencryptedMessage) == \"starting\" {\n\t\t\t\tclientConnection.started = true\n\t\t\t}\n\t\tcase clientConnection.started:\n\t\t\tif strings.HasPrefix(string(unencryptedMessage), \"done\") {\n\t\t\t\tunencryptedMessage = []byte(strings.TrimPrefix(string(unencryptedMessage), \"done:\"))\n\t\t\t\tvar runResult result\n\t\t\t\terr := json.Unmarshal(unencryptedMessage, &runResult)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"ERROR ERROR ERROR!!!\", err)\n\t\t\t\t}\n\t\t\t\tclientConnection.started = false\n\t\t\t\tfinishChannel <- runResult\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tfmt.Fprintf(clientConnection.conn, \"Unauthenticated clients not supported\")\n\t}\n}",
"func handleMessage(conn net.Conn, msg string) {\n\tfmt.Println(\"> Server response:\", msg)\n}",
"func (cl *Client) receiveMessages() {\r\n\tmsgBuffer := make([]byte, MSGHEADER)\r\n\tfor {\r\n\t\tvar msg string\r\n\t\t_, err := cl.Conn.Read(msgBuffer)\r\n\t\tif err != nil {\r\n\t\t\tcl.Conn.Close()\r\n\t\t\tlog.Fatal(\"FATAL ERROR, closing connection.\")\r\n\t\t}\r\n\t\tmsgUsername := strings.TrimSpace(string(msgBuffer))\r\n\r\n\t\tfor {\r\n\t\t\tn, err := cl.Conn.Read(msgBuffer)\r\n\t\t\tif err != nil {\r\n\t\t\t\tcl.Conn.Close()\r\n\t\t\t\tlog.Fatal(\"FATAL ERROR, closing connection.\")\r\n\t\t\t}\r\n\t\t\tif n != len(msgBuffer) {\r\n\t\t\t\tmsg += string(msgBuffer[:n])\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t\tmsg += string(msgBuffer)\r\n\t\t}\r\n\r\n\t\tfmt.Printf(\"\\n<%s> %s\", msgUsername, msg)\r\n\t\tfmt.Printf(\"\\n<%s> \", cl.Username)\r\n\t}\r\n\r\n}",
"func (tv *TV) MessageHandler() (err error) {\n\tdefer func() {\n\t\ttv.resMutex.Lock()\n\t\tfor _, ch := range tv.res {\n\t\t\tclose(ch)\n\t\t}\n\t\ttv.res = nil\n\t\ttv.resMutex.Unlock()\n\t}()\n\n\tfor {\n\t\tmt, p, err := tv.ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif mt != websocket.TextMessage {\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := Message{}\n\n\t\terr = json.Unmarshal(p, &msg)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttv.resMutex.Lock()\n\t\tch := tv.res[msg.ID]\n\t\ttv.resMutex.Unlock()\n\n\t\tch <- msg\n\t}\n}",
"func (u *UDPInput) goHandleMessages(ctx context.Context) {\n\tu.waitGroup.Add(1)\n\n\tgo func() {\n\t\tdefer u.waitGroup.Done()\n\n\t\tfor {\n\t\t\tmessage, err := u.readMessage()\n\t\t\tif err != nil && u.isExpectedClose(err) {\n\t\t\t\tu.Debugf(\"Exiting message handler: %s\", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tentry := u.NewEntry(message)\n\t\t\tu.Write(ctx, entry)\n\t\t}\n\t}()\n}",
"func (g *Gossiper) HandleClientMessages() {\n\tg.ConnectToClient()\n\n\tpacketBytes := make([]byte, buffsize)\n\tmsg := &message.Message{}\n\n\tfor {\n\t\tnRead, _, err := g.clientConn.ReadFromUDP(packetBytes)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error: read from buffer failed.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif nRead > 0 {\n\t\t\tprotobuf.Decode(packetBytes, msg)\n\t\t\tprintClientMessage(*msg)\n\t\t\tg.PrintPeers()\n\n\t\t\trumorMsg := message.RumorMessage{Origin: g.name, ID: messageID, Text: msg.Text}\n\t\t\tg.rumorMsgs.AddMessage(g.name, rumorMsg)\n\t\t\tg.newMsgs = append(g.newMsgs, rumorMsg)\n\t\t\tmessageID++\n\t\t\tg.myStatus.SetStatus(g.name, messageID)\n\n\t\t\tpacket := &gossippacket.GossipPacket{Rumor: &rumorMsg}\n\t\t\tgo g.rumorMonger(*packet, nil)\n\t\t}\n\t}\n}",
"func handle_conn(conn * Connection) {\n for conn.connected {\n messages := conn.Receive()\n if conn.connected && messages != nil {\n for _, message := range messages {\n fmt.Println(\"Received message\", string(message.Serialize()))\n handle_message(conn, message)\n }\n }\n }\n}",
"func (refreshHandler *refreshCredentialsHandler) handleMessages() {\n\tfor {\n\t\tselect {\n\t\tcase message := <-refreshHandler.messageBuffer:\n\t\t\trefreshHandler.handleSingleMessage(message)\n\t\tcase <-refreshHandler.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}",
"func receiveMessages(conn *websocket.Conn) {\n\tdefer disconnect(conn)\n\tfor {\n\t\tvar demarshaled struct {\n\t\t\tCommand string\n\t\t\tBody string\n\t\t\tClient models.Client\n\t\t}\n\t\terr := conn.ReadJSON(&demarshaled)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: Unable to read message from client\")\n\t\t\tlog.Println(\"Disconnecting client...\")\n\t\t\tbreak\n\t\t}\n\t\tmessage := &models.Message{\n\t\t\tCommand: demarshaled.Command,\n\t\t\tBody: demarshaled.Body,\n\t\t\tClient: &demarshaled.Client,\n\t\t}\n\t\trequest := serverRequest{\n\t\t\tMessage: message,\n\t\t\tClient: models.CloneClient(clients[conn]),\n\t\t}\n\n\t\tswitch command := message.GetCommand(); command {\n\t\tcase \"login\":\n\t\t\tloginRequests <- request\n\t\tcase \"newuser\":\n\t\t\tnewUserRequests <- request\n\t\tcase \"send\":\n\t\t\tsendRequests <- request\n\t\tcase \"logout\":\n\t\t\tlogoutRequests <- request\n\t\tcase \"help\":\n\t\t\thelpRequests <- request\n\t\tdefault:\n\t\t\tlog.Println(\"Received unrecognized command -\", command, \"- from client\")\n\t\t}\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
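The dispatch loop above keys strictly on a newline-terminated command name, so a client must send the command line first and the handler-specific payload after it. A hedged sketch of that sending side (the function name is invented for illustration):

// sendCommand writes a command name, then one payload line, then flushes.
// rw wraps an established net.Conn, mirroring handleMessages above.
func sendCommand(rw *bufio.ReadWriter, cmd, payload string) error {
	if _, err := rw.WriteString(cmd + "\n" + payload + "\n"); err != nil {
		return err
	}
	// Nothing reaches the wire until the buffered writer is flushed.
	return rw.Flush()
}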
Now let's create two handler functions. The easiest case is where our ad hoc protocol only sends string data. The second handler receives and processes a struct that was sent as GOB data. handleStrings handles the "STRING" request. | func handleStrings(rw *bufio.ReadWriter) {
// Receive a string.
log.Print("Receive STRING message:")
s, err := rw.ReadString('\n')
	if err != nil {
		log.Println("Cannot read from connection.\n", err)
		return
	}
s = strings.Trim(s, "\n ")
log.Println(s)
_, err = rw.WriteString("Thank you.\n")
if err != nil {
log.Println("Cannot write to connection.\n", err)
}
err = rw.Flush()
if err != nil {
log.Println("Flush failed.", err)
}
} | [
"func (serv *Server) handleText(conn int, payload []byte) {\n\tvar (\n\t\tlogp = `handleText`\n\n\t\thandler RouteHandler\n\t\terr error\n\t\tctx context.Context\n\t\treq *Request\n\t\tres *Response\n\t\tok bool\n\t)\n\n\tres = _resPool.Get().(*Response)\n\tres.reset()\n\n\tctx, ok = serv.Clients.Context(conn)\n\tif !ok {\n\t\terr = errors.New(\"client context not found\")\n\t\tres.Code = http.StatusInternalServerError\n\t\tres.Message = err.Error()\n\t\tgoto out\n\t}\n\n\treq = _reqPool.Get().(*Request)\n\treq.reset()\n\n\terr = json.Unmarshal(payload, req)\n\tif err != nil {\n\t\tres.Code = http.StatusBadRequest\n\t\tres.Message = err.Error()\n\t\tgoto out\n\t}\n\n\thandler, err = req.unpack(serv.routes)\n\tif err != nil {\n\t\tres.Code = http.StatusBadRequest\n\t\tres.Message = req.Target\n\t\tgoto out\n\t}\n\tif handler == nil {\n\t\tres.Code = http.StatusNotFound\n\t\tres.Message = req.Method + \" \" + req.Target\n\t\tgoto out\n\t}\n\n\treq.Conn = conn\n\n\t*res = handler(ctx, req)\n\nout:\n\tif req != nil {\n\t\tres.ID = req.ID\n\t\t_reqPool.Put(req)\n\t}\n\n\terr = serv.sendResponse(conn, res)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t\tserv.ClientRemove(conn)\n\t}\n\n\t_resPool.Put(res)\n}",
"func handle(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tError.Println(err)\n\t\treturn\n\t}\n\n\tvar msg json.RawMessage\n\td := data{Msg: &msg}\n\terr = json.Unmarshal(body, &d)\n\tif err != nil {\n\t\tError.Println(err)\n\t\treturn\n\t}\n\tInfo.Println(d.Type)\n\n\tgwn := d.Gateway.Msg.Name\n\n\tgo func() {\n\n\t\tswitch d.Type {\n\t\tcase \"heart\":\n\t\t\tvar h heart\n\t\t\terr = json.Unmarshal(msg, &h)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleHeartBeatData(h)\n\t\tcase \"topology_data\":\n\t\t\tvar t topology\n\t\t\terr = json.Unmarshal(msg, &t)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleTopologyData(t, gwn)\n\t\tcase \"partition_data\":\n\t\t\tvar pt partition\n\t\t\terr = json.Unmarshal(msg, &pt)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandlePartitionData(pt, gwn)\n\t\tcase \"harp_partition_data\":\n\t\t\tvar pt partition\n\t\t\terr = json.Unmarshal(msg, &pt)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandlePartitionHARPData(pt, gwn)\n\t\tcase \"harp_sp_data\":\n\t\t\tvar sp subpartition\n\t\t\terr = json.Unmarshal(msg, &sp)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleHARPSPData(sp, gwn)\n\t\tcase \"schedule_data\":\n\t\t\tvar sch schedule\n\t\t\terr = json.Unmarshal(msg, &sch)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleScheduleData(sch, gwn)\n\t\tcase \"sensor_type_0\":\n\t\t\tvar s sensor\n\t\t\terr = json.Unmarshal(msg, &s)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleSensorData(s, gwn)\n\t\tcase \"network_data_0\":\n\t\t\tvar n0 network0\n\t\t\terr = json.Unmarshal(msg, &n0)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleNetworkData0(n0, gwn)\n\t\tcase \"network_data_1\":\n\t\t\tvar n1 network1\n\t\t\terr = json.Unmarshal(msg, &n1)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleNetworkData1(n1, gwn)\n\t\tcase \"network_data_2\":\n\t\t\tvar n2 network2\n\t\t\terr = json.Unmarshal(msg, &n2)\n\t\t\tif err != nil {\n\t\t\t\tError.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleNetworkData2(n2, gwn)\n\t\tdefault:\n\t\t\tError.Println(\"Unknown data type:\", string(body))\n\t\t}\n\n\t}()\n\tfmt.Fprintf(w, \"Got it!\\n\")\n}",
"func HandlePayload(payload string, m Metserver) string {\n\tpayload = decodePayload(payload, m)\n\tfmt.Println(\"HandlePayload decPayload: \" + payload)\n\tsplitPayload := strings.SplitN(payload, \"||\", 3)\n\tmode := splitPayload[0]\n\taid := splitPayload[1]\n\tdata := splitPayload[2]\n\tretval := \"\"\n\tswitch mode {\n\tcase \"C\":\n\t\tretval = registerBot(data, m)\n\tcase \"D\":\n\t\tretval = getCommands(data, m)\n\tcase \"E\":\n\t\tretval = addResult(data, aid, m)\n\tdefault:\n\t\treturn \"\"\n\t}\n\tr := encodePayload(retval, m)\n\treturn r\n}",
"func serveStringHandler(str string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\tw.Write([]byte(str))\n\t})\n}",
"func (serv *Server) handleBin(conn int, payload []byte) {}",
"func (s *Server) handlerConn(c net.Conn) {\n\tdefer c.Close()\n\tbuf := make([]byte, 2048)\n\trcvPacketSize, err := c.Read(buf)\n\tif err != nil && err != io.EOF {\n\t\tlog.Println(\"Read error: \", err)\n\t\treturn\n\t}\n\tdata := buf[:rcvPacketSize]\n\n\trec := strings.Split(string(data), \" \")\n\tlog.Println(\"Received data: \", rec)\n\n\t// rec must have 3 field (as at form)\n\tif len(rec) <= 3 {\n\t\tif err := s.db.Insert(rec); err != nil {\n\t\t\tlog.Printf(\"Insert error: %v\\n\", err)\n\t\t}\n\t\tlog.Printf(\"Save record in DB: %v\\n\", rec)\n\n\t\tif _, err = c.Write([]byte(\"OK\")); err != nil {\n\t\t\tlog.Printf(\"Response send error: %v\\n\", err)\n\t\t}\n\t}\n}",
"func GeneralConvHandler(req, name string, res http.ResponseWriter) string {\n\n\tfmt.Println(\"General conversation...\")\n\trand.Seed(time.Now().UnixNano())\n\tusername = name\n\tmessage := filterForMessagesComparision(req)\n\tmatch := false\n\n\t// determine type of message\n\tif !match {\n\t\tisGreetingPlain := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.InitialGreetingsPlain); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.InitialGreetingsPlain[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isGreetingPlain {\n\t\t\ttemp := greetingPlainController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tisGreetingName := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.InitialGreetingsName); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.InitialGreetingsName[i] {\n\t\t\t\t\tfmt.Println(\"contains \", strings.ToLower(s), \" \", messagesParser.InitialGreetingsName[i])\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isGreetingName {\n\t\t\ttemp := greetingNameController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tisHelp := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.Help); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.Help[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isHelp {\n\t\t\ttemp := helpController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tisAbout := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.About); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.About[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isAbout {\n\t\t\ttemp := aboutController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tfmt.Println(\"inside\")\n\t\tisAge := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.Age); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.Age[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isAge {\n\t\t\ttemp := ageController(message)\n\t\t\tfmt.Println(\"temp age is \", temp)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\tif !match {\n\t\tisBirthday := func(s string) bool {\n\t\t\tfor i := 0; i < len(messagesParser.Birthday); i++ {\n\t\t\t\tif strings.ToLower(s) == messagesParser.Birthday[i] {\n\t\t\t\t\tmatch = true\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(message)\n\n\t\tif isBirthday {\n\t\t\ttemp := birthdayController(message)\n\t\t\tresp = jsonResponse{true, temp, true, nil}\n\t\t\tspeak = temp\n\t\t\tmarshalled, _ := json.Marshal(resp)\n\t\t\tres.Write(marshalled)\n\t\t}\n\t}\n\n\treturn speak\n\n}",
"func ProcessWsData(client *Client, message Message) {\r\n\tvar msg Message\r\n\tmsg.Type = message.Type\r\n\tswitch message.Type {\r\n\tcase utils.MessageType[\"HEART_BEAT\"]:\r\n\t\tmsg.MsgData.Msg = \"pong\"\r\n\t\tclient.SendMsg(msg)\r\n\tcase utils.MessageType[\"NEW_PROBLEM\"]:\r\n\t\tmsg.MsgData.UserID = message.MsgData.UserID\r\n\t\tmsg.MsgData.Msg = \"new_problem\"\r\n\t\tmsg.MsgData.ProblemStatus = message.MsgData.ProblemStatus\r\n\t\tManager.Broadcast <- msg\r\n\tcase utils.MessageType[\"NEW_UPLOAD\"]:\r\n\t\tmsg.MsgData.UserID = message.MsgData.UserID\r\n\t\tmsg.MsgData.Msg = \"new_upload\"\r\n\t\tmsg.MsgData.ProblemStatus = message.MsgData.ProblemStatus\r\n\t\tManager.Broadcast <- msg\r\n\tcase utils.MessageType[\"NEW_FLAG_SUBMIT\"]:\r\n\t\tmsg.MsgData.UserID = message.MsgData.UserID\r\n\t\tmsg.MsgData.Msg = \"new_flag\"\r\n\t\tmsg.MsgData.ProblemStatus = message.MsgData.ProblemStatus\r\n\t\tManager.Broadcast <- msg\r\n\tcase utils.MessageType[\"CORRECT_ANSWER\"]:\r\n\t\tmsg.MsgData.UserID = message.MsgData.UserID\r\n\t\tmsg.MsgData.Msg = \"correct_answer\"\r\n\t\tmsg.MsgData.ProblemStatus = message.MsgData.ProblemStatus\r\n\t\tManager.Broadcast <- msg\r\n\tcase utils.MessageType[\"ERROR\"]:\r\n\t\tmsg.MsgData.Msg = \"error\"\r\n\t\tclient.SendMsg(msg)\r\n\t\tManager.Disconnect <- client\r\n\t}\r\n}",
"func (self Handlers) Handle(request []byte) []byte {\n\t//\tfmt.Println(\"New packet\")\n\tvar name string\n\n\tvar value = protocol.NamedType{}\n\tvar h = protocol.NamedTypeHandle()\n\tvar dec = codec.NewDecoderBytes(request, h)\n\tvar err = dec.Decode(&value)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to decode\")\n\t\tfmt.Println(err.Error())\n\t\treturn []byte(\"success\")\n\t}\n\n\tswitch value.Name {\n\tcase \"protocol.TrainPacket\":\n\t\t//\t\tfmt.Println(\"Got packet\")\n\t\tpacket := protocol.TrainPacketFromMap(value.Value.(map[interface{}]interface{}))\n\t\tif packet.Incoming {\n\t\t\tname = packet.Dataset + \"-incoming\"\n\t\t} else {\n\t\t\tname = packet.Dataset + \"-outgoing\"\n\t\t}\n\n\t\t// Get the handler for packets of this dataset-incoming/outgoing and pass the training\n\t\t// packet onto the handler's channel.\n\t\thandler := self.Load(name)\n\t\tif handler != nil {\n\t\t\thandler.handleChannel <- &packet\n\t\t\treturn []byte(\"success\")\n\t\t} else {\n\t\t\tfmt.Println(\"Could not load handler for\", name)\n\t\t\treturn []byte(\"success\")\n\t\t}\n\tdefault:\n\t\tfmt.Println(\"Unknown request type\")\n\t\tfmt.Println(value)\n\t\tfmt.Println(\"<.>\")\n\t\treturn []byte(\"success\")\n\t}\n}",
"func (echo *echoImpl) SendString(_ fidl.Context, inValue string) error {\n\treturn nil\n}",
"func (echo *echoImpl) SendString(_ fidl.Context, inValue string) error {\n\tlog.Println(\"Received SendString request\", inValue)\n\tif err := echo.eventSender.OnString(inValue); err != nil {\n\t\tlog.Println(\"Failed to send event:\", err)\n\t}\n\t// We choose to return nil instead of the event sender error, since we don't\n\t// want the server to keep running even if it couldn't send the event.\n\treturn nil\n}",
"func ProcessOkString(w http.ResponseWriter, result []byte) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(result)\n}",
"func (r *Receiver) SetupHandlerOnReceiveStructSimple(handler OnReceiveStructSimple) { r.HandlerOnReceiveStructSimple = handler }",
"func (d *MoveLegsSkill) OnRecvString(data string) {\n\tlog.Info.Println(data)\n\t\n\tif data == \"start\" {\n\t\t//starts thread that runs the play function as part of the d class\n\t\tgo d.play()\n\t} else if data == \"stop\" {\n\t\td.stop <- true\n\t\thexabody.RelaxLegs()\n\t} else {\n\t\tlog.Info.Println(\"starting\")\n\t\tlegTest(data)\n\t\tlog.Info.Println(\"returned\")\n\t}\n\n}",
"func (o *Okcoin) WsHandleData(respRaw []byte) error {\n\tif bytes.Equal(respRaw, []byte(pongBytes)) {\n\t\treturn nil\n\t}\n\tvar dataResponse WebsocketDataResponse\n\terr := json.Unmarshal(respRaw, &dataResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif dataResponse.ID != \"\" {\n\t\tif !o.Websocket.Match.IncomingWithData(dataResponse.ID, respRaw) {\n\t\t\treturn fmt.Errorf(\"couldn't match incoming message with id: %s and operation: %s\", dataResponse.ID, dataResponse.Operation)\n\t\t}\n\t\treturn nil\n\t}\n\tif len(dataResponse.Data) > 0 {\n\t\tswitch dataResponse.Arguments.Channel {\n\t\tcase wsInstruments:\n\t\t\treturn o.wsProcessInstruments(respRaw)\n\t\tcase wsTickers:\n\t\t\treturn o.wsProcessTickers(respRaw)\n\t\tcase wsCandle3M, wsCandle1M, wsCandle1W, wsCandle1D, wsCandle2D, wsCandle3D, wsCandle5D,\n\t\t\twsCandle12H, wsCandle6H, wsCandle4H, wsCandle2H, wsCandle1H, wsCandle30m, wsCandle15m,\n\t\t\twsCandle5m, wsCandle3m, wsCandle1m, wsCandle3Mutc, wsCandle1Mutc, wsCandle1Wutc, wsCandle1Dutc,\n\t\t\twsCandle2Dutc, wsCandle3Dutc, wsCandle5Dutc, wsCandle12Hutc, wsCandle6Hutc:\n\t\t\treturn o.wsProcessCandles(respRaw)\n\t\tcase wsTrades:\n\t\t\treturn o.wsProcessTrades(respRaw)\n\t\tcase wsOrderbooks,\n\t\t\twsOrderbooksL5,\n\t\t\twsOrderbookL1,\n\t\t\twsOrderbookTickByTickL400,\n\t\t\twsOrderbookTickByTickL50:\n\t\t\treturn o.wsProcessOrderbook(respRaw, dataResponse.Arguments.Channel)\n\t\tcase wsStatus:\n\t\t\tvar resp WebsocketStatus\n\t\t\terr = json.Unmarshal(respRaw, &resp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor x := range resp.Data {\n\t\t\t\tsystemStatus := fmt.Sprintf(\"%s %s on system %s %s service type From %s To %s\", systemStateString(resp.Data[x].State), resp.Data[x].Title, resp.Data[x].System, systemStatusServiceTypeString(resp.Data[x].ServiceType), resp.Data[x].Begin.Time().String(), resp.Data[x].End.Time().String())\n\t\t\t\tif resp.Data[x].Href != \"\" {\n\t\t\t\t\tsystemStatus = fmt.Sprintf(\"%s Href: %s\\n\", systemStatus, resp.Data[x].Href)\n\t\t\t\t}\n\t\t\t\tif resp.Data[x].RescheduleDescription != \"\" {\n\t\t\t\t\tsystemStatus = fmt.Sprintf(\"%s Rescheduled Description: %s\", systemStatus, resp.Data[x].RescheduleDescription)\n\t\t\t\t}\n\t\t\t\tlog.Warnf(log.ExchangeSys, systemStatus)\n\t\t\t}\n\t\t\to.Websocket.DataHandler <- resp\n\t\t\treturn nil\n\t\tcase wsAccount:\n\t\t\treturn o.wsProcessAccount(respRaw)\n\t\tcase wsOrder:\n\t\t\treturn o.wsProcessOrders(respRaw)\n\t\tcase wsOrdersAlgo:\n\t\t\treturn o.wsProcessAlgoOrder(respRaw)\n\t\tcase wsAlgoAdvance:\n\t\t\treturn o.wsProcessAdvancedAlgoOrder(respRaw)\n\t\t}\n\t\to.Websocket.DataHandler <- stream.UnhandledMessageWarning{\n\t\t\tMessage: o.Name + stream.UnhandledMessage + string(respRaw),\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar errorResponse WebsocketErrorResponse\n\terr = json.Unmarshal(respRaw, &errorResponse)\n\tif err == nil && errorResponse.ErrorCode > 0 {\n\t\treturn fmt.Errorf(\"%v error - %v message: %s \",\n\t\t\to.Name,\n\t\t\terrorResponse.ErrorCode,\n\t\t\terrorResponse.Message)\n\t}\n\tvar eventResponse WebsocketEventResponse\n\terr = json.Unmarshal(respRaw, &eventResponse)\n\tif err == nil && eventResponse.Event != \"\" {\n\t\tswitch eventResponse.Event {\n\t\tcase \"login\":\n\t\t\tif o.Websocket.Match.IncomingWithData(\"login\", respRaw) {\n\t\t\t\to.Websocket.SetCanUseAuthenticatedEndpoints(eventResponse.Code == \"0\")\n\t\t\t}\n\t\tcase \"subscribe\", \"unsubscribe\":\n\t\t\to.Websocket.DataHandler <- eventResponse\n\t\tcase \"error\":\n\t\t\tif 
o.Verbose {\n\t\t\t\tlog.Debugf(log.ExchangeSys,\n\t\t\t\t\to.Name+\" - \"+eventResponse.Event+\" on channel: \"+eventResponse.Channel)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func handleApiRequest(conn net.Conn, write_first []byte) { // bytes.Buffer\n\t// write_first.WriteTo(conn)\n\tconn.Write(write_first)\n\t// Make a buffer to hold incoming data.\n\tbuf := make([]byte, 1024)\n\t// Read the incoming connection into the buffer.\n\tfor {\n\t\tn, err := conn.Read(buf)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tfmt.Println(\"read eof. closing\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Error reading:\", err.Error())\n\t\t\t}\n\t\t\tconn.Close()\n\t\t\tbreak\n\t\t}\n\t\tclean_cmd := strings.TrimSpace(string(buf[:n]))\n\t\tcommand := strings.Split(clean_cmd, \" \")\n\t\tlog.Println(\"received command: '\" + clean_cmd + \"'\")\n\t\tswitch command[0] {\n\t\tcase \"route\":\n\t\t\trouteRequests <- routeReq{command[1:], &conn}\n\t\t\treturn\n\t\tcase \"help\":\n\t\t\twriteHelp(conn, []byte(\"\"))\n\t\t\tcontinue\n\t\tdefault:\n\t\t\twriteHelp(conn, []byte(\"unknown command\\n\"))\n\t\t}\n\t}\n}",
"func handle(index int, val interface{}) {\n\tswitch val.(type) {\n\tcase lazlo.PatternMatch:\n\t\thandleMessageCB(index, pmTranslate(val.(lazlo.PatternMatch)))\n\tcase time.Time:\n\t\thandleTimerCB(index, val.(time.Time))\n\tcase map[string]interface{}:\n\t\thandleEventCB(index, val.(map[string]interface{}))\n\tcase *http.Request:\n\t\thandleLinkCB(index, val.(*http.Response))\n\tdefault:\n\t\terr := fmt.Errorf(\"luaMod handle:: unknown type: %T\", val)\n\t\tlazlo.Logger.Error(err)\n\t}\n}",
"func TestBasicMethodChannelStringCodecHandle(t *testing.T) {\n\tcodec := StringCodec{}\n\tmessenger := NewTestingBinaryMessenger()\n\tchannel := NewBasicMessageChannel(messenger, \"ch\", codec)\n\tchannel.HandleFunc(func(message interface{}) (reply interface{}, err error) {\n\t\tmessageString, ok := message.(string)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"message is invalid type, expected string\")\n\t\t}\n\t\treply = messageString + \" world\"\n\t\treturn reply, nil\n\t})\n\tencodedMessage, err := codec.EncodeMessage(\"hello\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to encode message: %v\", err)\n\t}\n\tencodedReply, err := messenger.MockSend(\"ch\", encodedMessage)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treply, err := codec.DecodeMessage(encodedReply)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to decode reply: %v\", err)\n\t}\n\tt.Log(spew.Sdump(reply))\n\treplyString, ok := reply.(string)\n\tif !ok {\n\t\tt.Fatal(\"reply is invalid type, expected string\")\n\t}\n\tEqual(t, \"hello world\", replyString)\n}",
"func handleMasterMsg(data string) {\n\tdataSlice := strings.SplitN(strings.TrimSpace(data), \" \", 2)\n\tcommand := dataSlice[0]\n\tswitch command {\n\tcase \"get\":\n\t\tdoGet()\n\tcase \"alive\":\n\t\tdoAlive()\n\tcase \"broadcast\":\n\t\tpayload := dataSlice[1]\n\t\tdoBroadcast(payload)\n\tcase \"crash\":\n\t\t// self-destruct\n\t\tos.Exit(0)\n\tdefault:\n\t\tmsg := fmt.Sprintf(\"Invalid command %v from master\", command)\n\t\tdebugPrintln(msg)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
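handleStrings above answers with exactly one line, so the client half of the STRING exchange is one command write and one read. A minimal sketch, reusing the sendCommand helper sketched after the previous row (both names are illustrative):

// requestString performs the client side of the STRING round trip.
func requestString(rw *bufio.ReadWriter, s string) (string, error) {
	if err := sendCommand(rw, "STRING", s); err != nil {
		return "", err
	}
	// handleStrings replies with a single newline-terminated line ("Thank you.").
	return rw.ReadString('\n')
}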
handleGob handles the "GOB" request. It decodes the received GOB data into a struct. | func handleGob(rw *bufio.ReadWriter) {
log.Print("Receive GOB data:")
var data complexData
// Create a decoder that decodes directly into a struct variable.
dec := gob.NewDecoder(rw)
err := dec.Decode(&data)
if err != nil {
log.Println("Error decoding GOB data:", err)
return
}
// Print the complexData struct and the nested one, too, to prove
// that both travelled across the wire.
log.Printf("Outer complexData struct: \n%#v\n", data)
log.Printf("Inner complexData struct: \n%#v\n", data.C)
} | [
"func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}",
"func GobDecode(data []byte, obj interface{}) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)\n}",
"func GobUnmarshal(data []byte, v interface{}) error {\n\tb := bytes.NewBuffer(data)\n\treturn gob.NewDecoder(b).Decode(v)\n}",
"func gobDecode(buf []byte, into interface{}) error {\n\tif buf == nil {\n\t\treturn nil\n\t}\n\tdec := gob.NewDecoder(bytes.NewReader(buf))\n\treturn dec.Decode(into)\n}",
"func (mvcm *MvcMessage) GobDecode(buf []byte) error {\n\tr := bytes.NewBuffer(buf)\n\tdecoder := gob.NewDecoder(r)\n\terr := decoder.Decode(&mvcm.Cid)\n\tif err != nil {\n\t\tlogger.ErrLogger.Fatal(err)\n\t}\n\terr = decoder.Decode(&mvcm.Type)\n\tif err != nil {\n\t\tlogger.ErrLogger.Fatal(err)\n\t}\n\terr = decoder.Decode(&mvcm.Value)\n\tif err != nil {\n\t\tlogger.ErrLogger.Fatal(err)\n\t}\n\terr = decoder.Decode(&mvcm.Vector)\n\tif err != nil {\n\t\tlogger.ErrLogger.Fatal(err)\n\t}\n\treturn nil\n}",
"func (dec *Decoder) decodeGobPiece() {\n\tif dec.err != nil {\n\t\treturn\n\t}\n\tfor dec.err == nil && dec.gobBuf.Len() > 0 {\n\t\tid := dec.readTypeId()\n\t\tif id >= 0 {\n\t\t\tdec.inValue = true\n\t\t\tdec.decodedValue = dec.valueForType(id)\n\t\t\tif dec.err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw, ok := dec.seenTypes[id]\n\t\t\tif !ok || w.StructT == nil {\n\t\t\t\tdec.consumeNextUint(0)\n\t\t\t}\n\t\t\t// each gob will have a value so after we read it\n\t\t\t// let's return and add it to the returned *Gob's\n\t\t\tdec.readValue(id, &dec.decodedValue)\n\t\t\treturn\n\t\t}\n\t\tdec.inValue = false\n\t\t// we have a type definition\n\t\tdec.readType(-id)\n\t}\n}",
"func (pl *PartitionLoader) GobDecode(in []byte) error {\n\tpl.offset = binary.LittleEndian.Uint64(in)\n\treturn nil\n}",
"func GobUnmarshal(i interface{}, b []byte) error {\n\tbuf := bytes.NewBuffer(b)\n\tdecoder := gob.NewDecoder(buf)\n\treturn decoder.Decode(i)\n}",
"func (dec *Decoder) getGobPiece() {\n\tif dec.err != nil {\n\t\treturn\n\t}\n\tsize, width, err := readUint(dec.r, dec.buf[:], &dec.bytesProcessed)\n\tif err != nil {\n\t\tif err.Err == io.ErrUnexpectedEOF {\n\t\t\t// we actually are OK with an EOF here\n\t\t\terr.Err = io.EOF\n\t\t}\n\t\tdec.err = err\n\t\treturn\n\t}\n\tdec.gobBuf.Reset()\n\tdec.gobBuf.Grow(width + int(size))\n\t_, err_ := dec.gobBuf.Write(dec.buf[:width])\n\tif err != nil {\n\t\tdec.err = dec.genError(err_)\n\t\treturn\n\t}\n\tdec.gobBuf.Consumed(width)\n\t// read the entire gob into the gob buffer\n\t_, err_ = io.ReadFull(dec.r, dec.gobBuf.Bytes())\n\tif err_ != nil {\n\t\tif err_ == io.EOF {\n\t\t\tdec.err = dec.genError(io.ErrUnexpectedEOF)\n\t\t\treturn\n\t\t}\n\t\tdec.err = dec.genError(err_)\n\t\treturn\n\t}\n}",
"func (z *Float) GobDecode(buf []byte) error {}",
"func (rnn *RNN) GobDecode(b []byte) error {\n\tinput := bytes.NewBuffer(b)\n\tdec := gob.NewDecoder(input) // Will read from network.\n\n\tvar backup bkp\n\terr := dec.Decode(&backup)\n\trnn.bh = make([]float64, len(backup.Bh))\n\trnn.by = make([]float64, len(backup.By))\n\trnn.hprev = make([]float64, len(backup.Hprev))\n\tif err == nil {\n\t\trnn.whh = backup.Whh\n\t\trnn.why = backup.Why\n\t\trnn.wxh = backup.Wxh\n\t\trnn.config = backup.Config\n\t\tcopy(rnn.bh, backup.Bh)\n\t\tcopy(rnn.by, backup.By)\n\t\tcopy(rnn.hprev, backup.Hprev)\n\t}\n\treturn err\n}",
"func (d *Digest) GobDecode(p []byte) error {\n\th, n := binary.Uvarint(p)\n\tif n == 0 {\n\t\treturn errors.New(\"short buffer\")\n\t}\n\tif n < 0 {\n\t\treturn errors.New(\"invalid hash\")\n\t}\n\td.h = crypto.Hash(h)\n\tif len(p)-n != d.h.Size() {\n\t\treturn errors.New(\"invalid digest\")\n\t}\n\tcopy(d.b[:], p[n:])\n\treturn nil\n}",
"func ReadGob(path string, object interface{}) error {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tdecoder := gob.NewDecoder(file)\n\tif err = decoder.Decode(object); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (g *Gammas) GobDecode(data []byte) error {\n\tvar err error\n\tfor len(data) > 0 {\n\t\tg2 := new(bn256.G2)\n\t\tdata, err = g2.Unmarshal(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*g = append(*g, g2)\n\t}\n\treturn nil\n}",
"func FromGob(data []byte, dst interface{}) error {\n\treturn NewGobber().From(data, dst)\n}",
"func (loc *LogOddsCell) GobDecode(buf []byte) error {\n\tr := bytes.NewBuffer(buf)\n\tdecoder := gob.NewDecoder(r)\n\n\terr := decoder.Decode(&loc.logOddsVal)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}",
"func (d *DFA) GobDecode(bs []byte) error {\n\tbuffer := bytes.NewBuffer(bs)\n\tdecoder := gob.NewDecoder(buffer)\n\tvar initial State\n\tvar table []Cell\n\tif err := decoder.Decode(&initial); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode initial state\")\n\t}\n\tif err := decoder.Decode(&table); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode sparse table\")\n\t}\n\td.initial = initial\n\td.table = table\n\treturn nil\n}",
"func (m *repoManager) GobDecode(b []byte) error {\n\tbuf := bytes.NewBuffer(b)\n\tdec := gob.NewDecoder(buf)\n\n\tif err := dec.Decode(&(m.repoToUUID)); err != nil {\n\t\treturn err\n\t}\n\tif err := dec.Decode(&(m.versionToUUID)); err != nil {\n\t\treturn err\n\t}\n\t// Generate the inverse UUID to VersionID mapping.\n\tfor versionID, uuid := range m.versionToUUID {\n\t\tm.uuidToVersion[uuid] = versionID\n\t}\n\tif err := dec.Decode(&(m.repoID)); err != nil {\n\t\treturn err\n\t}\n\tif err := dec.Decode(&(m.versionID)); err != nil {\n\t\treturn err\n\t}\n\tif err := dec.Decode(&(m.instanceID)); err != nil {\n\t\treturn err\n\t}\n\n\tif err := dec.Decode(&(m.repos)); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
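handleGob decodes straight off the buffered reader, so the sending side only has to announce the command and then point a gob encoder at the same stream. complexData is never defined in these rows; apart from the nested pointer C, which the decoder above demonstrably dereferences, every field below is an assumption.

// Assumed shape: only the nested pointer C is visible in handleGob above.
type complexData struct {
	N int
	S string
	C *complexData // nested struct; the server prints it separately
}

// sendGob performs the client side of the GOB request (name illustrative).
func sendGob(rw *bufio.ReadWriter, data *complexData) error {
	if _, err := rw.WriteString("GOB\n"); err != nil {
		return err
	}
	// gob flattens the pointer; the server decodes into a plain complexData.
	if err := gob.NewEncoder(rw).Encode(data); err != nil {
		return err
	}
	return rw.Flush()
}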
server listens for incoming requests and dispatches them to registered handler functions. | func server() error {
endpoint := NewEndpoint()
// Add the handle funcs.
endpoint.AddHandleFunc("STRING", handleStrings)
endpoint.AddHandleFunc("GOB", handleGob)
// Start listening.
return endpoint.Listen()
} | [
"func handlerServer(w http.ResponseWriter, r *http.Request) {\n\tsetHeader(w, r)\n\treadCookies(r)\n\tserver := r.URL.Query().Get(\"server\")\n\taction := r.URL.Query().Get(\"action\")\n\tswitch action {\n\tcase \"reloader\":\n\t\t_, _ = io.WriteString(w, getServerTubes(server))\n\t\treturn\n\tcase \"clearTubes\":\n\t\t_ = r.ParseForm()\n\t\tclearTubes(server, r.Form)\n\t\t_, _ = io.WriteString(w, `{\"result\":true}`)\n\t\treturn\n\t}\n\t_, _ = io.WriteString(w, tplServer(getServerTubes(server), server))\n}",
"func (c *ClnPlugin) listenServer() {\n\tfor {\n\t\tselect {\n\t\tcase <-c.done:\n\t\t\treturn\n\t\tdefault:\n\t\t\tid, result := c.server.Receive()\n\n\t\t\t// The server may return nil if it is stopped.\n\t\t\tif result == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tserid, _ := json.Marshal(&id)\n\t\t\tc.sendToCln(&Response{\n\t\t\t\tId: serid,\n\t\t\t\tJsonRpc: SpecVersion,\n\t\t\t\tResult: result,\n\t\t\t})\n\t\t}\n\t}\n}",
"func startServer() {\n\thttp.HandleFunc(\"/write\", writer)\n\thttp.HandleFunc(\"/read\", reader)\n\thttp.HandleFunc(\"/load\", loader)\n\n\t// Starting Http Server at port 7000\n\tfmt.Println(\"Starting Http server\")\n\thttp.ListenAndServe(\":7000\", nil)\n}",
"func handleServerRequests(t *testing.T, ctx context.Context, wg *sync.WaitGroup, sshCn ssh.Conn, reqs <-chan *ssh.Request) {\n\tdefer wg.Done()\n\tfor r := range reqs {\n\t\tif !r.WantReply {\n\t\t\tcontinue\n\t\t}\n\t\tif r.Type != \"tcpip-forward\" {\n\t\t\tr.Reply(false, nil)\n\t\t\tcontinue\n\t\t}\n\t\tvar args struct {\n\t\t\tHost string\n\t\t\tPort uint32\n\t\t}\n\t\tif !unmarshalData(r.Payload, &args) {\n\t\t\tr.Reply(false, nil)\n\t\t\tcontinue\n\t\t}\n\t\tln, err := net.Listen(\"tcp\", net.JoinHostPort(args.Host, strconv.Itoa(int(args.Port))))\n\t\tif err != nil {\n\t\t\tr.Reply(false, nil)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar resp struct{ Port uint32 }\n\t\t_, resp.Port = splitHostPort(ln.Addr().String())\n\t\tif err := r.Reply(true, marshalData(resp)); err != nil {\n\t\t\tt.Errorf(\"request reply error: %v\", err)\n\t\t\tln.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo handleLocalListener(t, ctx, wg, sshCn, ln, args.Host)\n\n\t}\n}",
"func serve(c *config) (*http.Server, chan error) {\n\tprometheus.MustRegister(c.processDuration)\n\tprometheus.MustRegister(c.processCurrent)\n\tprometheus.MustRegister(c.errCounter)\n\n\t// Use DefaultServeMux as the handler\n\tsrv := &http.Server{Addr: c.listenAddr, Handler: nil}\n\thttp.HandleFunc(\"/\", c.handleWebhook)\n\thttp.HandleFunc(\"/_health\", handleHealth)\n\thttp.Handle(\"/metrics\", promhttp.Handler())\n\n\t// Start http server in a goroutine, so that it doesn't block other activities\n\tvar httpSrvResult = make(chan error)\n\tgo func() {\n\t\tlog.Println(\"Listening on\", c.listenAddr, \"with command\", c.command)\n\t\thttpSrvResult <- srv.ListenAndServe()\n\t}()\n\n\treturn srv, httpSrvResult\n}",
"func (c *Connection) listenServer() {\n\tin := bufio.NewScanner(c.Conn)\n\tfor in.Scan() {\n\t\tif p, err := Parse(in.Text()); err != nil {\n\t\t\tlogger.Print(\"parse error:\", err)\n\t\t} else {\n\t\t\tc.ServerChan <- p\n\t\t}\n\t}\n\tclose(done)\n}",
"func serve(svr *http.Server) {\n\tlog.Info(\"accepting connections\", zap.String(\"addr\", config.Bind))\n\tif err := svr.ListenAndServe(); err != nil {\n\t\tlog.Fatal(\"error serving requests\", zap.Error(err))\n\t}\n}",
"func Server(searcher zoekt.Streamer) http.Handler {\n\tregisterGob()\n\treturn &handler{Searcher: searcher}\n}",
"func (this *udpSrv) startHandlers() {\n go this.handleReads()\n}",
"func main() {\n\thttp.HandleFunc(\"/hello\", func(writer http.ResponseWriter, request *http.Request) {\n\t\twriter.Write([]byte(\"hello world!\\n\"))\n\t})\n\n\tif err := http.ListenAndServe(\":8080\", nil); err != nil {\n\t\tpanic(err)\n\t}\n\n}",
"func (h *handler) ListenAndServe() {\n\tgo func() {\n\t\tif err := h.startWebsocketRoute(); err != nil {\n\t\t\th.log.Fatal(err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif err := h.startAPIRoute(); err != nil {\n\t\t\th.log.Fatal(err)\n\t\t}\n\t}()\n}",
"func server(port string) error {\n\tlistener, err := net.Listen(\"tcp\", \":\"+port)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer listener.Close()\n\tlog.Println(\"Started server at\", listener.Addr())\n\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Connection failed:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Accepted connection from\", conn.RemoteAddr())\n\t\t// Handle multiple client connections concurrently\n\t\tgo handleConnection(conn)\n\t}\n}",
"func serve(port int, handler connectionhandler) {\n \n if port < 1024 || port > 65535 {\n // todo: how does go handle errors.\n }\n\n portspec := fmt.Sprintf(\":%d\", port)\n\n sock, err := net.Listen(\"tcp\", portspec)\n if err != nil {\n // error\n fmt.Printf(\"%d\", err)\n }\n\n for {\n conn, err := sock.Accept()\n if err != nil {\n fmt.Printf(\"%d\", err) \n }\n go handler(conn) \n }\n}",
"func ServerAnswerHandler() {\n\tfor {\n\t\tmsg := make([]byte, 1024)\n\t\tif _, err := ws.Read(msg); err != nil {\n\t\t\tlog.Print(\"Connection closed...\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tlog.Printf(\"Received: %s\", msg)\n\t\t}\n\t}\n}",
"func main() {\n\n\tvar dog dogHandler\n\tvar cat catHandler\n\n\tmux := http.NewServeMux()\n\n\tmux.Handle(\"/dog/\", dog)\n\tmux.Handle(\"/cat/\", cat)\n\n\t// The server listens and serves all requests coming to to locahost:9000\n\t// using mux as the multiplexer\n\thttp.ListenAndServe(\":9000\", mux)\n\n}",
"func (h *Handler) handleRequests() {\n\thttp.HandleFunc(\"/\", homePage)\n\thttp.HandleFunc(\"/customers\", h.returnAllCustomers)\n\tlog.Fatal(http.ListenAndServe(frontendPort, nil))\n}",
"func startEventsHandler(h *HTTP, tasks *sync.WaitGroup) {\n\tfor {\n\t\tif event := <-h.events; event != nil {\n\t\t\teventMessage := event.Error()\n\t\t\tfmt.Println(eventMessage)\n\t\t\tswitch {\n\t\t\tcase strings.Contains(eventMessage, \"http: Server closed\"):\n\t\t\t\th.Server.ErrorLog.Printf(\"server was closed\")\n\t\t\t\ttasks.Done()\n\t\t\t\treturn\n\t\t\tcase strings.Contains(eventMessage, \"bind: address already in use\"):\n\t\t\t\th.Server.ErrorLog.Printf(\"failed to start server: '%s' is already in use\", h.Options.Addr.String())\n\t\t\t\thandleShutdown(h, event)\n\t\t\t\ttasks.Done()\n\t\t\t\treturn\n\t\t\tcase strings.Contains(eventMessage, \"received signal: \"):\n\t\t\t\th.Server.ErrorLog.Printf(\"server %s\", eventMessage)\n\t\t\t\thandleShutdown(h, event)\n\t\t\t\th.Server.Close()\n\t\t\tdefault:\n\t\t\t\th.Server.ErrorLog.Printf(\"unknown event: %s\", event)\n\t\t\t}\n\t\t}\n\t}\n}",
"func registerHandlers(s *server) error {\n\terr := s.AddHandler(\"set\", cmdSet)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.AddHandler(\"get\", cmdGet)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.AddHandler(\"delete\", cmdDelete)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.AddHandler(\"stats\", cmdStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.AddHandler(\"quit\", cmdQuit)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (s *Service) serve() {\n\tdefer s.wg.Done()\n\n\tfor {\n\t\t// Wait for next connection.\n\t\tconn, err := s.Listener.Accept()\n\t\tif err != nil && strings.Contains(err.Error(), \"connection closed\") {\n\t\t\ts.Logger.Info(\"Listener closed\")\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\ts.Logger.Info(\"Error accepting snapshot request\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Handle connection in separate goroutine.\n\t\ts.wg.Add(1)\n\t\tgo func(conn net.Conn) {\n\t\t\tdefer s.wg.Done()\n\t\t\tdefer conn.Close()\n\t\t\tif err := s.handleConn(conn); err != nil {\n\t\t\t\ts.Logger.Info(err.Error())\n\t\t\t}\n\t\t}(conn)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
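main in the next row calls a client function that this dump never shows. A hedged reconstruction under the protocol the server rows establish (dial, one STRING round trip, then a GOB payload), reusing the Port, requestString, sendGob, and complexData sketches above:

// Sketch only: the real client implementation is not part of these rows.
func client(ip string) error {
	conn, err := net.Dial("tcp", ip+Port)
	if err != nil {
		return errors.Wrap(err, "dialing "+ip+Port+" failed")
	}
	defer conn.Close()
	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))

	// STRING round trip: the server confirms with one line.
	reply, err := requestString(rw, "Hello from the client!")
	if err != nil {
		return err
	}
	log.Println("Server replied:", strings.Trim(reply, "\n "))

	// GOB request: the server only logs the struct and sends no reply.
	outer := &complexData{N: 23, S: "outer", C: &complexData{N: 256, S: "nested"}}
	return sendGob(rw, outer)
}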
main starts either a client or a server, depending on whether the `connect` flag is set. Without the flag, the process starts as a server, listening for incoming requests. With the flag, the process starts as a client and connects to the host specified by the flag value. Try "localhost" or "127.0.0.1" when running both processes on the same machine. | func main() {
connect := flag.String("connect", "", "IP address of process to join. If empty, go into listen mode.")
flag.Parse()
// If the connect flag is set, go into client mode.
if *connect != "" {
err := client(*connect)
if err != nil {
log.Println("Error:", errors.WithStack(err))
}
log.Println("Client done.")
return
}
// Else go into server mode.
err := server()
if err != nil {
log.Println("Error:", errors.WithStack(err))
}
log.Println("Server done.")
} | [
"func main() {\n\tserver.New().Start()\n}",
"func ServerMain() {\n\tRunServer(ParseCommandLine())\n}",
"func main() {\n\tif len(os.Args) != 2 {\n\t\tlog.Fatal(\"Usage: ./server-go [server port]\")\n\t}\n\tserverPort := os.Args[1]\n\tserver(serverPort)\n}",
"func main() {\n\tif len(os.Args) != 2 {\n\t\tlog.Fatal(\"Usage: ./server-go [server port]\")\n\t}\n\tserver_port := os.Args[1]\n\tserver(server_port)\n}",
"func ClientMain(player Player) {\n\taddr := DefaultServerAddress\n\tif len(os.Args) > 1 {\n\t\tport, err := strconv.Atoi(os.Args[1])\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"invalid value for port: %q\", os.Args[1])\n\t\t}\n\t\taddr = &net.TCPAddr{\n\t\t\tIP: net.IPv4(127, 0, 0, 1),\n\t\t\tPort: port,\n\t\t}\n\t}\n\tvar state BasicState\n\tclient, err := OpenClient(addr, player, &state)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"cannot connect to server: %s\", err)\n\t\tos.Exit(1)\n\t}\n\tclient.DebugTo = os.Stderr\n\terr = client.Run()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error while running: %s\", err)\n\t\tos.Exit(2)\n\t}\n}",
"func StartMainServer(mainHost string, workerCount int) {\n\tserver := &fasthttp.Server{\n\t\tHandler: anyHTTPHandler,\n\t}\n\n\tpreforkServer := prefork.New(server, workerCount)\n\n\tif !prefork.IsChild() {\n\t\tfmt.Printf(\"Server started server on http://%s\\n\", mainHost)\n\t}\n\n\tif err := preforkServer.ListenAndServe(mainHost); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func main() {\n\n\t// Process args.\n\n\t// the TCP address on which the fserver listens to RPC connections from the aserver\n\tfserverTcp := os.Args[1]\n\tfserverTcpG = fserverTcp\n\n\t// the UDP address on which the fserver receives client connections\n\tfserver := os.Args[2]\n\tfserverUdpAddr, err := net.ResolveUDPAddr(\"udp\", fserver)\n\thandleError(err)\n\n\tmsg := make([]byte, 1024)\n\n\t// Global fserver ip:port info\n\tfserverIpPort = fserver\n\n\t// Read the rest of the args as a fortune message\n\tfortune := strings.Join(os.Args[3:], \" \")\n\tfortuneG = fortune\n\n\t// Debug to see input from command line args\n\tfmt.Printf(\"fserver Listening on %s\\nFortune: %s\\n\", fserverIpPort, fortune)\n\n\t// concurrent running of rcp connection\n\n\tconn, err := net.ListenUDP(\"udp\", fserverUdpAddr)\n\thandleError(err)\n\n\tgo handleRpcConnection()\n\tdefer conn.Close()\n\n\t// refactor to global variable\n\tconndp = conn\n\t// udp client concurrency\n\tfor {\n\t\tn, clientAddr, err := conn.ReadFromUDP(msg)\n\t\thandleError(err)\n\t\tgo handleClientConnection(msg[:], n, clientAddr.String())\n\t}\n}",
"func main() {\n\t// get environment variables\n\tport := os.Getenv(portEnv)\n\t// default for port\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t}\n\tlog.Print(\"[Info][Main] Creating server...\")\n\ts, err := sessions.NewServer(\":\"+port, os.Getenv(redisAddressEnv),\n\t\tos.Getenv(gameServerImageEnv), deserialiseEnvMap(os.Getenv(gameNodeSelectorEnv)),\n\t\tos.Getenv(cpuLimitEnv))\n\n\tif err != nil {\n\t\tlog.Fatalf(\"[Error][Main] %+v\", err)\n\t}\n\n\tif err := s.Start(); err != nil {\n\t\tlog.Fatalf(\"[Error][Main] %+v\", err)\n\t}\n}",
"func main() {\n\tvar port int\n\tvar version bool\n\n\t// parse the flags\n\tflag.IntVar(&port, \"port\", 8080, \"used port\")\n\tflag.BoolVar(&version, \"V\", false, \"version of the program\")\n\tflag.Parse()\n\n\t// if user type -V, the V flag is set up to true\n\tif version {\n\t\t// display the information about the version\n\t\tfmt.Println(\"version 1.0_a\")\n\t\t// otherwise run the server\n\t} else {\n\t\tportNr := strconv.Itoa(port)\n\t\thttp.HandleFunc(\"/time\", getTime)\n\t\thttp.HandleFunc(\"/\", unknownRoute)\n\t\terr := http.ListenAndServe(\":\"+portNr, nil)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"ListenAndServe: \", err)\n\t\t}\n\t}\n}",
"func main() {\n\tname := flag.String(\"name\", \"echo\", \"server name\")\n\tport := flag.String(\"port\", \"3000\", \"server port\")\n\tflag.Parse()\n\n\t// Echo instance\n\te := echo.New()\n\n\t// Middleware\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\n\t// Route => handler\n\te.GET(\"/\", func(c echo.Context) error {\n\t\treturn c.HTML(http.StatusOK, fmt.Sprintf(\"<div style='font-size: 8em;'>Hello from upstream server %s!</div>\", *name))\n\t})\n\te.GET(\"/alive\", func(c echo.Context) error {\n\t\tdata := map[string]interface{}{\n\t\t\t\"alive\": true,\n\t\t\t\"hostname\": \"localhost:\" + *port,\n\t\t\t\"serviceName\": *name,\n\t\t\t\"num_cpu\": runtime.NumCPU(),\n\t\t\t\"num_goroutine\": runtime.NumGoroutine(),\n\t\t\t\"go_version\": runtime.Version(),\n\t\t\t\"build_date\": Buildstamp,\n\t\t\t\"commit\": Commit,\n\t\t\t\"startup_time\": startupTime,\n\t\t}\n\t\treturn c.JSON(http.StatusOK, data)\n\t})\n\n\t// Start server\n\te.Logger.Fatal(e.Start(fmt.Sprintf(\":%s\", *port)))\n}",
"func MainServer(server string) {\n\n\t// Build core, and start goroutine\n\tcore := NewCore()\n\tgo core.main()\n\n\t// Build TCP listener and start goroutine\n\tlis := &Listener{core: core}\n\tgo lis.Listen(\"tcp\", server)\n\n\t// Register monitoring server\n\tgo monitoringServer(core)\n\n\t// Setup SIGINT signal handler, and wait\n\tchannel := make(chan os.Signal)\n\tsignal.Notify(channel, os.Interrupt)\n\t<-channel\n\tlog.Println(\"Stop\")\n}",
"func (d *Daemon) Main(serve func(string, string)) error {\n\tsetUmask()\n\tserve(d.SockPath, d.DbPath)\n\treturn nil\n}",
"func main() {\n\t// The default client is HTTP.\n\tlog.Printf(\"Listening on port 8080\")\n\tc, err := cloudevents.NewDefaultClient()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client, %v\", err)\n\t}\n\tlog.Fatal(c.StartReceiver(context.Background(), receive))\n}",
"func main() {\n\tif len(os.Args) < 3 {\n\t\tfmt.Printf(\"wrong parameters\\nUsage: %s host port\\n\", os.Args[0])\n\t\tos.Exit(2)\n\t}\n\n\tmsg, err := client.Dial(os.Args[1], os.Args[2])\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(2)\n\t}\n\tfmt.Println(*msg)\n}",
"func main() {\n\t// Make websocket\n\tlog.Println(\"Starting sync server\")\n\n\t// TODO: Use command line flag credentials.\n\tclient, err := db.NewClient(\"localhost:28015\")\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't initialize database: \", err.Error())\n\t}\n\tdefer client.Close()\n\n\trouter := sync.NewServer(client)\n\n\t// Make web server\n\tn := negroni.Classic()\n\tn.UseHandler(router)\n\tn.Run(\":8000\")\n}",
"func main() {\n\taddr := flag.String(\"addr\",\n\t\tutil.EnvString(\"ADDR\", \"localhost:8080\"), util.HelpString(\"http service address\", \"ADDR\"))\n\tduration := flag.Float64(\"duration\",\n\t\tutil.EnvFloat64(\"DURATION\", 0.0), util.HelpString(\"duration in seconds (float) of each connection. 0 is forver (and the default)\", \"DURATION\"))\n\treconnects := flag.Int(\"reconnects\", util.EnvInt(\"RECONNECTS\", 1),\n\t\tutil.HelpString(\"how many times to reconnect befor quitting\", \"RECONNECTS\"))\n\tparallel := flag.Int(\"parallel\",\n\t\tutil.EnvInt(\"PARALLEL\", 1), util.HelpString(\"how many connections to make in parallel\", \"PARALLEL\"))\n\tflag.Parse()\n\n\tdetails := &ConnDetails{\n\t\tAddress: *addr,\n\t\tDuration: *duration,\n\t\tReconnects: *reconnects,\n\t\tParallel: *parallel,\n\t}\n\n\tlog.SetFlags(log.LstdFlags | log.Lmicroseconds)\n\tvar wg sync.WaitGroup\n\tlaunchParallelConnections(&wg, details)\n\twg.Wait()\n}",
"func main() {\n\t\n\tvar config Config\n\tReadConfig(&config)\n\n\tvar inputScanner *bufio.Scanner\n\n\tif config.Server.Enable {\n\t\t// communicate with TCP/IP server\n\t\tfmt.Printf(\"server mode\\n\")\n\t\t// TODO need to set inputScanner\n\n\t} else if config.Engines.Enable {\n\t\t// play games with multiple engines used\n\t\t// In this mode, we need to hold a full state of the game because no one send the game state.\n\t\tfmt.Printf(\"multi-engine mode\\n\")\n\t\tpanic(\"not implemented now. Can you send pull request?\")\n\t} else {\n\t\t// CLI mode\n\t\tfmt.Printf(\"cli mode\\n\")\n\t\tinputScanner = bufio.NewScanner(os.Stdin)\n\t}\n\n\tConnectEngine(inputScanner, config.Cli.Path)\n}",
"func main() {\n\tif len(os.Args) < 3 {\n\t\tfmt.Printf(\"argument is Invalid :%v\\n\", os.Args)\n\t\treturn\n\t}\n\tswitch os.Args[1] {\n\tcase \"master\":\n\t\tstartReq, err := json.Marshal(common.Request{\n\t\t\tUrl: os.Args[2],\n\t\t\tFlag: 1,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Error(\"err:%v\", err)\n\t\t}\n\t\tdistribute.NewMaster().Run(startReq)\n\tcase \"slave\":\n\t\tdistribute.NewSlave(os.Args[2]).Run()\n\t}\n}",
"func main() {\n\tfmt.Println(\"Go Demo with net/http server\")\n\n\t// initialize empty itemStore\n\titemStore := store.InitializeStore()\n\tserver.StartRouter(itemStore)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Lshortfile flag includes file name and line number in log messages. | func init() {
log.SetFlags(log.Lshortfile)
} | [
"func (f *FileDetail) ShortenSourceFile(n int) string {\n\tx := len(f.SourceFile) - n - 1\n\tif x <= 0 {\n\t\treturn f.SourceFile\n\t}\n\n\tidx := strings.Index(f.SourceFile[x:], string(filepath.Separator))\n\tif idx >= 0 {\n\t\tx = x + idx\n\t}\n\treturn fmt.Sprintf(\"...%s\", f.SourceFile[x:])\n}",
"func StatusShort(c *Client, files []File, untracked StatusUntrackedMode, lineprefix, lineending string) (string, error) {\n\tvar lsfiles []File\n\tif len(files) == 0 {\n\t\tlsfiles = []File{File(c.WorkDir)}\n\t} else {\n\t\tlsfiles = files\n\t}\n\n\tcfiles, err := LsFiles(c, LsFilesOptions{Cached: true}, lsfiles)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttree := make(map[IndexPath]*IndexEntry)\n\t// It's not an error to use \"git status\" before the first commit,\n\t// so discard the error\n\tif head, err := c.GetHeadCommit(); err == nil {\n\t\ti, err := LsTree(c, LsTreeOptions{FullTree: true, Recurse: true}, head, files)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// this should probably be an LsTreeMap library function, it would be\n\t\t// useful other places..\n\t\tfor _, e := range i {\n\t\t\ttree[e.PathName] = e\n\t\t}\n\t}\n\tvar ret string\n\tvar wtst, ist rune\n\tfor i, f := range cfiles {\n\t\twtst = ' '\n\t\tist = ' '\n\t\tfname, err := f.PathName.FilePath(c)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tswitch f.Stage() {\n\t\tcase Stage0:\n\t\t\tif head, ok := tree[f.PathName]; !ok {\n\t\t\t\tist = 'A'\n\t\t\t} else {\n\t\t\t\tif head.Sha1 == f.Sha1 {\n\t\t\t\t\tist = ' '\n\t\t\t\t} else {\n\t\t\t\t\tist = 'M'\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tstat, err := fname.Stat()\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\twtst = 'D'\n\t\t\t} else {\n\t\t\t\tmtime, err := fname.MTime()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t\tif mtime != f.Mtime || stat.Size() != int64(f.Fsize) {\n\t\t\t\t\twtst = 'M'\n\t\t\t\t} else {\n\t\t\t\t\twtst = ' '\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ist != ' ' || wtst != ' ' {\n\t\t\t\tret += fmt.Sprintf(\"%c%c %v%v\", ist, wtst, fname, lineending)\n\t\t\t}\n\t\tcase Stage1:\n\t\t\tswitch cfiles[i+1].Stage() {\n\t\t\tcase Stage2:\n\t\t\t\tif i >= len(cfiles)-2 {\n\t\t\t\t\t// Stage3 is missing, we've reached the end of the index.\n\t\t\t\t\tret += fmt.Sprintf(\"MD %v%v\", fname, lineending)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tswitch cfiles[i+2].Stage() {\n\t\t\t\tcase Stage3:\n\t\t\t\t\t// There's a stage1, stage2, and stage3. If they weren't all different, read-tree would\n\t\t\t\t\t// have resolved it as a trivial stage0 merge.\n\t\t\t\t\tret += fmt.Sprintf(\"UU %v%v\", fname, lineending)\n\t\t\t\tdefault:\n\t\t\t\t\t// Stage3 is missing, but we haven't reached the end of the index.\n\t\t\t\t\tret += fmt.Sprintf(\"MD%v%v\", fname, lineending)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\tcase Stage3:\n\t\t\t\t// Stage2 is missing\n\t\t\t\tret += fmt.Sprintf(\"DM %v%v\", fname, lineending)\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unhandled index\")\n\t\t\t}\n\t\tcase Stage2:\n\t\t\tif i == 0 || cfiles[i-1].Stage() != Stage1 {\n\t\t\t\t// If this is a Stage2, and the previous wasn't Stage1,\n\t\t\t\t// then we know the next one must be Stage3 or read-tree\n\t\t\t\t// would have handled it as a trivial merge.\n\t\t\t\tret += fmt.Sprintf(\"AA %v%v\", fname, lineending)\n\t\t\t}\n\t\t\t// If the previous was Stage1, it was handled by the previous\n\t\t\t// loop iteration.\n\t\t\tcontinue\n\t\tcase Stage3:\n\t\t\t// There can't be just a Stage3 or read-tree would\n\t\t\t// have resolved it as Stage0. 
All cases were handled\n\t\t\t// by Stage1 or Stage2\n\t\t\tcontinue\n\t\t}\n\t}\n\tif untracked != StatusUntrackedNo {\n\t\tlsfilesopts := LsFilesOptions{\n\t\t\tOthers: true,\n\t\t}\n\t\tif untracked == StatusUntrackedNormal {\n\t\t\tlsfilesopts.Directory = true\n\t\t}\n\n\t\tuntracked, err := LsFiles(c, lsfilesopts, lsfiles)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor _, f := range untracked {\n\t\t\tfname, err := f.PathName.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif name := fname.String(); name == \".\" {\n\t\t\t\tret += \"?? ./\" + lineending\n\t\t\t} else {\n\t\t\t\tret += \"?? \" + name + lineending\n\t\t\t}\n\t\t}\n\t}\n\treturn ret, nil\n\n}",
"func LogFileName() {\n\tlFileLength = log.Lshortfile\n}",
"func (l Level) NameShort() string {\n\tswitch l {\n\tcase TraceLevel:\n\t\treturn \"TRC\"\n\tcase DebugLevel:\n\t\treturn \"DBG\"\n\tcase InfoLevel:\n\t\treturn \"INF\"\n\tcase WarnLevel:\n\t\treturn \"WRN\"\n\tcase ErrorLevel:\n\t\treturn \"ERR\"\n\tcase FatalLevel:\n\t\treturn \"FTL\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}",
"func (o *ListIssueGroupOfProjectVersionParams) SetShowshortfilenames(showshortfilenames *bool) {\n\to.Showshortfilenames = showshortfilenames\n}",
"func callerShortfile(file string, lastsep_ ...rune) string {\n\tlastsep := '/'\n\tif len(lastsep_) > 0 {\n\t\tlastsep = lastsep_[0]\n\t}\n\tshort := file\n\tfor i := len(file) - 1; i > 0; i-- {\n\t\tif file[i] == byte(lastsep) {\n\t\t\tshort = file[i+1:]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn short\n}",
"func ShortFlag(name string) FlagOption {\n\treturn func(f *Flag) {\n\t\tf.alias = name\n\t}\n}",
"func LogFilePath() {\n\tlFileLength = log.Llongfile\n}",
"func generateStdflagShortFile() string {\n\tvar s string\n\tvar sf string\n\n\t_, fn, ln, ok := runtime.Caller(2)\n\tif ok {\n\t\tsf = fn + \":\" + strconv.Itoa(ln)\n\t\tindex := strings.LastIndex(sf, \"/\")\n\t\tsf = sf[index+1:]\n\t}\n\ts = time.Now().Format(time.RFC3339) + \" \" + sf + \": \"\n\treturn s\n}",
"func (v Version) Short() string {\n\treturn strconv.Itoa(v.Major) + \".\" + strconv.Itoa(v.Minor) + \".\" + strconv.Itoa(v.Patch)\n}",
"func (v *Version) ShortVersion() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\", v.Release, v.Fixpack, v.Hotfix)\n}",
"func (v Version) ShortString() string {\n\treturn fmt.Sprintf(\"%d.%d\", v.Major, v.Minor)\n}",
"func (f *Logger) ShowLineNumber(show int32) {\n\tif show < 0 {\n\t\treturn\n\t}\n\tatomic.StoreInt32(&f.showLineNum, show)\n}",
"func Short() string {\n\treturn shortVersion\n}",
"func (df *Dirfile) VerbosePrefix(prefix string) error {\n\tvpre := C.CString(prefix)\n\tdefer C.free(unsafe.Pointer(vpre))\n\terrcode := C.gd_verbose_prefix(df.d, vpre)\n\tif errcode != C.GD_E_OK {\n\t\treturn df.Error()\n\t}\n\treturn nil\n}",
"func DebugfFile(p token.Position, format string, args ...interface{}) {\n\tlogger.Printf(levelDebug.format(p)+format, args...)\n}",
"func InitDetailedLogger(f *os.File) {\n\n\tlog.SetReportCaller(true)\n\tlog.SetLevel(logrus.DebugLevel)\n\n\tlog.SetFormatter(&logrus.JSONFormatter{\n\t\tTimestampFormat: \"\",\n\t\tPrettyPrint: true,\n\t\tCallerPrettyfier: func(f *runtime.Frame) (string, string) {\n\t\t\ts := strings.Split(f.Function, \".\")\n\t\t\tfuncname := s[len(s)-1]\n\t\t\t_, filename := path.Split(f.File)\n\t\t\treturn funcname, filename\n\t\t},\n\t})\n\n\t// Set output of logs to Stdout\n\t// Change to f for redirecting to file\n\tlog.SetOutput(os.Stdout)\n\n}",
"func (r *Refactor) shortPath(path string) string {\n\tif rel, err := filepath.Rel(r.dir, path); err == nil && len(rel) < len(path) {\n\t\treturn rel\n\t}\n\treturn path\n}",
"func ShortHelp(rootName string, root *cmds.Command, path []string, out io.Writer) error {\n\tcmd, err := root.Get(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// default cmd to root if there is no path\n\tif path == nil && cmd == nil {\n\t\tcmd = root\n\t}\n\n\tpathStr := rootName\n\tif len(path) > 0 {\n\t\tpathStr += \" \" + strings.Join(path, \" \")\n\t}\n\n\tfields := helpFields{\n\t\tIndent: indentStr,\n\t\tPath: pathStr,\n\t\tTagline: cmd.Helptext.Tagline,\n\t\tSynopsis: cmd.Helptext.Synopsis,\n\t\tDescription: cmd.Helptext.ShortDescription,\n\t\tSubcommands: cmd.Helptext.Subcommands,\n\t\tMoreHelp: (cmd != root),\n\t}\n\n\twidth := getTerminalWidth(out) - len(indentStr)\n\n\t// autogen fields that are empty\n\tfields.Warning = generateWarningText(cmd)\n\tif len(cmd.Helptext.Usage) > 0 {\n\t\tfields.Usage = cmd.Helptext.Usage\n\t} else {\n\t\tfields.Usage = commandUsageText(width, cmd, rootName, path)\n\t}\n\tif len(fields.Subcommands) == 0 {\n\t\tfields.Subcommands = strings.Join(subcommandText(width, cmd, rootName, path, cmds.Active), \"\\n\")\n\t\tfields.ExperimentalSubcommands = strings.Join(subcommandText(width, cmd, rootName, path, cmds.Experimental), \"\\n\")\n\t\tfields.DeprecatedSubcommands = strings.Join(subcommandText(width, cmd, rootName, path, cmds.Deprecated), \"\\n\")\n\t\tfields.RemovedSubcommands = strings.Join(subcommandText(width, cmd, rootName, path, cmds.Removed), \"\\n\")\n\t}\n\tif len(fields.Synopsis) == 0 {\n\t\tfields.Synopsis = generateSynopsis(width, cmd, pathStr)\n\t}\n\n\t// trim the extra newlines (see TrimNewlines doc)\n\tfields.TrimNewlines()\n\n\t// indent all fields that have been set\n\tfields.IndentAll()\n\n\treturn shortHelpTemplate.Execute(out, fields)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewHealthController creates a health controller. | func NewHealthController(service *goa.Service) *HealthController {
return &HealthController{Controller: service.NewController("HealthController")}
} | [
"func NewHealthController(router *mux.Router, r *render.Render) *HealthController {\n\tctrl := &HealthController{router, r}\n\tctrl.Register()\n\treturn ctrl\n}",
"func NewController(w http.ResponseWriter, r *http.Request, name string) *Controller {\n\treturn &Controller{w, r, name, make(map[string]interface{})}\n}",
"func NewHealthCheckController(\n\tlogger logging.LoggerInterface,\n\tappMonitor application.MonitorIterface,\n\tdependenciesMonitor services.MonitorIterface,\n) *HealthCheckController {\n\treturn &HealthCheckController{\n\t\tlogger: logger,\n\t\tappMonitor: appMonitor,\n\t\tdependenciesMonitor: dependenciesMonitor,\n\t}\n}",
"func NewController() *Controller {\n\treturn &Controller{\n\t\tClouds: make(map[string]CloudProvider),\n\t\t// WorkerOptions: NewWorkerOptions(),\n\t\tprovisionErr: NewErrCloudProvision(),\n\t}\n}",
"func NewController() *Controller {\n controller := Controller{}\n\n return &controller\n}",
"func NewController(client kubernetes.Interface) *Controller {\n\tshared := informers.NewSharedInformerFactory(client, time.Second*30)\n\tinform := shared.Apps().V1().Deployments()\n\tcontrl := &Controller{\n\t\tclient: client,\n\t\tinformer: inform.Informer(),\n\t\tlister: inform.Lister(),\n\t\tlogger: logrus.New(),\n\t\tqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"regitseel\"),\n\t}\n\n\tinform.Informer().AddEventHandler(\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: contrl.enqueue,\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tcontrl.enqueue(new)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\td := obj.(*appsv1.Deployment)\n\t\t\t\tif err := contrl.delete(d); err != nil {\n\t\t\t\t\tcontrl.logger.Errorf(\"failed to delete from api: %v\", d.Name)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\n\treturn contrl\n}",
"func NewHealthzController(service *goa.Service) *HealthzController {\n\treturn &HealthzController{Controller: service.NewController(\"HealthzController\")}\n}",
"func NewController(service auth.UseCase) *Controller {\n\treturn &Controller{\n\t\tservice: service,\n\t}\n}",
"func NewController(cfg *rest.Config, cs kubernetes.Interface, namespace string) (*Controller, error) {\n\trc, err := kubekit.RESTClient(cfg, &v1alpha1.SchemeGroupVersion, v1alpha1.AddToScheme)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Controller{\n\t\tcs: cs,\n\t\trc: rc,\n\t\tnamespace: namespace,\n\t\tpatcher: patcher.New(\"hlnr-microservice\", cmdutil.NewFactory(nil)),\n\t}, nil\n}",
"func NewHospitalController(hospitalService service.HospitalService) HospitalController {\r\n\treturn &hospitalController{\r\n\t\thospitalService: hospitalService,\r\n\t}\r\n}",
"func (app *Application) NewController(resource *Resource) *Controller {\n\tc := &Controller{\n\t\tresource: resource,\n\t\tcustomHandlers: make(map[route]handlerChain),\n\t}\n\n\tapp.controllers[c.resource] = c\n\treturn c\n}",
"func NewController(address string) controller.Controller {\n\treturn &Controller{address, nil}\n}",
"func NewController(t require.TestingT) *gomock.Controller {\n\treturn gomock.NewController(Reporter{T: t})\n}",
"func NewController(exec boil.ContextExecutor) Controller {\n\trepo := &personRepository{executor: exec}\n\tsvc := &personService{repo: repo}\n\tpc := &personController{service: svc}\n\treturn pc\n}",
"func newController(cfg Config) (*controller, error) {\n\tlog.Infof(\"starting the ingress admission controller, version: %s, listen: %s\", Version, cfg.Listen)\n\tc := &controller{config: &cfg}\n\n\tc.engine = echo.New()\n\tc.engine.HideBanner = true\n\tc.engine.Use(middleware.Recover())\n\tif cfg.EnableLogging {\n\t\tc.engine.Use(middleware.Logger())\n\t}\n\tc.engine.POST(\"/\", c.reviewHandler)\n\tc.engine.GET(\"/health\", c.healthHandler)\n\tc.engine.GET(\"/version\", c.versionHandler)\n\n\treturn c, nil\n}",
"func New() *Controller {\n\treturn &Controller{\n\t\tValidatePayload: ValidatePayload,\n\t}\n}",
"func NewController(params ControllerParams) (*Controller, error) {\n\t// If the BGP control plane is disabled, just return nil. This way the hive dependency graph is always static\n\t// regardless of config. The lifecycle has not been appended so no work will be done.\n\tif !params.DaemonConfig.BGPControlPlaneEnabled() {\n\t\treturn nil, nil\n\t}\n\n\tc := Controller{\n\t\tSig: params.Sig,\n\t\tBGPMgr: params.RouteMgr,\n\t\tPolicyResource: params.PolicyResource,\n\t\tNodeSpec: params.NodeSpec,\n\t}\n\n\tparams.Lifecycle.Append(&c)\n\n\treturn &c, nil\n}",
"func NewController(params ControllerParams) (*Controller, error) {\n\t// If the BGP control plane is disabled, just return nil. This way the hive dependency graph is always static\n\t// regardless of config. The lifecycle has not been appended so no work will be done.\n\tif !params.DaemonConfig.BGPControlPlaneEnabled() {\n\t\treturn nil, nil\n\t}\n\n\tc := Controller{\n\t\tSig: params.Sig,\n\t\tBGPMgr: params.RouteMgr,\n\t\tPolicyResource: params.PolicyResource,\n\t\tLocalNodeStore: params.LocalNodeStore,\n\t}\n\n\tparams.Lifecycle.Append(&c)\n\n\treturn &c, nil\n}",
"func NewController(cfg *rest.Config, cs kubernetes.Interface, namespace string) (*Controller, error) {\n\trc, err := kubekit.RESTClient(cfg, &v1alpha1.SchemeGroupVersion, v1alpha1.AddToScheme)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Controller{\n\t\tcs: cs,\n\t\trc: rc,\n\t\tnamespace: namespace,\n\t\tpatcher: patcher.New(\"hlnr-versioned-microservice\", cmdutil.NewFactory(nil)),\n\t}, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Health runs the health action. | func (c *HealthController) Health(ctx *app.HealthHealthContext) error {
// HealthController_Health: start_implement
ver := "unknown"
semVer, err := semver.Make(MajorMinorPatch + "-" + ReleaseType + "+git.sha." + GitCommit)
if err == nil {
ver = semVer.String()
}
return ctx.OK([]byte("Health OK: " + time.Now().String() + ", semVer: " + ver + "\n"))
// HealthController_Health: end_implement
} | [
"func (cmd *HealthHealthCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = \"/api/_ah/health\"\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tresp, err := c.HealthHealth(ctx, path)\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"failed\", \"err\", err)\n\t\treturn err\n\t}\n\n\tgoaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)\n\treturn nil\n}",
"func (cmd *HealthHealthCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = \"/health\"\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tresp, err := c.HealthHealth(ctx, path)\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"failed\", \"err\", err)\n\t\treturn err\n\t}\n\n\tgoaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)\n\treturn nil\n}",
"func Health() (err error) {\n\treturn // yeah, we're good :)\n}",
"func Health(e *echo.Echo) {\n\te.GET(\"/health\", JSONWapper(func(c echo.Context) (int, interface{}) {\n\t\treturn http.StatusOK, nil\n\t}))\n}",
"func doHealth(sdk *sdk.SDK) {\n\ttick := time.Tick(2 * time.Second)\n\tfor {\n\t\terr := sdk.Health()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"[wrapper] Could not send health ping, %v\", err)\n\t\t}\n\t\t<-tick\n\t}\n}",
"func doHealth(s *gosdk.SDK, stop <-chan struct{}) {\n\ttick := time.Tick(2 * time.Second)\n\tfor {\n\t\terr := s.Health()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Could not send health ping, %v\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tlog.Print(\"Stopped health pings\")\n\t\t\treturn\n\t\tcase <-tick:\n\t\t}\n\t}\n}",
"func (r *GoMetricsRegistry) RunHealthchecks() {}",
"func PublishHealth(e events.Event) {\n\tSendEvent(e)\n}",
"func (c *MockController) Health() error {\n\tc.HealthFuncCalled++\n\n\treturn c.HealthFunc()\n}",
"func HealthHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Health called\")\n\tlog.Println(\"Request:\", r)\n\n\tw.WriteHeader(http.StatusOK)\n\n}",
"func (es *Eventstore) Health(ctx context.Context) error {\n\treturn es.repo.Health(ctx)\n}",
"func (k *API) Health(w http.ResponseWriter, r *http.Request) {\n\tutil.JSONResponse(w, http.StatusOK, model.HealthResponse{\n\t\tStatus: \"healthy\",\n\t})\n}",
"func (api *API) health(w http.ResponseWriter, req *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"OK\"))\n}",
"func (agent *AgentCommandRunner) Health() (string, error) {\n\targuments := []string{\"health\"}\n\toutput, err := agent.executeAgentCmdWithError(arguments)\n\treturn output, err\n}",
"func (c *Client) Health(ctx context.Context) (err error) {\n\t_, err = c.HealthEndpoint(ctx, nil)\n\treturn\n}",
"func (cmd *RootHealthCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = \"/\"\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tresp, err := c.RootHealth(ctx, path)\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"failed\", \"err\", err)\n\t\treturn err\n\t}\n\n\tgoaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)\n\treturn nil\n}",
"func HealthHandler(w http.ResponseWriter, r *http.Request) {\n\tif randomFailure() {\n\t\thttp.Error(w, \"health check failed\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Write([]byte(\"OK!\"))\n}",
"func (h *Handler) Health(w http.ResponseWriter, r *http.Request) {\n\twriteResponse(r, w, http.StatusOK, &SimpleResponse{\n\t\tTraceID: tracing.FromContext(r.Context()),\n\t\tMessage: \"OK\",\n\t})\n}",
"func (a *API) health(c *gin.Context) (*Health, error) {\n\treturn &Health{\n\t\tStatus: \"OK\",\n\t}, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewGenerator starts a foreground goroutine which generates a sequence of unsigned ints and puts them in the input channel; it also returns a stop channel which needs to be triggered when the generator needs to be stopped | func NewGenerator(input chan<- uint) chan<- bool {
stop := make(chan bool)
go func() {
var current uint = 1
for {
select {
case input <- current:
current++
case <-stop:
close(input)
return
}
}
}()
return stop
} | [
"func generator(ctx context.Context) <-chan int {\n\tch := make(chan int)\n\tn := 0\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tfmt.Println(\"Got cancel signal.\")\n\t\t\t\treturn\n\t\t\tcase ch <- n:\n\t\t\t\tfmt.Println(\"Pushing number to channel\")\n\t\t\t\tn++\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch\n}",
"func generate(source chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tsource <- i // Send 'i' to channel 'source'.\n\t}\n}",
"func generator(nums ...int) <-chan int {\n\tout := make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor _, val := range nums {\n\t\t\tout <- val\n\t\t}\n\t}()\n\n\treturn out\n}",
"func generate(ch chan int) {\n\tfor i := 2; ; i++ {\n\t\tch <- i\n\t}\n}",
"func gen(ctx context.Context) <-chan int {\r\n\tch := make(chan int)\r\n\tgo func() {\r\n\t\tvar n int\r\n\t\tfor {\r\n\t\t\tselect {\r\n\t\t\tcase <-ctx.Done():\r\n\t\t\t\treturn // avoid leaking of this goroutine when ctx is done.\r\n\t\t\tcase ch <- n:\r\n\t\t\t\tn++\r\n\t\t\t}\r\n\t\t}\r\n\t}()\r\n\treturn ch\r\n}",
"func gen(nums ...int) <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}",
"func gen(nums ...int) <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor _, n := range nums {\n\t\t\tout <- n\n\t\t}\n\t}()\n\treturn out\n}",
"func GenerateSerialIntsStream(ctx context.Context) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor i := 0; ; i++ {\n\t\t\tselect {\n\t\t\tcase s <- i:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn s\n}",
"func NewNumberRangeGenerator(i interface{}) (outputChan chan stream.Record, controlChan chan ControlAction) {\n\tcfg := i.(*NumberRangeGeneratorConfig)\n\toutputChan = make(chan stream.Record, c.ChanSize)\n\tcontrolChan = make(chan ControlAction, 1)\n\tif cfg.IntervalSize == 0 {\n\t\tcfg.Log.Panic(cfg.Name, \" aborting due to interval size 0 which causes infinite loop\")\n\t}\n\tgo func() {\n\t\tif cfg.PanicHandlerFn != nil {\n\t\t\tdefer cfg.PanicHandlerFn()\n\t\t}\n\t\tcfg.Log.Info(cfg.Name, \" is running\")\n\t\tif cfg.WaitCounter != nil {\n\t\t\tcfg.WaitCounter.Add()\n\t\t\tdefer cfg.WaitCounter.Done()\n\t\t}\n\t\trowCount := int64(0)\n\t\tif cfg.StepWatcher != nil { // if we have been given a StepWatcher struct that can watch our rowCount and output channel length...\n\t\t\tcfg.StepWatcher.StartWatching(&rowCount, &outputChan)\n\t\t\tdefer cfg.StepWatcher.StopWatching()\n\n\t\t}\n\t\t// Iterate over the input records.\n\t\tsendRow := func(inputRec stream.Record, fromNum *float64, toNum *float64) (rowSentOK bool) {\n\t\t\t// Emit low date and hi date record.\n\t\t\trec := stream.NewRecord()\n\t\t\tif cfg.PassInputFieldsToOutput {\n\t\t\t\tinputRec.CopyTo(rec) // ensure the output record contains the input fields.\n\t\t\t}\n\t\t\tif cfg.OutputLeftPaddedNumZeros > 0 { // if we should output strings with leading zeros...\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4LowNum, fmt.Sprintf(\"%0*.0f\", cfg.OutputLeftPaddedNumZeros, *fromNum))\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4HighNum, fmt.Sprintf(\"%0*.0f\", cfg.OutputLeftPaddedNumZeros, *toNum))\n\t\t\t} else {\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4LowNum, *fromNum)\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4HighNum, *toNum)\n\t\t\t}\n\t\t\trowSentOK = safeSend(rec, outputChan, controlChan, sendNilControlResponse) // forward the record\n\t\t\tif rowSentOK {\n\t\t\t\tcfg.Log.Debug(cfg.Name, \" generated: lowNum=\", *fromNum, \"; highNum=\", *toNum)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase controlAction := <-controlChan: // if we have been asked to shutdown...\n\t\t\tcontrolAction.ResponseChan <- nil // respond that we're done with a nil error.\n\t\t\tcfg.Log.Info(cfg.Name, \" shutdown\")\n\t\t\treturn\n\t\tcase rec, ok := <-cfg.InputChan: // for each FromDate record...\n\t\t\tif !ok { // if the input chan was closed...\n\t\t\t\tcfg.InputChan = nil // disable this case.\n\t\t\t} else {\n\t\t\t\tcfg.Log.Info(cfg.Name, \" splitting number range \", rec.GetData(cfg.InputChanFieldName4LowNum), \" to \", rec.GetData(cfg.InputChanFieldName4HighNum), \" using interval value \", cfg.IntervalSize)\n\t\t\t\t// Get the FromDate and ToDate as strings.\n\t\t\t\tfromNumStr := rec.GetDataAsStringPreserveTimeZone(cfg.Log, cfg.InputChanFieldName4LowNum)\n\t\t\t\ttoNumStr := rec.GetDataAsStringPreserveTimeZone(cfg.Log, cfg.InputChanFieldName4HighNum)\n\t\t\t\t// Convert to float(64)\n\t\t\t\tfromNum, err := strconv.ParseFloat(fromNumStr, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" error parsing input field for low number: \", err)\n\t\t\t\t}\n\t\t\t\ttoNum, err := strconv.ParseFloat(toNumStr, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" error parsing input field for high number: \", err)\n\t\t\t\t}\n\n\t\t\t\t// Richard 20191011 - old extract field values direct to float:\n\t\t\t\t// fromNum, err := getFloat64FromInterface(rec.GetData(cfg.InputChanFieldName4LowNum))\n\t\t\t\t// toNum, err := getFloat64FromInterface(rec.GetData(cfg.InputChanFieldName4HighNum))\n\n\t\t\t\t// Add the 
increment and emit rows until it is greater than the ToDate.\n\t\t\t\tfor { // while we are outputting less than ToDate...\n\t\t\t\t\tto := fromNum + cfg.IntervalSize\n\t\t\t\t\tif to > toNum { // if this increment overruns the high number...\n\t\t\t\t\t\tbreak // don't output a row!\n\t\t\t\t\t}\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromNum, &to); !rowSentOK {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // increment the row count bearing in mind someone else is reporting on its values.\n\t\t\t\t\tfromNum = to // save FromDate with increment added.\n\t\t\t\t}\n\t\t\t\tif fromNum < toNum || atomic.AddInt64(&rowCount, 0) == 0 {\n\t\t\t\t\t// if we have a final portion of number to output a row for;\n\t\t\t\t\t// or we have not output a row (i.e. when min value = max value)...\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromNum, &toNum); !rowSentOK { // emit the final gap.\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // add a row count.\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cfg.InputChan == nil { // if we processed all data...\n\t\t\t\tbreak // end gracefully.\n\t\t\t}\n\t\t}\n\t\t// Calculate output.\n\t\tclose(outputChan)\n\t\tcfg.Log.Info(cfg.Name, \" complete\")\n\t}()\n\treturn\n}",
"func newTrafficGenerator(t *testing.T, srcApp string, qps uint16) trafficGenerator {\n\twg := &sync.WaitGroup{}\n\twg.Add(1)\n\n\treturn &trafficGeneratorImpl{\n\t\tsrcApp: srcApp,\n\t\tqps: qps,\n\t\tforwarder: forwardToGrpcPort(t, srcApp),\n\t\twg: wg,\n\t\tresults: make(map[string]int),\n\t}\n}",
"func generator(msg string) <-chan string { // return read-only channel\n\tc := make(chan string)\n\tgo func() {\n\t\tfor i := 0; ; i++ {\n\t\t\tc <- fmt.Sprintf(\"%s %d\", msg, i)\n\t\t\ttime.Sleep(5 * time.Millisecond)\n\t\t}\n\t}()\n\treturn c\n}",
"func GenerateRandIntsStream(ctx context.Context) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase s <- rand.Int():\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn s\n}",
"func GenerateConcurrent(bitsize int, stop chan struct{}) (<-chan *big.Int, <-chan error) {\n\tcount := runtime.GOMAXPROCS(0)\n\tints := make(chan *big.Int, count)\n\terrs := make(chan error, count)\n\n\t// In order to successfully close all goroutines below when the caller wants them to, they require\n\t// a channel that is close()d: just sending a struct{}{} would stop one but not all goroutines.\n\t// Instead of requiring the caller to close() the stop chan parameter we use our own chan for\n\t// this, so that we always stop all goroutines independent of whether the caller close()s stop\n\t// or sends a struct{}{} to it.\n\tstopped := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tclose(stopped)\n\t\tcase <-stopped: // stopped can also be closed by a goroutine that encountered an error\n\t\t}\n\t}()\n\n\t// Start safe prime generation goroutines\n\tfor i := 0; i < count; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\t// Pass stopped chan along; if closed, Generate() returns nil, nil\n\t\t\t\tx, err := Generate(bitsize, stopped)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs <- err\n\t\t\t\t\tclose(stopped)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Only send result and continue generating if we have not been told to stop\n\t\t\t\tselect {\n\t\t\t\tcase <-stopped:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tints <- x\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn ints, errs\n}",
"func NewGenerator(options *Options) *Generator {\n\tvar g = &Generator{}\n\t// first we validate the flags\n\tif err := options.Validate(); err != nil {\n\t\tpanic(err)\n\t}\n\tg.options = options\n\t// we initiate the values on the generator\n\tg.init()\n\treturn g\n}",
"func generator(l *raft.Log, sz int) {\n\ttime.Sleep(InitializationDuration)\n\n\tfor {\n\t\tcommand := make([]byte, sz)\n\t\tif err := l.Apply(command); err != nil {\n\t\t\tlog.Fatalf(\"generate: %s\", err)\n\t\t}\n\t}\n}",
"func generateGoroutines(done chan bool, numGoroutines int) {\n\tfor i := 0; i < numGoroutines; i++ {\n\t\tgo func(done chan bool) {\n\t\t\t<-done\n\t\t}(done)\n\t}\n}",
"func genStreams() <-chan <-chan int {\n\tout := make(chan (<-chan int))\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor i := 1; i <= 10; i++ {\n\t\t\tstream := make(chan int, 3)\n\t\t\tstream <- i\n\t\t\tstream <- i + 1\n\t\t\tstream <- i + 2\n\t\t\tclose(stream)\n\t\t\tout <- stream\n\t\t}\n\t}()\n\treturn out\n}",
"func createChan(n int) chan int{\n\tch := make(chan int) \t//make chan\n\tgo func() {ch <- n}() // send data to chan\n\treturn ch \t\t\t\t//return chan\n}",
"func makeLoopingOutputDevice(loop chan<- int, output chan<- int) func(int) {\n\treturn func(n int) {\n\t\tloop <- n\n\t\toutput <- n\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
genFields generates the fields config for the given AST | func genFields(fs []*ast.FieldDefinition) *jen.Statement {
//
// Generate config for fields
//
// == Example input SDL
//
// type Dog {
// name(style: NameComponentsStyle = SHORT): String!
// givenName: String @deprecated(reason: "No longer supported; please use name field.")
// }
//
// == Example output
//
// graphql.Fields{
// "name": graphql.Field{ ... },
// "givenName": graphql.Field{ ... },
// }
//
return jen.Qual(defsPkg, "Fields").Values(jen.DictFunc(func(d jen.Dict) {
for _, f := range fs {
d[jen.Lit(f.Name.Value)] = genField(f)
}
}))
} | [
"func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// \"\"\"\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &graphql.Field{\n\t// Name: \"name\",\n\t// Type: graphql.NonNull(graphql.String),\n\t// Description: \"name of the pet\",\n\t// DeprecationReason: \"\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\t// &graphql.Field{\n\t// Name: \"givenName\",\n\t// Type: graphql.String,\n\t// Description: \"givenName of the pet\",\n\t// DeprecationReason: \"No longer supported; please use name field.\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"Field\").Values(jen.Dict{\n\t\tjen.Id(\"Args\"): genArguments(field.Arguments),\n\t\tjen.Id(\"DeprecationReason\"): genDeprecationReason(field.Directives),\n\t\tjen.Id(\"Description\"): genDescription(field),\n\t\tjen.Id(\"Name\"): jen.Lit(field.Name.Value),\n\t\tjen.Id(\"Type\"): genOutputTypeReference(field.Type),\n\t})\n}",
"func (fs *FileStat) GenerateFields() (string, error) {\n\ttb, e := fs.modTime.MarshalBinary()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tcb, e := fs.compressedBytes()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\tformat := `\"%s\", \"%s\", %d, 0%o, binfs.MustHexDecode(\"%x\"), %t, binfs.MustHexDecode(\"%x\")`\n\treturn fmt.Sprintf(format,\n\t\tfs.path,\n\t\tfs.name,\n\t\tfs.size,\n\t\tfs.mode,\n\t\ttb,\n\t\tfs.isDir,\n\t\tcb,\n\t), nil\n}",
"func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {\n\tfields := make([]Field, 0, 100)\n\timports := make([]Import, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t\timports = append(imports, file.imports...)\n\t\t}\n\t}\n\n\tgenFn(typeName, fields, imports)\n\n}",
"func GenerateFieldsYml(fieldFiles []*YmlFile) ([]byte, error) {\n\tbuf := bytes.NewBufferString(\"\")\n\tfor _, p := range fieldFiles {\n\t\tfile, err := os.Open(p.Path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer file.Close()\n\n\t\tfs := bufio.NewScanner(file)\n\t\tfor fs.Scan() {\n\t\t\terr = writeIndentedLine(buf, fs.Text()+\"\\n\", p.Indent)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif err := fs.Err(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn buf.Bytes(), nil\n}",
"func Generate(fields map[string]*indexer.Field) map[string]interface{} {\n\treturn mapFields(fields)\n}",
"func mapFields(list *ast.FieldList, srcDir string) (map[string]*ast.Field, error) {\n\tlength := list.NumFields()\n\n\tfieldMap := make(map[string]*ast.Field, length)\n\tfor _, f := range list.List {\n\t\tif len(f.Names) == 0 { // an embedded struct\n\t\t\tt, ok := f.Type.(*ast.Ident)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tembedName := t.Name\n\t\t\tembedStruct, err := parseGoStruct(embedName, filepath.Join(srcDir, strings.ToLower(embedName)+\".go\"))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to parse embedded go struct %q: %v\", embedName, err)\n\t\t\t}\n\t\t\tembedFields := findStructFields(embedStruct)\n\t\t\tif embedFields == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"no fields found for embedded go struct %q\", embedName)\n\t\t\t}\n\t\t\tembedMap, err := mapFields(embedFields, srcDir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to map fields for embedded go struct %q: %v\", embedName, err)\n\t\t\t}\n\t\t\tfor k, v := range embedMap {\n\t\t\t\tif _, ok := fieldMap[k]; !ok {\n\t\t\t\t\tfieldMap[k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tkey := f.Names[0].Name\n\t\tif invalidFieldNames[key] {\n\t\t\tkey = \"Field_\" + key\n\t\t}\n\t\tfieldMap[key] = f\n\t}\n\n\treturn fieldMap, nil\n}",
"func GenerateBaseFields(conf CurveConfig) error {\n\tif err := goff.GenerateFF(\"fr\", \"Element\", conf.RTorsion, filepath.Join(conf.OutputDir, \"fr\"), false); err != nil {\n\t\treturn err\n\t}\n\tif err := goff.GenerateFF(\"fp\", \"Element\", conf.FpModulus, filepath.Join(conf.OutputDir, \"fp\"), false); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func TraceFieldGenerator(ctx context.Context) []zapcore.Field {\n\tspanCtx := trace.FromContext(ctx).SpanContext()\n\n\treturn []zapcore.Field{\n\t\tzap.Uint64(\"dd.trace_id\", binary.BigEndian.Uint64(spanCtx.TraceID[8:])),\n\t\tzap.Uint64(\"dd.span_id\", binary.BigEndian.Uint64(spanCtx.SpanID[:])),\n\t}\n}",
"func fieldsYML() error {\n\treturn mage.GenerateFieldsYAML(mage.OSSBeatDir(\"module\"), \"module\")\n}",
"func (ConfigMacrosField) dIModuleFieldNode() {}",
"func expandFields(compiled *lang.CompiledExpr, define *lang.DefineExpr) lang.DefineFieldsExpr {\n\tvar fields lang.DefineFieldsExpr\n\tfor _, field := range define.Fields {\n\t\tif isEmbeddedField(field) {\n\t\t\tembedded := expandFields(compiled, compiled.LookupDefine(string(field.Type)))\n\t\t\tfields = append(fields, embedded...)\n\t\t} else {\n\t\t\tfields = append(fields, field)\n\t\t}\n\t}\n\treturn fields\n}",
"func (n ClassNode) Codegen(scope *Scope, c *Compiler) value.Value {\n\tstructDefn := scope.FindType(n.Name).Type.(*types.StructType)\n\n\tfieldnames := make([]string, 0, len(n.Variables))\n\tfields := make([]types.Type, 0, len(n.Variables))\n\n\tnames := map[string]bool{}\n\n\tfor _, f := range n.Variables {\n\t\tt := f.Type.Name\n\t\tname := f.Name.String()\n\t\tif _, found := names[name]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, f.Name)\n\t\t}\n\t\tnames[name] = true\n\t\tty := scope.FindType(t).Type\n\t\tty = f.Type.BuildPointerType(ty)\n\t\tfields = append(fields, ty)\n\t\tfieldnames = append(fieldnames, name)\n\t}\n\n\tthisArg := VariableDefnNode{}\n\tthisArg.Name = NewNamedReference(\"this\")\n\tthisArg.Type = GeodeTypeRef{}\n\tthisArg.Type.Array = false\n\tthisArg.Type.Name = n.Name\n\tthisArg.Type.PointerLevel = 1\n\n\tstructDefn.Fields = fields\n\tstructDefn.Names = fieldnames\n\n\tmethodBaseArgs := []VariableDefnNode{thisArg}\n\tfor _, m := range n.Methods {\n\t\tm.Name.Value = fmt.Sprintf(\"class.%s.%s\", n.Name, m.Name)\n\t\tif _, found := names[m.Name.String()]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, m.Name)\n\t\t}\n\t\tnames[m.Name.String()] = true\n\t\tm.Args = append(methodBaseArgs, m.Args...)\n\t\tm.Declare(scope, c)\n\t\tm.Codegen(scope, c)\n\t}\n\n\treturn nil\n}",
"func (p TreeWriter) getFields(leaf *yaml.RNode) (treeFields, error) {\n\tfieldsByName := map[string]*treeField{}\n\n\t// index nested and non-nested fields\n\tfor i := range p.Fields {\n\t\tf := p.Fields[i]\n\t\tseq, err := leaf.Pipe(&f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif seq == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fieldsByName[f.Name] == nil {\n\t\t\tfieldsByName[f.Name] = &treeField{name: f.Name}\n\t\t}\n\n\t\t// non-nested field -- add directly to the treeFields list\n\t\tif f.SubName == \"\" {\n\t\t\t// non-nested field -- only 1 element\n\t\t\tval, err := yaml.String(seq.Content()[0], yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfieldsByName[f.Name].value = val\n\t\t\tcontinue\n\t\t}\n\n\t\t// nested-field -- create a parent elem, and index by the 'match' value\n\t\tif fieldsByName[f.Name].subFieldByMatch == nil {\n\t\t\tfieldsByName[f.Name].subFieldByMatch = map[string]treeFields{}\n\t\t}\n\t\tindex := fieldsByName[f.Name].subFieldByMatch\n\t\tfor j := range seq.Content() {\n\t\t\telem := seq.Content()[j]\n\t\t\tmatches := f.Matches[elem]\n\t\t\tstr, err := yaml.String(elem, yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// map the field by the name of the element\n\t\t\t// index the subfields by the matching element so we can put all the fields for the\n\t\t\t// same element under the same branch\n\t\t\tmatchKey := strings.Join(matches, \"/\")\n\t\t\tindex[matchKey] = append(index[matchKey], &treeField{name: f.SubName, value: str})\n\t\t}\n\t}\n\n\t// iterate over collection of all queried fields in the Resource\n\tfor _, field := range fieldsByName {\n\t\t// iterate over collection of elements under the field -- indexed by element name\n\t\tfor match, subFields := range field.subFieldByMatch {\n\t\t\t// create a new element for this collection of fields\n\t\t\t// note: we will convert name to an index later, but keep the match for sorting\n\t\t\telem := &treeField{name: match}\n\t\t\tfield.matchingElementsAndFields = append(field.matchingElementsAndFields, elem)\n\n\t\t\t// iterate over collection of queried fields for the element\n\t\t\tfor i := range subFields {\n\t\t\t\t// add to the list of fields for this element\n\t\t\t\telem.matchingElementsAndFields = append(elem.matchingElementsAndFields, subFields[i])\n\t\t\t}\n\t\t}\n\t\t// clear this cached data\n\t\tfield.subFieldByMatch = nil\n\t}\n\n\t// put the fields in a list so they are ordered\n\tfieldList := treeFields{}\n\tfor _, v := range fieldsByName {\n\t\tfieldList = append(fieldList, v)\n\t}\n\n\t// sort the fields\n\tsort.Sort(fieldList)\n\tfor i := range fieldList {\n\t\tfield := fieldList[i]\n\t\t// sort the elements under this field\n\t\tsort.Sort(field.matchingElementsAndFields)\n\n\t\tfor i := range field.matchingElementsAndFields {\n\t\t\telement := field.matchingElementsAndFields[i]\n\t\t\t// sort the elements under a list field by their name\n\t\t\tsort.Sort(element.matchingElementsAndFields)\n\t\t\t// set the name of the element to its index\n\t\t\telement.name = fmt.Sprintf(\"%d\", i)\n\t\t}\n\t}\n\n\treturn fieldList, nil\n}",
"func (_ *Frontend) GenerateFieldMap(types []GoType) string {\n\tout := strings.Builder{}\n\tfor _, v := range types {\n\t\tname := strcase.ToLowerCamel(v.Name)\n\t\tstmt := fmt.Sprintf(\"%s:'%s',\", name, name)\n\t\tout.WriteString(stmt)\n\t}\n\treturn out.String()\n\n}",
"func generate(copyrights string, collector *collector, templateBuilder templateBuilder) {\n\tfor _, pkg := range collector.Packages {\n\t\tfileTemplate := fileTpl{\n\t\t\tCopyright: copyrights,\n\n\t\t\tStandardImports: []string{\n\t\t\t\t\"fmt\",\n\t\t\t\t\"unicode\",\n\t\t\t\t\"unicode/utf8\",\n\t\t\t},\n\n\t\t\tCustomImports: []string{\n\t\t\t\t\"github.com/google/uuid\",\n\t\t\t},\n\t\t}\n\t\tfor _, f := range pkg.Files {\n\t\t\tfor _, d := range f.Decls {\n\t\t\t\tg, ok := d.(*ast.GenDecl)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstructs := structSearch(g)\n\t\t\t\tif len(structs) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, s := range structs {\n\t\t\t\t\tatLeastOneField := false\n\n\t\t\t\t\tfor _, field := range s.Type.Fields.List {\n\n\t\t\t\t\t\tpos := collector.FileSet.Position(field.Type.Pos())\n\t\t\t\t\t\ttyp := collector.Info.TypeOf(field.Type)\n\n\t\t\t\t\t\tcomposedType := \"\"\n\t\t\t\t\t\tbaseName := getType(typ, &composedType)\n\t\t\t\t\t\tfmt.Println(\"Add validation: \", pos, \": \", baseName, \"/\", composedType)\n\n\t\t\t\t\t\tif err := templateBuilder.generateCheck(field, s.Name, baseName, composedType); err != nil {\n\t\t\t\t\t\t\tfmt.Printf(\"struct %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tatLeastOneField = true\n\t\t\t\t\t}\n\n\t\t\t\t\tif !atLeastOneField {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\terr := templateBuilder.generateMethod(s.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"struct gen %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfileTemplate.Package = pkg.Name\n\t\terr := templateBuilder.generateFile(pkg.Path, fileTemplate)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Generation error\", err)\n\t\t}\n\t}\n}",
"func fields(spec *ast.TypeSpec) []*ast.Field {\n\ts := make([]*ast.Field, 0)\n\tif structType, ok := spec.Type.(*ast.StructType); ok {\n\t\tfor _, field := range structType.Fields.List {\n\t\t\tif keyname(field) != \"\" {\n\t\t\t\ts = append(s, field)\n\t\t\t}\n\t\t}\n\t}\n\treturn s\n}",
"func genStruct(variables Variables, conf *Config, file *os.File) {\n\td := &Identer{}\n\n\td.S(\"type \" + variables[\"CamelCaseTable\"].(string) + \" struct {\").Nl()\n\n\t//every variable/type combination is seperatate by newline and some indent\n\tstructFields := VariablesTypesAndTags(\"\\n\\t\\t\", variables[\"Columns\"].([]*vrm.Column), conf)\n\td.S(\"\\t\\t\")\n\td.S(structFields)\n\td.S(\"\\n}\\n\")\n\n\tfile.WriteString(d.String())\n\n}",
"func Fields() {\n\tmg.SerialDeps(fieldsYML, moduleFieldsGo)\n}",
"func Fields() error {\n\treturn devtools.GenerateFieldsYAML()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
genField generates the field config for the given AST | func genField(field *ast.FieldDefinition) *jen.Statement {
//
// Generate config for field
//
// == Example input SDL
//
// interface Pet {
// "name of the pet"
// name(style: NameComponentsStyle = SHORT): String!
// """
// givenName of the pet ★
// """
// givenName: String @deprecated(reason: "No longer supported; please use name field.")
// }
//
// == Example output
//
// &graphql.Field{
// Name: "name",
// Type: graphql.NonNull(graphql.String),
// Description: "name of the pet",
// DeprecationReason: "",
// Args: FieldConfigArgument{ ... },
// }
//
// &graphql.Field{
// Name: "givenName",
// Type: graphql.String,
// Description: "givenName of the pet",
// DeprecationReason: "No longer supported; please use name field.",
// Args: FieldConfigArgument{ ... },
// }
//
return jen.Op("&").Qual(defsPkg, "Field").Values(jen.Dict{
jen.Id("Args"): genArguments(field.Arguments),
jen.Id("DeprecationReason"): genDeprecationReason(field.Directives),
jen.Id("Description"): genDescription(field),
jen.Id("Name"): jen.Lit(field.Name.Value),
jen.Id("Type"): genOutputTypeReference(field.Type),
})
} | [
"func genFields(fs []*ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for fields\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// graphql.Fields{\n\t// \"name\": graphql.Field{ ... },\n\t// \"givenName\": graphql.Field{ ... },\n\t// }\n\t//\n\treturn jen.Qual(defsPkg, \"Fields\").Values(jen.DictFunc(func(d jen.Dict) {\n\t\tfor _, f := range fs {\n\t\t\td[jen.Lit(f.Name.Value)] = genField(f)\n\t\t}\n\t}))\n}",
"func (g *Generator) generateField(ctx valley.Context, fieldConfig valley.FieldConfig, value valley.Value) error {\n\terr := g.generateFieldConstraints(ctx, fieldConfig, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = g.generateFieldElementsConstraints(ctx, fieldConfig, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = g.generateFieldKeysConstraints(ctx, fieldConfig, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg.wc(\"\\n\")\n\n\treturn nil\n}",
"func (ConfigMacrosField) dIModuleFieldNode() {}",
"func (g *Generator) generateFieldConstraints(ctx valley.Context, fieldConfig valley.FieldConfig, value valley.Value) error {\n\tctx = ctx.Clone()\n\tctx.PathKind = valley.PathKindField\n\n\terr := g.generateConstraints(ctx, fieldConfig.Constraints, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (n ClassNode) Codegen(scope *Scope, c *Compiler) value.Value {\n\tstructDefn := scope.FindType(n.Name).Type.(*types.StructType)\n\n\tfieldnames := make([]string, 0, len(n.Variables))\n\tfields := make([]types.Type, 0, len(n.Variables))\n\n\tnames := map[string]bool{}\n\n\tfor _, f := range n.Variables {\n\t\tt := f.Type.Name\n\t\tname := f.Name.String()\n\t\tif _, found := names[name]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, f.Name)\n\t\t}\n\t\tnames[name] = true\n\t\tty := scope.FindType(t).Type\n\t\tty = f.Type.BuildPointerType(ty)\n\t\tfields = append(fields, ty)\n\t\tfieldnames = append(fieldnames, name)\n\t}\n\n\tthisArg := VariableDefnNode{}\n\tthisArg.Name = NewNamedReference(\"this\")\n\tthisArg.Type = GeodeTypeRef{}\n\tthisArg.Type.Array = false\n\tthisArg.Type.Name = n.Name\n\tthisArg.Type.PointerLevel = 1\n\n\tstructDefn.Fields = fields\n\tstructDefn.Names = fieldnames\n\n\tmethodBaseArgs := []VariableDefnNode{thisArg}\n\tfor _, m := range n.Methods {\n\t\tm.Name.Value = fmt.Sprintf(\"class.%s.%s\", n.Name, m.Name)\n\t\tif _, found := names[m.Name.String()]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, m.Name)\n\t\t}\n\t\tnames[m.Name.String()] = true\n\t\tm.Args = append(methodBaseArgs, m.Args...)\n\t\tm.Declare(scope, c)\n\t\tm.Codegen(scope, c)\n\t}\n\n\treturn nil\n}",
"func JsonFieldGenerator() gopter.Gen {\n\tif jsonFieldGenerator != nil {\n\t\treturn jsonFieldGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForJsonField(generators)\n\tjsonFieldGenerator = gen.Struct(reflect.TypeOf(JsonField{}), generators)\n\n\treturn jsonFieldGenerator\n}",
"func TraceFieldGenerator(ctx context.Context) []zapcore.Field {\n\tspanCtx := trace.FromContext(ctx).SpanContext()\n\n\treturn []zapcore.Field{\n\t\tzap.Uint64(\"dd.trace_id\", binary.BigEndian.Uint64(spanCtx.TraceID[8:])),\n\t\tzap.Uint64(\"dd.span_id\", binary.BigEndian.Uint64(spanCtx.SpanID[:])),\n\t}\n}",
"func genEncDef(data *goData, t *vdl.Type, unionFieldName string) string {\n\tvarCount := 0\n\tinstName := \"(*m)\"\n\tif t.Kind() == vdl.Struct || t.Kind() == vdl.Union {\n\t\t// - Struct shouldn't be dereferenced because its value is passed around\n\t\t// as a pointer.\n\t\t// - Unions shouldn't be dereferenced because the structs representing\n\t\t// their fields are not represented as pointers within the union interface.\n\t\t// e.g. Union A (an interface) contains struct AB: A(AB{}) rather than\n\t\t// A(&AB{})\n\t\tinstName = \"m\"\n\t}\n\treturn genEncDefInternal(data, t, instName, \"t\", unionFieldName, \"\", &varCount)\n}",
"func (fm *avroFieldMapper) generateFieldValue(name, prefix string, avroType ast.Expr, generatedField *ast.Field) (mappedFields, error) {\n\tswitch f := avroType.(type) {\n\tcase *ast.StarExpr:\n\t\tmappedField, err := fm.generateFieldValue(name, prefix, f.X, generatedField)\n\t\tif err != nil {\n\t\t\treturn mappedFields{}, err\n\t\t}\n\n\t\t// check for a nil pointer\n\t\tif _, ok := generatedField.Type.(*ast.StarExpr); ok {\n\t\t\treturn mappedFields{\n\t\t\t\tfieldMapping: fm.generateNullableFieldValue(name, prefix, mappedField.name, fmt.Sprintf(\"&%s\", mappedField.fieldMapping)),\n\t\t\t\tpreProcessing: mappedField.preProcessing,\n\t\t\t}, nil\n\t\t}\n\t\treturn mappedFields{\n\t\t\tfieldMapping: fmt.Sprintf(\"&%s\", mappedField.fieldMapping),\n\t\t\tpreProcessing: mappedField.preProcessing,\n\t\t\tname: prefix + name,\n\t\t}, nil\n\tcase *ast.StructType: // This handles anonymous structs in the Avro struct but the generated code never has those\n\t\treturn mappedFields{}, errors.New(\"anonymous structs in the Avro struct are not handled\")\n\tcase *ast.ArrayType:\n\t\tif _, ok := generatedField.Type.(*ast.ArrayType); !ok {\n\t\t\treturn mappedFields{}, fmt.Errorf(\"avro type is array but generated isn't for field %q\", name)\n\t\t}\n\t\tif ptr, ok := f.Elt.(*ast.StarExpr); ok {\n\t\t\treturn fm.generateFieldValue(name, prefix, ptr.X, generatedField)\n\t\t}\n\t\treturn fm.generateFieldValue(name, prefix, f.Elt, generatedField)\n\tcase *ast.Ident: // This covers covers all of the basic scalar types but also identifiers for other defined types\n\t\ttypeName := f.Name\n\t\tswitch typeName { // handle builtin types\n\t\tcase \"string\", \"bool\", \"float64\":\n\t\t\treturn mappedFields{fieldMapping: \"z.\" + prefix + generatedField.Names[0].Name, name: prefix + name}, nil\n\t\tcase \"int64\":\n\t\t\t// special case for converting time fields and arrays of times\n\t\t\tm := mappedFields{fieldMapping: \"z.\" + prefix + generatedField.Names[0].Name, name: prefix + name}\n\t\t\tfieldType := generatedField.Type\n\t\t\tpointer := false\n\t\t\tif p, ok := generatedField.Type.(*ast.StarExpr); ok {\n\t\t\t\tpointer = true\n\t\t\t\tfieldType = p.X\n\t\t\t}\n\t\t\tif selector, ok := fieldType.(*ast.SelectorExpr); ok && selector.Sel.Name == \"Time\" {\n\t\t\t\tm = mappedFields{fieldMapping: fmt.Sprintf(\"generate.AvroTime(z.%s)\", prefix+generatedField.Names[0].Name), name: prefix + name}\n\t\t\t}\n\t\t\tif gArray, ok := fieldType.(*ast.ArrayType); ok {\n\t\t\t\taType := gArray.Elt\n\t\t\t\tif p, ok := gArray.Elt.(*ast.StarExpr); ok {\n\t\t\t\t\taType = p.X\n\t\t\t\t\tpointer = true\n\t\t\t\t}\n\t\t\t\tif selector, ok := aType.(*ast.SelectorExpr); ok && selector.Sel.Name == \"Time\" {\n\t\t\t\t\tm = mappedFields{fieldMapping: fmt.Sprintf(\"generate.AvroTimeSlice(z.%s)\", prefix+generatedField.Names[0].Name), name: prefix + name}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif pointer {\n\t\t\t\tm.fieldMapping = strings.Replace(m.fieldMapping, \"generate.AvroTime\", \"generate.AvroOptionalTime\", 1)\n\t\t\t}\n\t\t\treturn m, nil\n\t\t}\n\t\t// If not a built in type it could be a special Avro Union type\n\t\tif tmpl, ok := fm.unionTemplates[typeName]; ok {\n\t\t\tbuf := &bytes.Buffer{}\n\t\t\tformat := map[string]string{\"packageName\": fm.packageName, \"value\": \"z.\" + prefix + name}\n\t\t\t// special case for converting time fields\n\t\t\tfieldType := generatedField.Type\n\t\t\tpointer := false\n\t\t\tif p, ok := generatedField.Type.(*ast.StarExpr); ok {\n\t\t\t\tpointer = true\n\t\t\t\tfieldType = 
p.X\n\t\t\t}\n\t\t\tif selector, ok := fieldType.(*ast.SelectorExpr); ok && selector.Sel.Name == \"Time\" {\n\t\t\t\tformat = map[string]string{\"packageName\": fm.packageName, \"value\": fmt.Sprintf(\"generate.AvroTime(z.%s)\", prefix+name)}\n\t\t\t}\n\t\t\tif err := tmpl.Execute(buf, format); err != nil {\n\t\t\t\treturn mappedFields{}, fmt.Errorf(\"failed union template: %v\", err)\n\t\t\t}\n\t\t\tm := mappedFields{fieldMapping: buf.String(), name: typeName}\n\t\t\tif pointer {\n\t\t\t\tm.fieldMapping = strings.Replace(m.fieldMapping, \"generate.AvroTime\", \"generate.AvroOptionalTime\", 1)\n\t\t\t}\n\n\t\t\treturn m, nil\n\t\t}\n\n\t\tif strings.HasPrefix(typeName, \"UnionNull\") {\n\t\t\tchildTypeName := strings.TrimPrefix(typeName, \"UnionNull\")\n\t\t\tmf, err := fm.generateStructValue(name, prefix, childTypeName, generatedField)\n\t\t\tif err != nil {\n\t\t\t\treturn mappedFields{}, fmt.Errorf(\"failed generating UnionNull struct value: %v\", err)\n\t\t\t}\n\t\t\tbuf := &bytes.Buffer{}\n\t\t\ttemplateData := map[string]string{\n\t\t\t\t\"packageName\": fm.packageName,\n\t\t\t\t\"typeName\": childTypeName,\n\t\t\t\t\"value\": mf.fieldMapping,\n\t\t\t}\n\t\t\tif err := fm.unionNullStructTemplate.Execute(buf, templateData); err != nil {\n\t\t\t\treturn mappedFields{}, fmt.Errorf(\"failed generating UnionNull struct template: %v\", err)\n\t\t\t}\n\t\t\treturn mappedFields{fieldMapping: buf.String(), preProcessing: mf.preProcessing, name: typeName}, nil\n\t\t}\n\n\t\treturn fm.generateStructValue(name, prefix, typeName, generatedField)\n\t}\n\treturn mappedFields{}, fmt.Errorf(\"unhandled type for field %q\", prefix+name)\n}",
"func (f *DbField) GenFromString(dn, sn string) string {\n\tvar str string\n\n\tswitch f.Typ.GoType() {\n\tcase \"int\":\n\t\tfallthrough\n\tcase \"int32\":\n\t\tfallthrough\n\tcase \"int64\":\n\t\t{\n\t\t\twrk := \"\\t%s.%s, _ = strconv.ParseInt(%s,0,64)\\n\"\n\t\t\tstr = fmt.Sprintf(wrk, dn, f.TitledName(), sn)\n\t\t}\n\tcase \"float64\":\n\t\t{\n\t\t\twrk := \"\\t\\t%s.%s, _ = strconv.ParseFloat(%s, 64)\\n\"\n\t\t\tstr = fmt.Sprintf(wrk, dn, f.TitledName(), sn)\n\t\t}\n\tcase \"time.Time\":\n\t\t{\n\t\t\twrk := \"\\t%s.%s, _ = time.Parse(time.RFC3339, %s)\\n\"\n\t\t\tstr = fmt.Sprintf(wrk, dn, f.TitledName(), sn)\n\t\t}\n\tdefault:\n\t\tstr = fmt.Sprintf(\"\\t%s.%s = %s\\n\", dn, f.TitledName(), sn)\n\t}\n\n\treturn str\n}",
"func (p *OrmPlugin) generateFieldConversion(message *protogen.Message, field *protogen.Field, toORM bool, ofield *Field) error {\n\tdesc := field.Desc\n\tfieldName := fieldName(field)\n\tfieldType := p.fieldType(field)\n\tident := fieldIdent(field)\n\n\tif desc.IsList() { // Repeated Object ----------------------------------\n\t\t// Some repeated fields can be handled by github.com/lib/pq\n\t\tif p.IsAbleToMakePQArray(fieldType) {\n\t\t\tpqIdent, _, _ := p.fieldToPQArrayIdent(field)\n\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\tp.P(`to.`, fieldName, ` = make(`, pqIdent, `, len(m.`, fieldName, `))`)\n\t\t\tp.P(`copy(to.`, fieldName, `, m.`, fieldName, `)`)\n\t\t\tp.P(`}`)\n\t\t} else if p.isOrmable(fieldType) { // Repeated ORMable type\n\t\t\t//fieldType = strings.Trim(fieldType, \"[]*\")\n\n\t\t\tp.P(`for _, v := range m.`, fieldName, ` {`)\n\t\t\tp.P(`if v != nil {`)\n\t\t\tif toORM {\n\t\t\t\tp.P(`if temp`, fieldName, `, cErr := v.ToORM(ctx); cErr == nil {`)\n\t\t\t} else {\n\t\t\t\tp.P(`if temp`, fieldName, `, cErr := v.ToPB(ctx); cErr == nil {`)\n\t\t\t}\n\t\t\tp.P(`to.`, fieldName, ` = append(to.`, fieldName, `, &temp`, fieldName, `)`)\n\t\t\tp.P(`} else {`)\n\t\t\tp.P(`return to, cErr`)\n\t\t\tp.P(`}`)\n\t\t\tp.P(`} else {`)\n\t\t\tp.P(`to.`, fieldName, ` = append(to.`, fieldName, `, nil)`)\n\t\t\tp.P(`}`)\n\t\t\tp.P(`}`) // end repeated for\n\t\t} else {\n\t\t\tp.P(`// Repeated type `, fieldType, ` is not an ORMable message type`)\n\t\t\tp.warning(\"repeated type %s was not ormable for desc %v\", fieldType, desc)\n\t\t}\n\t} else if desc.Enum() != nil { // Singular Enum, which is an int32 ---\n\t\tif toORM {\n\t\t\tif p.StringEnums {\n\t\t\t\tp.P(`to.`, fieldName, ` = `, ident, `_name[int32(m.`, fieldName, `)]`)\n\t\t\t} else {\n\t\t\t\tp.P(`to.`, fieldName, ` = int32(m.`, fieldName, `)`)\n\t\t\t}\n\t\t} else {\n\t\t\tif p.StringEnums {\n\t\t\t\tp.P(`to.`, fieldName, ` = `, ident, `(`, ident, `_value[m.`, fieldName, `])`)\n\t\t\t} else {\n\t\t\t\tp.P(`to.`, fieldName, ` = `, ident, `(m.`, fieldName, `)`)\n\t\t\t}\n\t\t}\n\t} else if desc.Message() != nil { // Singular Object -------------\n\t\t//Check for WKTs\n\t\tparts := strings.Split(fieldType, \".\")\n\t\tcoreType := parts[len(parts)-1]\n\t\t// Type is a WKT, convert to/from as ptr to base type\n\t\tif _, exists := wellKnownTypes[coreType]; exists { // Singular WKT -----\n\t\t\tif toORM {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`v := m.`, fieldName, `.Value`)\n\t\t\t\tp.P(`to.`, fieldName, ` = &v`)\n\t\t\t\tp.P(`}`)\n\t\t\t} else {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`to.`, fieldName, ` = &`, ident,\n\t\t\t\t\t`{Value: *m.`, fieldName, `}`)\n\t\t\t\tp.P(`}`)\n\t\t\t}\n\t\t} else if coreType == protoTypeUUIDValue { // Singular UUIDValue type ----\n\t\t\tif toORM {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`tempUUID, uErr := `, p.identFnCall(identUUIDFromStringFn, fmt.Sprintf(\"m.%s.Value\", fieldName)))\n\t\t\t\tp.P(`if uErr != nil {`)\n\t\t\t\tp.P(`return to, uErr`)\n\t\t\t\tp.P(`}`)\n\t\t\t\tp.P(`to.`, fieldName, ` = &tempUUID`)\n\t\t\t\tp.P(`}`)\n\t\t\t} else {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`to.`, fieldName, ` = &`, identTypesUUIDValue, `{Value: m.`, fieldName, `.String()}`)\n\t\t\t\tp.P(`}`)\n\t\t\t}\n\t\t} else if coreType == protoTypeUUID { // Singular UUID type --------------\n\t\t\tif toORM {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`to.`, fieldName, `, err = `, p.identFnCall(identUUIDFromStringFn, 
fmt.Sprintf(\"m.%s.Value\", fieldName)))\n\t\t\t\tp.P(`if err != nil {`)\n\t\t\t\tp.P(`return to, err`)\n\t\t\t\tp.P(`}`)\n\t\t\t\tp.P(`} else {`)\n\t\t\t\tp.P(`to.`, fieldName, ` = `, identNilUUID)\n\t\t\t\tp.P(`}`)\n\t\t\t} else {\n\t\t\t\tp.P(`to.`, fieldName, ` = &`, identTypesUUID, `{Value: m.`, fieldName, `.String()}`)\n\t\t\t}\n\t\t} else if coreType == protoTypeTimestamp { // Singular WKT Timestamp ---\n\t\t\tif toORM {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`var t `, identTime)\n\t\t\t\tp.P(`if t, err = `, identTimestamp, `(m.`, fieldName, `); err != nil {`)\n\t\t\t\tp.P(`return to, err`)\n\t\t\t\tp.P(`}`)\n\t\t\t\tp.P(`to.`, fieldName, ` = &t`)\n\t\t\t\tp.P(`}`)\n\t\t\t} else {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`if to.`, fieldName, `, err = `, identTimestampProto, `(*m.`, fieldName, `); err != nil {`)\n\t\t\t\tp.P(`return to, err`)\n\t\t\t\tp.P(`}`)\n\t\t\t\tp.P(`}`)\n\t\t\t}\n\t\t} else if coreType == protoTypeJSON {\n\t\t\tif toORM {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`to.`, fieldName, ` = &`, identpqJsonb, `{[]byte(m.`, fieldName, `.Value)}`)\n\t\t\t\tp.P(`}`)\n\t\t\t} else {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`to.`, fieldName, ` = &`, identTypesJSONValue, `{Value: string(m.`, fieldName, `.RawMessage)}`)\n\t\t\t\tp.P(`}`)\n\t\t\t}\n\t\t} else if coreType == protoTypeResource {\n\t\t\tresource := \"nil\" // assuming we do not know the PB type, nil means call codec for any resource\n\t\t\tif ofield != nil && ofield.ParentOriginName != \"\" {\n\t\t\t\tresource = \"&\" + ofield.ParentOriginName + \"{}\"\n\t\t\t}\n\t\t\tbtype := strings.TrimPrefix(ofield.Type, \"*\")\n\t\t\tnillable := strings.HasPrefix(ofield.Type, \"*\")\n\t\t\tiface := ofield.Type == \"interface{}\"\n\n\t\t\tif toORM {\n\t\t\t\tif nillable {\n\t\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\t}\n\t\t\t\tswitch btype {\n\t\t\t\tcase \"int64\":\n\t\t\t\t\tp.P(`if v, err :=`, identResourceDecodeInt64Fn, `(`, resource, `, m.`, fieldName, `); err != nil {`)\n\t\t\t\t\tp.P(`\treturn to, err`)\n\t\t\t\t\tp.P(`} else {`)\n\t\t\t\t\tif nillable {\n\t\t\t\t\t\tp.P(`to.`, fieldName, ` = &v`)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tp.P(`to.`, fieldName, ` = v`)\n\t\t\t\t\t}\n\t\t\t\t\tp.P(`}`)\n\t\t\t\tcase \"[]byte\":\n\t\t\t\t\tp.P(`if v, err :=`, identResourceDecodeBytesFn, `(`, resource, `, m.`, fieldName, `); err != nil {`)\n\t\t\t\t\tp.P(`\treturn to, err`)\n\t\t\t\t\tp.P(`} else {`)\n\t\t\t\t\tp.P(`\tto.`, fieldName, ` = v`)\n\t\t\t\t\tp.P(`}`)\n\t\t\t\tdefault:\n\t\t\t\t\tp.P(`if v, err :=`, identResourceDecodeFn, `(`, resource, `, m.`, fieldName, `); err != nil {`)\n\t\t\t\t\tp.P(`return to, err`)\n\t\t\t\t\tp.P(`} else if v != nil {`)\n\t\t\t\t\tif nillable {\n\t\t\t\t\t\tp.P(`vv := v.(`, btype, `)`)\n\t\t\t\t\t\tp.P(`to.`, fieldName, ` = &vv`)\n\t\t\t\t\t} else if iface {\n\t\t\t\t\t\tp.P(`to.`, fieldName, `= v`)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tp.P(`to.`, fieldName, ` = v.(`, btype, `)`)\n\t\t\t\t\t}\n\t\t\t\t\tp.P(`}`)\n\t\t\t\t}\n\t\t\t\tif nillable {\n\t\t\t\t\tp.P(`}`)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !toORM {\n\t\t\t\tif nillable {\n\t\t\t\t\tp.P(`if m.`, fieldName, `!= nil {`)\n\t\t\t\t\tp.P(`\tif v, err := `, identResourceEncodeFn, `(`, resource, `, *m.`, fieldName, `); err != nil {`)\n\t\t\t\t\tp.P(`\t\treturn to, err`)\n\t\t\t\t\tp.P(`\t} else {`)\n\t\t\t\t\tp.P(`\t\tto.`, fieldName, ` = v`)\n\t\t\t\t\tp.P(`\t}`)\n\t\t\t\t\tp.P(`}`)\n\n\t\t\t\t} else {\n\t\t\t\t\tp.P(`if v, err := `, identResourceEncodeFn, `(`, 
resource, `, m.`, fieldName, `); err != nil {`)\n\t\t\t\t\tp.P(`return to, err`)\n\t\t\t\t\tp.P(`} else {`)\n\t\t\t\t\tp.P(`to.`, fieldName, ` = v`)\n\t\t\t\t\tp.P(`}`)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if coreType == protoTypeInet { // Inet type for Postgres only, currently\n\t\t\tif toORM {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`if to.`, fieldName, `, err = `, identTypesParseInetFn, `(m.`, fieldName, `.Value); err != nil {`)\n\t\t\t\tp.P(`return to, err`)\n\t\t\t\tp.P(`}`)\n\t\t\t\tp.P(`}`)\n\t\t\t} else {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil && m.`, fieldName, `.IPNet != nil {`)\n\t\t\t\tp.P(`to.`, fieldName, ` = &`, identTypesInetValue, `{Value: m.`, fieldName, `.String()}`)\n\t\t\t\tp.P(`}`)\n\t\t\t}\n\t\t} else if coreType == protoTimeOnly { // Time only to support time via string\n\t\t\tif toORM {\n\t\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\t\tp.P(`if to.`, fieldName, `, err = `, identTypesParseTimeFn, `(m.`, fieldName, `.Value); err != nil {`)\n\t\t\t\tp.P(`return to, err`)\n\t\t\t\tp.P(`}`)\n\t\t\t\tp.P(`}`)\n\t\t\t} else {\n\t\t\t\tp.P(`if m.`, fieldName, ` != \"\" {`)\n\t\t\t\tp.P(`if to.`, fieldName, `, err = `, identTypesTimeOnlyByStringFn, `( m.`, fieldName, `); err != nil {`)\n\t\t\t\tp.P(`return to, err`)\n\t\t\t\tp.P(`}`)\n\t\t\t\tp.P(`}`)\n\t\t\t}\n\t\t} else if p.isOrmable(fieldType) {\n\t\t\t// Not a WKT, but a type we're building converters for\n\t\t\tp.P(`if m.`, fieldName, ` != nil {`)\n\t\t\tif toORM {\n\t\t\t\tp.P(`temp`, fieldName, `, err := m.`, fieldName, `.ToORM (ctx)`)\n\t\t\t} else {\n\t\t\t\tp.P(`temp`, fieldName, `, err := m.`, fieldName, `.ToPB (ctx)`)\n\t\t\t}\n\t\t\tp.P(`if err != nil {`)\n\t\t\tp.P(`return to, err`)\n\t\t\tp.P(`}`)\n\t\t\tp.P(`to.`, fieldName, ` = &temp`, fieldName)\n\t\t\tp.P(`}`)\n\t\t}\n\t} else { // Singular raw ----------------------------------------------------\n\t\tp.P(`to.`, fieldName, ` = m.`, fieldName)\n\t}\n\treturn nil\n}",
"func (p *Planner) addField(ref int) {\n\tfieldName := p.visitor.Operation.FieldNameString(ref)\n\n\talias := ast.Alias{\n\t\tIsDefined: p.visitor.Operation.FieldAliasIsDefined(ref),\n\t}\n\n\tif alias.IsDefined {\n\t\taliasBytes := p.visitor.Operation.FieldAliasBytes(ref)\n\t\talias.Name = p.upstreamOperation.Input.AppendInputBytes(aliasBytes)\n\t}\n\n\ttypeName := p.visitor.Walker.EnclosingTypeDefinition.NameString(p.visitor.Definition)\n\tfor i := range p.visitor.Config.Fields {\n\t\tisDesiredField := p.visitor.Config.Fields[i].TypeName == typeName &&\n\t\t\tp.visitor.Config.Fields[i].FieldName == fieldName\n\n\t\t// chech that we are on a desired field and field path contains a single element - mapping is plain\n\t\tif isDesiredField && len(p.visitor.Config.Fields[i].Path) == 1 {\n\t\t\t// define alias when mapping path differs from fieldName and no alias has been defined\n\t\t\tif p.visitor.Config.Fields[i].Path[0] != fieldName && !alias.IsDefined {\n\t\t\t\talias.IsDefined = true\n\t\t\t\taliasBytes := p.visitor.Operation.FieldNameBytes(ref)\n\t\t\t\talias.Name = p.upstreamOperation.Input.AppendInputBytes(aliasBytes)\n\t\t\t}\n\n\t\t\t// override fieldName with mapping path value\n\t\t\tfieldName = p.visitor.Config.Fields[i].Path[0]\n\n\t\t\t// when provided field is a root type field save new field name\n\t\t\tif ref == p.rootFieldRef {\n\t\t\t\tp.rootFieldName = fieldName\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfield := p.upstreamOperation.AddField(ast.Field{\n\t\tName: p.upstreamOperation.Input.AppendInputString(fieldName),\n\t\tAlias: alias,\n\t})\n\n\tselection := ast.Selection{\n\t\tKind: ast.SelectionKindField,\n\t\tRef: field.Ref,\n\t}\n\n\tp.upstreamOperation.AddSelection(p.nodes[len(p.nodes)-1].Ref, selection)\n\tp.nodes = append(p.nodes, field)\n}",
"func (sb *schemaBuilder) buildField(field reflect.StructField) (*graphql.Field, error) {\n\tretType, err := sb.getType(field.Type)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &graphql.Field{\n\t\tResolve: func(ctx context.Context, source, args interface{}, selectionSet *graphql.SelectionSet) (interface{}, error) {\n\t\t\tvalue := reflect.ValueOf(source)\n\t\t\tif value.Kind() == reflect.Ptr {\n\t\t\t\tvalue = value.Elem()\n\t\t\t}\n\t\t\treturn value.FieldByIndex(field.Index).Interface(), nil\n\t\t},\n\t\tType: retType,\n\t\tParseArguments: nilParseArguments,\n\t}, nil\n}",
"func (f *DbField) GenToString(v string, st string) string {\n\tvar str string\n\tvar fldName string\n\n\tfldName = st + \".\" + f.TitledName()\n\tif st == \"\" {\n\t\tfldName = f.TitledName()\n\t}\n\n\tswitch f.Typ.GoType() {\n\tcase \"int\":\n\t\tfallthrough\n\tcase \"int32\":\n\t\tfallthrough\n\tcase \"int64\":\n\t\tstr = fmt.Sprintf(\"\\t%s = fmt.Sprintf(\\\"%%d\\\", %s)\\n\", v, fldName)\n\tcase \"float32\":\n\t\tfallthrough\n\tcase \"float64\":\n\t\tstr = \"\\t{\\n\"\n\t\tstr += fmt.Sprintf(\"\\t\\ts := fmt.Sprintf(\\\"%s.4f\\\", %s)\\n\", \"%\", fldName)\n\t\tstr += fmt.Sprintf(\"\\t\\t%s = strings.TrimRight(strings.TrimRight(s, \\\"0\\\"), \\\".\\\")\\n\", v)\n\t\tstr += \"\\t}\\n\"\n\tcase \"time.Time\":\n\t\t{\n\t\t\twrk := \"\\t{\\n\\t\\twrk, _ := %s.MarshalText()\\n\" +\n\t\t\t\t\"\\t\\t%s = wrk\\n\\t}\\n\"\n\t\t\tstr = fmt.Sprintf(wrk, fldName, v)\n\t\t}\n\tdefault:\n\t\tstr = fmt.Sprintf(\"\\t%s = %s\\n\", v, fldName)\n\t}\n\n\treturn str\n}",
"func AddIndependentPropertyGeneratorsForJsonField(gens map[string]gopter.Gen) {\n\tgens[\"SourceField\"] = gen.PtrOf(gen.AlphaString())\n}",
"func JsonField_ARMGenerator() gopter.Gen {\n\tif jsonField_ARMGenerator != nil {\n\t\treturn jsonField_ARMGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForJsonField_ARM(generators)\n\tjsonField_ARMGenerator = gen.Struct(reflect.TypeOf(JsonField_ARM{}), generators)\n\n\treturn jsonField_ARMGenerator\n}",
"func (fm *avroFieldMapper) generateChildStruct(expr ast.Expr, name, prefix string, avroFields *ast.FieldList) (mappedFields, *ast.FieldList, error) {\n\tvar stype *ast.StructType\n\tvar fields *ast.FieldList\n\tvar structName string\n\n\tif s, ok := expr.(*ast.StarExpr); ok {\n\t\texpr = s.X\n\t}\n\n\tif s, ok := expr.(*ast.StructType); ok {\n\t\tstype = s\n\t\tfields = stype.Fields\n\t} else {\n\t\tident, ok := expr.(*ast.Ident)\n\t\tif !ok {\n\t\t\treturn mappedFields{}, nil, fmt.Errorf(\"expected generated field %q to be a struct type or identifier for a named struct\", name)\n\t\t}\n\t\tstructName = ident.Name\n\t\tn, err := parseGoStruct(structName, filepath.Dir(fm.generatedSourcePath))\n\t\tif err != nil {\n\t\t\treturn mappedFields{}, nil, fmt.Errorf(\"unable to find type named %q: %v\", name, err)\n\t\t}\n\t\tif s, ok := n.Type.(*ast.StructType); ok {\n\t\t\tstype = s\n\t\t} else {\n\t\t\treturn mappedFields{}, nil, fmt.Errorf(\"type %q is not a struct\", name)\n\t\t}\n\t}\n\tgeneratedChildFieldMap, err := mapFields(stype.Fields, fm.generatedSourcePath)\n\tif err != nil {\n\t\treturn mappedFields{}, nil, fmt.Errorf(\"failed to parse generated Go struct: %v\", err)\n\t}\n\tmappedStruct, err := fm.generateFields(prefix, avroFields, generatedChildFieldMap)\n\tif err != nil {\n\t\treturn mappedFields{}, nil, fmt.Errorf(\"failed generating mappings for field %s: %v\", name, err)\n\t}\n\tmappedStruct.name = structName\n\n\treturn mappedStruct, fields, nil\n}",
"func (_ *Frontend) GenerateFieldMap(types []GoType) string {\n\tout := strings.Builder{}\n\tfor _, v := range types {\n\t\tname := strcase.ToLowerCamel(v.Name)\n\t\tstmt := fmt.Sprintf(\"%s:'%s',\", name, name)\n\t\tout.WriteString(stmt)\n\t}\n\treturn out.String()\n\n}",
"func (c *TypeConverter) genStructConverter(\n\tkeyPrefix string,\n\tfromPrefix string,\n\tindent string,\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n\tprevKeyPrefixes []string,\n) error {\n\n\tfor i := 0; i < len(toFields); i++ {\n\t\ttoField := toFields[i]\n\n\t\t// Check for same named field\n\t\tvar fromField *compile.FieldSpec\n\t\tfor j := 0; j < len(fromFields); j++ {\n\t\t\tif fromFields[j].Name == toField.Name {\n\t\t\t\tfromField = fromFields[j]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttoSubIdentifier := keyPrefix + PascalCase(toField.Name)\n\t\ttoIdentifier := \"out.\" + toSubIdentifier\n\t\toverriddenIdentifier := \"\"\n\t\tfromIdentifier := \"\"\n\n\t\t// Check for mapped field\n\t\tvar overriddenField *compile.FieldSpec\n\n\t\t// check if this toField satisfies a fieldMap transform\n\t\ttransformFrom, ok := fieldMap[toSubIdentifier]\n\t\tif ok {\n\t\t\t// no existing direct fromField, just assign the transform\n\t\t\tif fromField == nil {\n\t\t\t\tfromField = transformFrom.Field\n\t\t\t\tif c.useRecurGen {\n\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t} else {\n\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t}\n\t\t\t\t// else there is a conflicting direct fromField\n\t\t\t} else {\n\t\t\t\t// depending on Override flag either the direct fromField or transformFrom is the OverrideField\n\t\t\t\tif transformFrom.Override {\n\t\t\t\t\t// check for required/optional setting\n\t\t\t\t\tif !transformFrom.Field.Required {\n\t\t\t\t\t\toverriddenField = fromField\n\t\t\t\t\t\toverriddenIdentifier = \"in.\" + fromPrefix +\n\t\t\t\t\t\t\tPascalCase(overriddenField.Name)\n\t\t\t\t\t}\n\t\t\t\t\t// If override is true and the new field is required,\n\t\t\t\t\t// there's a default instantiation value and will always\n\t\t\t\t\t// overwrite.\n\t\t\t\t\tfromField = transformFrom.Field\n\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If override is false and the from field is required,\n\t\t\t\t\t// From is always populated and will never be overwritten.\n\t\t\t\t\tif !fromField.Required {\n\t\t\t\t\t\toverriddenField = transformFrom.Field\n\t\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\toverriddenIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// neither direct or transform fromField was found\n\t\tif fromField == nil {\n\t\t\t// search the fieldMap toField identifiers for matching identifier prefix\n\t\t\t// e.g. 
the current toField is a struct and something within it has a transform\n\t\t\t// a full match identifiers for transform non-struct types would have been caught above\n\t\t\thasStructFieldMapping := false\n\t\t\tfor toID := range fieldMap {\n\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\thasStructFieldMapping = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// if there's no fromField and no fieldMap transform that could be applied\n\t\t\tif !hasStructFieldMapping {\n\t\t\t\tvar bypass bool\n\t\t\t\t// check if required field is filled from other resources\n\t\t\t\t// it can be used to set system default (customized tracing /auth required for clients),\n\t\t\t\t// or header propagating\n\t\t\t\tif c.optionalEntries != nil {\n\t\t\t\t\tfor toID := range c.optionalEntries {\n\t\t\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\t\t\tbypass = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// the toField is either covered by optionalEntries, or optional and\n\t\t\t\t// there's nothing that maps to it or its sub-fields so we should skip it\n\t\t\t\tif bypass || !toField.Required {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// unrecoverable error\n\t\t\t\treturn errors.Errorf(\n\t\t\t\t\t\"required toField %s does not have a valid fromField mapping\",\n\t\t\t\t\ttoField.Name,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\tif fromIdentifier == \"\" && fromField != nil {\n\t\t\t// should we set this if no fromField ??\n\t\t\tfromIdentifier = \"in.\" + fromPrefix + PascalCase(fromField.Name)\n\t\t}\n\n\t\tif prevKeyPrefixes == nil {\n\t\t\tprevKeyPrefixes = []string{}\n\t\t}\n\n\t\tvar overriddenFieldName string\n\t\tvar overriddenFieldType compile.TypeSpec\n\t\tif overriddenField != nil {\n\t\t\toverriddenFieldName = overriddenField.Name\n\t\t\toverriddenFieldType = overriddenField.Type\n\t\t}\n\n\t\t// Override thrift type names to avoid naming collisions between endpoint\n\t\t// and client types.\n\t\tswitch toFieldType := compile.RootTypeSpec(toField.Type).(type) {\n\t\tcase\n\t\t\t*compile.BoolSpec,\n\t\t\t*compile.I8Spec,\n\t\t\t*compile.I16Spec,\n\t\t\t*compile.I32Spec,\n\t\t\t*compile.EnumSpec,\n\t\t\t*compile.I64Spec,\n\t\t\t*compile.DoubleSpec,\n\t\t\t*compile.StringSpec:\n\n\t\t\terr := c.genConverterForPrimitive(\n\t\t\t\ttoField,\n\t\t\t\ttoIdentifier,\n\t\t\t\tfromField,\n\t\t\t\tfromIdentifier,\n\t\t\t\toverriddenField,\n\t\t\t\toverriddenIdentifier,\n\t\t\t\tindent,\n\t\t\t\tprevKeyPrefixes,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.BinarySpec:\n\t\t\tfor _, line := range checkOptionalNil(indent, c.uninitialized, toIdentifier, prevKeyPrefixes, c.useRecurGen) {\n\t\t\t\tc.append(line)\n\t\t\t}\n\t\t\tc.append(toIdentifier, \" = []byte(\", fromIdentifier, \")\")\n\t\tcase *compile.StructSpec:\n\t\t\tvar (\n\t\t\t\tstFromPrefix = fromPrefix\n\t\t\t\tstFromType compile.TypeSpec\n\t\t\t\tfromTypeName string\n\t\t\t)\n\t\t\tif fromField != nil {\n\t\t\t\tstFromType = fromField.Type\n\t\t\t\tstFromPrefix = fromPrefix + PascalCase(fromField.Name)\n\n\t\t\t\tfromTypeName, _ = c.getIdentifierName(stFromType)\n\t\t\t}\n\n\t\t\ttoTypeName, err := c.getIdentifierName(toFieldType)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif converterMethodName, ok := c.convStructMap[toFieldType.Name]; ok {\n\t\t\t\t// the converter for this struct has already been generated, so just use it\n\t\t\t\tc.append(indent, \"out.\", keyPrefix+PascalCase(toField.Name), \" = \", converterMethodName, \"(\", fromIdentifier, 
\")\")\n\t\t\t} else if c.useRecurGen && fromTypeName != \"\" {\n\t\t\t\t// generate a callable converter inside function literal\n\t\t\t\terr = c.genConverterForStructWrapped(\n\t\t\t\t\ttoField,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoTypeName,\n\t\t\t\t\ttoSubIdentifier,\n\t\t\t\t\tfromTypeName,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t\tindent,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\terr = c.genConverterForStruct(\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tkeyPrefix+PascalCase(toField.Name),\n\t\t\t\t\tstFromPrefix,\n\t\t\t\t\tindent,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.ListSpec:\n\t\t\terr := c.genConverterForList(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.MapSpec:\n\t\t\terr := c.genConverterForMap(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// fmt.Printf(\"Unknown type %s for field %s \\n\",\n\t\t\t// \ttoField.Type.TypeCode().String(), toField.Name,\n\t\t\t// )\n\n\t\t\t// pkgName, err := h.TypePackageName(toField.Type.IDLFile())\n\t\t\t// if err != nil {\n\t\t\t// \treturn nil, err\n\t\t\t// }\n\t\t\t// typeName := pkgName + \".\" + toField.Type.ThriftName()\n\t\t\t// line := toIdentifier + \"(*\" + typeName + \")\" + postfix\n\t\t\t// c.Lines = append(c.Lines, line)\n\t\t}\n\t}\n\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
genArguments generates argument field config for given AST | func genArguments(args []*ast.InputValueDefinition) *jen.Statement {
//
// Generate config for arguments
//
// == Example input SDL
//
// type Dog {
// name(
// "style is stylish"
// style: NameComponentsStyle = SHORT,
// ): String!
// }
//
// == Example output
//
// FieldConfigArgument{
// "style": &ArgumentConfig{ ... }
// },
//
return jen.Qual(defsPkg, "FieldConfigArgument").Values(
jen.DictFunc(func(d jen.Dict) {
for _, arg := range args {
d[jen.Lit(arg.Name.Value)] = genArgument(arg)
}
}),
)
} | [
"func genArgument(arg *ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for argument\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &ArgumentConfig{\n\t// Type: graphql.NonNull(graphql.String),\n\t// DefaultValue: \"SHORT\", // TODO: ???\n\t// Description: \"style is stylish\",\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"ArgumentConfig\").Values(jen.Dict{\n\t\tjen.Id(\"DefaultValue\"): genValue(arg.DefaultValue),\n\t\tjen.Id(\"Description\"): genDescription(arg),\n\t\tjen.Id(\"Type\"): genInputTypeReference(arg.Type),\n\t})\n}",
"func (c JavacCommand) GenerateArgumentList() []string {\n\targumentArray := make([]string, 0)\n\targumentArray = append(argumentArray, \"-d\", c.DestinationDirectory)\n\n\tif c.DebuggingInformation != \"\" {\n\t\targumentArray = append(argumentArray, c.DebuggingInformation)\n\t}\n\n\tif c.Deprecation {\n\t\targumentArray = append(argumentArray, \"-deprecation\")\n\t}\n\n\tif len(c.SourceFiles) != 0 {\n\t\targumentArray = append(argumentArray, c.SourceFiles...)\n\t}\n\n\tif len(c.ClassPath) != 0 {\n\t\targumentArray = append(argumentArray, \"-cp\", strings.Join(c.ClassPath, \":\"))\n\t}\n\n\tif c.SourceVersion != \"\" {\n\t\targumentArray = append(argumentArray, \"-source\", c.SourceVersion)\n\t}\n\n\tif c.LintWarnings != \"\" {\n\t\targumentArray = append(argumentArray, c.LintWarnings)\n\t}\n\n\tif c.Encoding != \"\" {\n\t\targumentArray = append(argumentArray, \"-encoding\", c.Encoding)\n\t}\n\n\tif c.Verbose {\n\t\targumentArray = append(argumentArray, \"-verbose\")\n\t}\n\n\tif c.Target != \"\" {\n\t\targumentArray = append(argumentArray, \"-target\", c.Target)\n\t}\n\n\treturn argumentArray\n}",
"func (*Base) Arguments(p ASTPass, l *ast.Fodder, args *ast.Arguments, r *ast.Fodder, ctx Context) {\n\tp.Fodder(p, l, ctx)\n\tfor i := range args.Positional {\n\t\targ := &args.Positional[i]\n\t\tp.Visit(p, &arg.Expr, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tfor i := range args.Named {\n\t\targ := &args.Named[i]\n\t\tp.Fodder(p, &arg.NameFodder, ctx)\n\t\tp.Fodder(p, &arg.EqFodder, ctx)\n\t\tp.Visit(p, &arg.Arg, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tp.Fodder(p, r, ctx)\n}",
"func genArgs(optionMap map[string]string) []string {\n\toptions := []string{}\n\tfor k, v := range optionMap {\n\t\tif v != \"\" {\n\t\t\tk = fmt.Sprintf(\"%s=%s\", k, v)\n\t\t}\n\t\toptions = append(options, k)\n\t}\n\treturn options\n}",
"func genArgs(vals []reflect.Value, n int) [][]reflect.Value {\n\tif n == 0 {\n\t\treturn [][]reflect.Value{nil}\n\t}\n\tnext := genArgs(vals, n-1)\n\tres := [][]reflect.Value{}\n\tfor _, base := range next {\n\t\tfor _, val := range vals {\n\t\t\tres = append(res, append([]reflect.Value{val}, base...))\n\t\t}\n\t}\n\treturn res\n}",
"func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\ttoken: token,\n\t\tscheme: scheme,\n\t}\n}",
"func genFields(fs []*ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for fields\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// graphql.Fields{\n\t// \"name\": graphql.Field{ ... },\n\t// \"givenName\": graphql.Field{ ... },\n\t// }\n\t//\n\treturn jen.Qual(defsPkg, \"Fields\").Values(jen.DictFunc(func(d jen.Dict) {\n\t\tfor _, f := range fs {\n\t\t\td[jen.Lit(f.Name.Value)] = genField(f)\n\t\t}\n\t}))\n}",
"func (s *BasePhpParserListener) EnterAttributeArgList(ctx *AttributeArgListContext) {}",
"func (s *BasePhpParserListener) EnterActualArguments(ctx *ActualArgumentsContext) {}",
"func (s *BasePhpParserListener) EnterGenericDynamicArgs(ctx *GenericDynamicArgsContext) {}",
"func (s *BasePhpParserListener) EnterArguments(ctx *ArgumentsContext) {}",
"func (p *Planner) configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef int, argumentName string, sourcePath []string) {\n\tfieldArgument, ok := p.visitor.Operation.FieldArgument(downstreamFieldRef, []byte(argumentName))\n\tif !ok {\n\t\treturn\n\t}\n\tvalue := p.visitor.Operation.ArgumentValue(fieldArgument)\n\tif value.Kind != ast.ValueKindVariable {\n\t\tp.applyInlineFieldArgument(upstreamFieldRef, downstreamFieldRef, argumentName, sourcePath)\n\t\treturn\n\t}\n\tvariableName := p.visitor.Operation.VariableValueNameBytes(value.Ref)\n\tvariableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref)\n\n\tcontextVariable := &resolve.ContextVariable{\n\t\tPath: []string{variableNameStr},\n\t\tRenderAsGraphQLValue: true,\n\t}\n\tcontextVariable.SetJsonValueType(p.visitor.Definition, p.visitor.Definition, p.argTypeRef)\n\n\tcontextVariableName, exists := p.variables.AddVariable(contextVariable)\n\tvariableValueRef, argRef := p.upstreamOperation.AddVariableValueArgument([]byte(argumentName), variableName) // add the argument to the field, but don't redefine it\n\tp.upstreamOperation.AddArgumentToField(upstreamFieldRef, argRef)\n\n\tif exists { // if the variable exists we don't have to put it onto the variables declaration again, skip\n\t\treturn\n\t}\n\n\tfor _, i := range p.visitor.Operation.OperationDefinitions[p.visitor.Walker.Ancestors[0].Ref].VariableDefinitions.Refs {\n\t\tref := p.visitor.Operation.VariableDefinitions[i].VariableValue.Ref\n\t\tif !p.visitor.Operation.VariableValueNameBytes(ref).Equals(variableName) {\n\t\t\tcontinue\n\t\t}\n\t\timportedType := p.visitor.Importer.ImportType(p.visitor.Operation.VariableDefinitions[i].Type, p.visitor.Operation, p.upstreamOperation)\n\t\tp.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValueRef, importedType)\n\t}\n\n\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName))\n}",
"func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\tauthToken: token,\n\t\tscheme: scheme,\n\t\tclusterNameOrID: \"\",\n\t\tuserProvidedToken: flags.Token,\n\t\tverbose: flags.Verbose,\n\t}\n}",
"func (p *Provider) ExploreArguments(schema *models.Schema) map[string]*graphql.ArgumentConfig {\n\targuments := map[string]*graphql.ArgumentConfig{}\n\tfor _, module := range p.GetAll() {\n\t\tif p.shouldIncludeArgument(schema, module.Name(), module.Type()) {\n\t\t\tif arg, ok := module.(modulecapabilities.GraphQLArguments); ok {\n\t\t\t\tfor name, argument := range arg.Arguments() {\n\t\t\t\t\tif argument.ExploreArgumentsFunction != nil {\n\t\t\t\t\t\targuments[name] = argument.ExploreArgumentsFunction()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn arguments\n}",
"func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// \"\"\"\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &graphql.Field{\n\t// Name: \"name\",\n\t// Type: graphql.NonNull(graphql.String),\n\t// Description: \"name of the pet\",\n\t// DeprecationReason: \"\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\t// &graphql.Field{\n\t// Name: \"givenName\",\n\t// Type: graphql.String,\n\t// Description: \"givenName of the pet\",\n\t// DeprecationReason: \"No longer supported; please use name field.\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"Field\").Values(jen.Dict{\n\t\tjen.Id(\"Args\"): genArguments(field.Arguments),\n\t\tjen.Id(\"DeprecationReason\"): genDeprecationReason(field.Directives),\n\t\tjen.Id(\"Description\"): genDescription(field),\n\t\tjen.Id(\"Name\"): jen.Lit(field.Name.Value),\n\t\tjen.Id(\"Type\"): genOutputTypeReference(field.Type),\n\t})\n}",
"func (s *BasePhpParserListener) EnterAttributeNamedArgList(ctx *AttributeNamedArgListContext) {}",
"func generateMethodArg(ctx context.Context, t *testing.T, argGenerator *MethodArgumentGenerator) *reflect.Value {\n\tt.Helper()\n\texchName := strings.ToLower(argGenerator.Exchange.GetName())\n\tvar input reflect.Value\n\tswitch {\n\tcase argGenerator.MethodInputType.AssignableTo(stringParam):\n\t\tswitch argGenerator.MethodName {\n\t\tcase \"GetDepositAddress\":\n\t\t\tif argGenerator.argNum == 2 {\n\t\t\t\t// account type\n\t\t\t\tinput = reflect.ValueOf(\"trading\")\n\t\t\t} else {\n\t\t\t\t// Crypto Chain\n\t\t\t\tinput = reflect.ValueOf(cryptoChainPerExchange[exchName])\n\t\t\t}\n\t\tdefault:\n\t\t\t// OrderID\n\t\t\tinput = reflect.ValueOf(\"1337\")\n\t\t}\n\tcase argGenerator.MethodInputType.AssignableTo(credentialsParam):\n\t\tinput = reflect.ValueOf(&account.Credentials{\n\t\t\tKey: \"test\",\n\t\t\tSecret: \"test\",\n\t\t\tClientID: \"test\",\n\t\t\tPEMKey: \"test\",\n\t\t\tSubAccount: \"test\",\n\t\t\tOneTimePassword: \"test\",\n\t\t})\n\tcase argGenerator.MethodInputType.Implements(contextParam):\n\t\t// Need to deploy a context.Context value as nil value is not\n\t\t// checked throughout codebase.\n\t\tinput = reflect.ValueOf(ctx)\n\tcase argGenerator.MethodInputType.AssignableTo(feeBuilderParam):\n\t\tinput = reflect.ValueOf(&exchange.FeeBuilder{\n\t\t\tFeeType: exchange.OfflineTradeFee,\n\t\t\tAmount: 1337,\n\t\t\tPurchasePrice: 1337,\n\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(currencyPairParam):\n\t\tinput = reflect.ValueOf(argGenerator.AssetParams.Pair)\n\tcase argGenerator.MethodInputType.AssignableTo(assetParam):\n\t\tinput = reflect.ValueOf(argGenerator.AssetParams.Asset)\n\tcase argGenerator.MethodInputType.AssignableTo(klineParam):\n\t\tinput = reflect.ValueOf(kline.OneDay)\n\tcase argGenerator.MethodInputType.AssignableTo(codeParam):\n\t\tif argGenerator.MethodName == \"GetAvailableTransferChains\" {\n\t\t\tinput = reflect.ValueOf(currency.ETH)\n\t\t} else {\n\t\t\tinput = reflect.ValueOf(argGenerator.AssetParams.Pair.Base)\n\t\t}\n\tcase argGenerator.MethodInputType.AssignableTo(timeParam):\n\t\tif !argGenerator.StartTimeSet {\n\t\t\tinput = reflect.ValueOf(argGenerator.Start)\n\t\t\targGenerator.StartTimeSet = true\n\t\t} else {\n\t\t\tinput = reflect.ValueOf(argGenerator.End)\n\t\t}\n\tcase argGenerator.MethodInputType.AssignableTo(currencyPairsParam):\n\t\tb := argGenerator.Exchange.GetBase()\n\t\tif argGenerator.AssetParams.Asset != asset.Empty {\n\t\t\tinput = reflect.ValueOf(b.CurrencyPairs.Pairs[argGenerator.AssetParams.Asset].Available)\n\t\t} else {\n\t\t\tinput = reflect.ValueOf(currency.Pairs{\n\t\t\t\targGenerator.AssetParams.Pair,\n\t\t\t})\n\t\t}\n\tcase argGenerator.MethodInputType.AssignableTo(withdrawRequestParam):\n\t\treq := &withdraw.Request{\n\t\t\tExchange: exchName,\n\t\t\tDescription: \"1337\",\n\t\t\tAmount: 1,\n\t\t\tClientOrderID: \"1337\",\n\t\t}\n\t\tif argGenerator.MethodName == \"WithdrawCryptocurrencyFunds\" {\n\t\t\treq.Type = withdraw.Crypto\n\t\t\tswitch {\n\t\t\tcase !isFiat(t, argGenerator.AssetParams.Pair.Base.Item.Lower):\n\t\t\t\treq.Currency = argGenerator.AssetParams.Pair.Base\n\t\t\tcase !isFiat(t, argGenerator.AssetParams.Pair.Quote.Item.Lower):\n\t\t\t\treq.Currency = argGenerator.AssetParams.Pair.Quote\n\t\t\tdefault:\n\t\t\t\treq.Currency = currency.ETH\n\t\t\t}\n\n\t\t\treq.Crypto = withdraw.CryptoRequest{\n\t\t\t\tAddress: \"1337\",\n\t\t\t\tAddressTag: \"1337\",\n\t\t\t\tChain: cryptoChainPerExchange[exchName],\n\t\t\t}\n\t\t} else {\n\t\t\treq.Type = 
withdraw.Fiat\n\t\t\tb := argGenerator.Exchange.GetBase()\n\t\t\tif len(b.Config.BaseCurrencies) > 0 {\n\t\t\t\treq.Currency = b.Config.BaseCurrencies[0]\n\t\t\t} else {\n\t\t\t\treq.Currency = currency.USD\n\t\t\t}\n\t\t\treq.Fiat = withdraw.FiatRequest{\n\t\t\t\tBank: banking.Account{\n\t\t\t\t\tEnabled: true,\n\t\t\t\t\tID: \"1337\",\n\t\t\t\t\tBankName: \"1337\",\n\t\t\t\t\tBankAddress: \"1337\",\n\t\t\t\t\tBankPostalCode: \"1337\",\n\t\t\t\t\tBankPostalCity: \"1337\",\n\t\t\t\t\tBankCountry: \"1337\",\n\t\t\t\t\tAccountName: \"1337\",\n\t\t\t\t\tAccountNumber: \"1337\",\n\t\t\t\t\tSWIFTCode: \"1337\",\n\t\t\t\t\tIBAN: \"1337\",\n\t\t\t\t\tBSBNumber: \"1337\",\n\t\t\t\t\tBankCode: 1337,\n\t\t\t\t\tSupportedCurrencies: req.Currency.String(),\n\t\t\t\t\tSupportedExchanges: exchName,\n\t\t\t\t},\n\t\t\t\tIsExpressWire: false,\n\t\t\t\tRequiresIntermediaryBank: false,\n\t\t\t\tIntermediaryBankAccountNumber: 1338,\n\t\t\t\tIntermediaryBankName: \"1338\",\n\t\t\t\tIntermediaryBankAddress: \"1338\",\n\t\t\t\tIntermediaryBankCity: \"1338\",\n\t\t\t\tIntermediaryBankCountry: \"1338\",\n\t\t\t\tIntermediaryBankPostalCode: \"1338\",\n\t\t\t\tIntermediarySwiftCode: \"1338\",\n\t\t\t\tIntermediaryBankCode: 1338,\n\t\t\t\tIntermediaryIBAN: \"1338\",\n\t\t\t\tWireCurrency: \"1338\",\n\t\t\t}\n\t\t}\n\t\tinput = reflect.ValueOf(req)\n\tcase argGenerator.MethodInputType.AssignableTo(orderSubmitParam):\n\t\tinput = reflect.ValueOf(&order.Submit{\n\t\t\tExchange: exchName,\n\t\t\tType: order.Limit,\n\t\t\tSide: order.Buy,\n\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\tPrice: 1337,\n\t\t\tAmount: 1,\n\t\t\tClientID: \"1337\",\n\t\t\tClientOrderID: \"13371337\",\n\t\t\tImmediateOrCancel: true,\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(orderModifyParam):\n\t\tinput = reflect.ValueOf(&order.Modify{\n\t\t\tExchange: exchName,\n\t\t\tType: order.Limit,\n\t\t\tSide: order.Buy,\n\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\tPrice: 1337,\n\t\t\tAmount: 1,\n\t\t\tClientOrderID: \"13371337\",\n\t\t\tOrderID: \"1337\",\n\t\t\tImmediateOrCancel: true,\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(orderCancelParam):\n\t\tinput = reflect.ValueOf(&order.Cancel{\n\t\t\tExchange: exchName,\n\t\t\tType: order.Limit,\n\t\t\tSide: order.Buy,\n\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\tOrderID: \"1337\",\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(orderCancelsParam):\n\t\tinput = reflect.ValueOf([]order.Cancel{\n\t\t\t{\n\t\t\t\tExchange: exchName,\n\t\t\t\tType: order.Market,\n\t\t\t\tSide: order.Buy,\n\t\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\t\tOrderID: \"1337\",\n\t\t\t},\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(getOrdersRequestParam):\n\t\tinput = reflect.ValueOf(&order.MultiOrderRequest{\n\t\t\tType: order.AnyType,\n\t\t\tSide: order.AnySide,\n\t\t\tFromOrderID: \"1337\",\n\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\tPairs: currency.Pairs{argGenerator.AssetParams.Pair},\n\t\t})\n\tdefault:\n\t\tinput = reflect.Zero(argGenerator.MethodInputType)\n\t}\n\targGenerator.argNum++\n\n\treturn &input\n}",
"func (c *compileContext) makeArgumentResolver(typ schema.InputableType) (argumentResolver, error) {\n\tswitch t := typ.(type) {\n\tcase *schema.InputObjectType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase *schema.ListType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\tlistCreator := t.Unwrap().(schema.InputableType).InputListCreator()\n\n\t\t\tif av, ok := v.(schema.LiteralArray); ok {\n\t\t\t\treturn listCreator.NewList(len(av), func(i int) (interface{}, error) {\n\t\t\t\t\treturn elementResolver(ctx, av[i])\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// if we get a non-list value we have to wrap into a single element\n\t\t\t// list.\n\t\t\t// See https://facebook.github.io/graphql/June2018/#sec-Type-System.List\n\t\t\tresultElement, err := elementResolver(ctx, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn listCreator.NewList(1, func(i int) (interface{}, error) {\n\t\t\t\treturn resultElement, nil\n\t\t\t})\n\t\t}, nil\n\n\tcase *schema.NotNilType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Required value was not supplied\")\n\t\t\t}\n\t\t\treturn elementResolver(ctx, v)\n\t\t}, nil\n\tcase *schema.ScalarType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase *schema.EnumType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn t.Decode(ctx, v)\n\t\t\t}\n\t\t\tval, ok := v.(schema.LiteralString)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Expected string, got %v\", v)\n\t\t\t}\n\t\t\treturn t.Decode(ctx, val)\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Invalid type for input argument: %v\", typ)\n\t}\n}",
"func (s *BaseKotlinParserListener) EnterValueArguments(ctx *ValueArgumentsContext) {}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
genArgument generates argument config for given AST | func genArgument(arg *ast.InputValueDefinition) *jen.Statement {
//
// Generate config for argument
//
// == Example input SDL
//
// type Dog {
// name(
// "style is stylish"
// style: NameComponentsStyle = SHORT,
// ): String!
// }
//
// == Example output
//
// &ArgumentConfig{
// Type: graphql.NonNull(graphql.String),
// DefaultValue: "SHORT", // TODO: ???
// Description: "style is stylish",
// }
//
return jen.Op("&").Qual(defsPkg, "ArgumentConfig").Values(jen.Dict{
jen.Id("DefaultValue"): genValue(arg.DefaultValue),
jen.Id("Description"): genDescription(arg),
jen.Id("Type"): genInputTypeReference(arg.Type),
})
} | [
"func genArguments(args []*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// FieldConfigArgument{\n\t// \"style\": &ArgumentConfig{ ... }\n\t// },\n\t//\n\treturn jen.Qual(defsPkg, \"FieldConfigArgument\").Values(\n\t\tjen.DictFunc(func(d jen.Dict) {\n\t\t\tfor _, arg := range args {\n\t\t\t\td[jen.Lit(arg.Name.Value)] = genArgument(arg)\n\t\t\t}\n\t\t}),\n\t)\n}",
"func arg(gt graphql.Input, dv interface{}, opts ...string) *graphql.ArgumentConfig {\n\tdes := \"\"\n\tif len(opts) > 0 {\n\t\tdes = opts[0]\n\t}\n\n\treturn &graphql.ArgumentConfig{\n\t\tType: gt,\n\t\tDescription: des,\n\t\tDefaultValue: dv,\n\t}\n}",
"func (s *BaseObjectiveCParserListener) EnterArgumentExpression(ctx *ArgumentExpressionContext) {}",
"func parseArgument(p *parser) (*ast.Argument, error) {\n\tvar label string\n\tvar labelStartPos, labelEndPos ast.Position\n\n\texpr, err := parseExpression(p, lowestBindingPower)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.skipSpaceAndComments()\n\n\t// If a colon follows the expression, the expression was our label.\n\tif p.current.Is(lexer.TokenColon) {\n\t\tlabelEndPos = p.current.EndPos\n\n\t\tidentifier, ok := expr.(*ast.IdentifierExpression)\n\t\tif !ok {\n\t\t\treturn nil, p.syntaxError(\n\t\t\t\t\"expected identifier for label, got %s\",\n\t\t\t\texpr,\n\t\t\t)\n\t\t}\n\t\tlabel = identifier.Identifier.Identifier\n\t\tlabelStartPos = expr.StartPosition()\n\n\t\t// Skip the identifier\n\t\tp.nextSemanticToken()\n\n\t\texpr, err = parseExpression(p, lowestBindingPower)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(label) > 0 {\n\t\treturn ast.NewArgument(\n\t\t\tp.memoryGauge,\n\t\t\tlabel,\n\t\t\t&labelStartPos,\n\t\t\t&labelEndPos,\n\t\t\texpr,\n\t\t), nil\n\t}\n\treturn ast.NewUnlabeledArgument(p.memoryGauge, expr), nil\n}",
"func NewArgument(meta ScriptMetaData, node *node32, value Value) Argument {\n\treturn &argument{astNode: astNode{meta: meta, node: node}, value: value}\n}",
"func (ArgumentFalse) argumentNode() {}",
"func (s *BasePhpParserListener) EnterActualArgument(ctx *ActualArgumentContext) {}",
"func ForArgument(argRaw markers.Argument) Argument {\n\tres := Argument{\n\t\tOptional: argRaw.Optional,\n\t}\n\n\tif argRaw.ItemType != nil {\n\t\titemType := ForArgument(*argRaw.ItemType)\n\t\tres.ItemType = &itemType\n\t}\n\n\tswitch argRaw.Type {\n\tcase markers.IntType:\n\t\tres.Type = \"int\"\n\tcase markers.StringType:\n\t\tres.Type = \"string\"\n\tcase markers.BoolType:\n\t\tres.Type = \"bool\"\n\tcase markers.AnyType:\n\t\tres.Type = \"any\"\n\tcase markers.SliceType:\n\t\tres.Type = \"slice\"\n\tcase markers.RawType:\n\t\tres.Type = \"raw\"\n\tcase markers.InvalidType:\n\t\tres.Type = \"invalid\"\n\t}\n\n\treturn res\n}",
"func NewArgument(name PascalString, dataType NodeId, valueRank int32, noOfArrayDimensions int32, arrayDimensions []uint32, description LocalizedText) *_Argument {\n\t_result := &_Argument{\n\t\tName: name,\n\t\tDataType: dataType,\n\t\tValueRank: valueRank,\n\t\tNoOfArrayDimensions: noOfArrayDimensions,\n\t\tArrayDimensions: arrayDimensions,\n\t\tDescription: description,\n\t\t_ExtensionObjectDefinition: NewExtensionObjectDefinition(),\n\t}\n\t_result._ExtensionObjectDefinition._ExtensionObjectDefinitionChildRequirements = _result\n\treturn _result\n}",
"func (c JavacCommand) GenerateArgumentList() []string {\n\targumentArray := make([]string, 0)\n\targumentArray = append(argumentArray, \"-d\", c.DestinationDirectory)\n\n\tif c.DebuggingInformation != \"\" {\n\t\targumentArray = append(argumentArray, c.DebuggingInformation)\n\t}\n\n\tif c.Deprecation {\n\t\targumentArray = append(argumentArray, \"-deprecation\")\n\t}\n\n\tif len(c.SourceFiles) != 0 {\n\t\targumentArray = append(argumentArray, c.SourceFiles...)\n\t}\n\n\tif len(c.ClassPath) != 0 {\n\t\targumentArray = append(argumentArray, \"-cp\", strings.Join(c.ClassPath, \":\"))\n\t}\n\n\tif c.SourceVersion != \"\" {\n\t\targumentArray = append(argumentArray, \"-source\", c.SourceVersion)\n\t}\n\n\tif c.LintWarnings != \"\" {\n\t\targumentArray = append(argumentArray, c.LintWarnings)\n\t}\n\n\tif c.Encoding != \"\" {\n\t\targumentArray = append(argumentArray, \"-encoding\", c.Encoding)\n\t}\n\n\tif c.Verbose {\n\t\targumentArray = append(argumentArray, \"-verbose\")\n\t}\n\n\tif c.Target != \"\" {\n\t\targumentArray = append(argumentArray, \"-target\", c.Target)\n\t}\n\n\treturn argumentArray\n}",
"func (s *BasePhpParserListener) EnterAttributeArgList(ctx *AttributeArgListContext) {}",
"func (s *BasePlSqlParserListener) EnterArgument(ctx *ArgumentContext) {}",
"func (s *BaseasmZ80Listener) EnterArgument(ctx *ArgumentContext) {}",
"func generateMethodArg(ctx context.Context, t *testing.T, argGenerator *MethodArgumentGenerator) *reflect.Value {\n\tt.Helper()\n\texchName := strings.ToLower(argGenerator.Exchange.GetName())\n\tvar input reflect.Value\n\tswitch {\n\tcase argGenerator.MethodInputType.AssignableTo(stringParam):\n\t\tswitch argGenerator.MethodName {\n\t\tcase \"GetDepositAddress\":\n\t\t\tif argGenerator.argNum == 2 {\n\t\t\t\t// account type\n\t\t\t\tinput = reflect.ValueOf(\"trading\")\n\t\t\t} else {\n\t\t\t\t// Crypto Chain\n\t\t\t\tinput = reflect.ValueOf(cryptoChainPerExchange[exchName])\n\t\t\t}\n\t\tdefault:\n\t\t\t// OrderID\n\t\t\tinput = reflect.ValueOf(\"1337\")\n\t\t}\n\tcase argGenerator.MethodInputType.AssignableTo(credentialsParam):\n\t\tinput = reflect.ValueOf(&account.Credentials{\n\t\t\tKey: \"test\",\n\t\t\tSecret: \"test\",\n\t\t\tClientID: \"test\",\n\t\t\tPEMKey: \"test\",\n\t\t\tSubAccount: \"test\",\n\t\t\tOneTimePassword: \"test\",\n\t\t})\n\tcase argGenerator.MethodInputType.Implements(contextParam):\n\t\t// Need to deploy a context.Context value as nil value is not\n\t\t// checked throughout codebase.\n\t\tinput = reflect.ValueOf(ctx)\n\tcase argGenerator.MethodInputType.AssignableTo(feeBuilderParam):\n\t\tinput = reflect.ValueOf(&exchange.FeeBuilder{\n\t\t\tFeeType: exchange.OfflineTradeFee,\n\t\t\tAmount: 1337,\n\t\t\tPurchasePrice: 1337,\n\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(currencyPairParam):\n\t\tinput = reflect.ValueOf(argGenerator.AssetParams.Pair)\n\tcase argGenerator.MethodInputType.AssignableTo(assetParam):\n\t\tinput = reflect.ValueOf(argGenerator.AssetParams.Asset)\n\tcase argGenerator.MethodInputType.AssignableTo(klineParam):\n\t\tinput = reflect.ValueOf(kline.OneDay)\n\tcase argGenerator.MethodInputType.AssignableTo(codeParam):\n\t\tif argGenerator.MethodName == \"GetAvailableTransferChains\" {\n\t\t\tinput = reflect.ValueOf(currency.ETH)\n\t\t} else {\n\t\t\tinput = reflect.ValueOf(argGenerator.AssetParams.Pair.Base)\n\t\t}\n\tcase argGenerator.MethodInputType.AssignableTo(timeParam):\n\t\tif !argGenerator.StartTimeSet {\n\t\t\tinput = reflect.ValueOf(argGenerator.Start)\n\t\t\targGenerator.StartTimeSet = true\n\t\t} else {\n\t\t\tinput = reflect.ValueOf(argGenerator.End)\n\t\t}\n\tcase argGenerator.MethodInputType.AssignableTo(currencyPairsParam):\n\t\tb := argGenerator.Exchange.GetBase()\n\t\tif argGenerator.AssetParams.Asset != asset.Empty {\n\t\t\tinput = reflect.ValueOf(b.CurrencyPairs.Pairs[argGenerator.AssetParams.Asset].Available)\n\t\t} else {\n\t\t\tinput = reflect.ValueOf(currency.Pairs{\n\t\t\t\targGenerator.AssetParams.Pair,\n\t\t\t})\n\t\t}\n\tcase argGenerator.MethodInputType.AssignableTo(withdrawRequestParam):\n\t\treq := &withdraw.Request{\n\t\t\tExchange: exchName,\n\t\t\tDescription: \"1337\",\n\t\t\tAmount: 1,\n\t\t\tClientOrderID: \"1337\",\n\t\t}\n\t\tif argGenerator.MethodName == \"WithdrawCryptocurrencyFunds\" {\n\t\t\treq.Type = withdraw.Crypto\n\t\t\tswitch {\n\t\t\tcase !isFiat(t, argGenerator.AssetParams.Pair.Base.Item.Lower):\n\t\t\t\treq.Currency = argGenerator.AssetParams.Pair.Base\n\t\t\tcase !isFiat(t, argGenerator.AssetParams.Pair.Quote.Item.Lower):\n\t\t\t\treq.Currency = argGenerator.AssetParams.Pair.Quote\n\t\t\tdefault:\n\t\t\t\treq.Currency = currency.ETH\n\t\t\t}\n\n\t\t\treq.Crypto = withdraw.CryptoRequest{\n\t\t\t\tAddress: \"1337\",\n\t\t\t\tAddressTag: \"1337\",\n\t\t\t\tChain: cryptoChainPerExchange[exchName],\n\t\t\t}\n\t\t} else {\n\t\t\treq.Type = 
withdraw.Fiat\n\t\t\tb := argGenerator.Exchange.GetBase()\n\t\t\tif len(b.Config.BaseCurrencies) > 0 {\n\t\t\t\treq.Currency = b.Config.BaseCurrencies[0]\n\t\t\t} else {\n\t\t\t\treq.Currency = currency.USD\n\t\t\t}\n\t\t\treq.Fiat = withdraw.FiatRequest{\n\t\t\t\tBank: banking.Account{\n\t\t\t\t\tEnabled: true,\n\t\t\t\t\tID: \"1337\",\n\t\t\t\t\tBankName: \"1337\",\n\t\t\t\t\tBankAddress: \"1337\",\n\t\t\t\t\tBankPostalCode: \"1337\",\n\t\t\t\t\tBankPostalCity: \"1337\",\n\t\t\t\t\tBankCountry: \"1337\",\n\t\t\t\t\tAccountName: \"1337\",\n\t\t\t\t\tAccountNumber: \"1337\",\n\t\t\t\t\tSWIFTCode: \"1337\",\n\t\t\t\t\tIBAN: \"1337\",\n\t\t\t\t\tBSBNumber: \"1337\",\n\t\t\t\t\tBankCode: 1337,\n\t\t\t\t\tSupportedCurrencies: req.Currency.String(),\n\t\t\t\t\tSupportedExchanges: exchName,\n\t\t\t\t},\n\t\t\t\tIsExpressWire: false,\n\t\t\t\tRequiresIntermediaryBank: false,\n\t\t\t\tIntermediaryBankAccountNumber: 1338,\n\t\t\t\tIntermediaryBankName: \"1338\",\n\t\t\t\tIntermediaryBankAddress: \"1338\",\n\t\t\t\tIntermediaryBankCity: \"1338\",\n\t\t\t\tIntermediaryBankCountry: \"1338\",\n\t\t\t\tIntermediaryBankPostalCode: \"1338\",\n\t\t\t\tIntermediarySwiftCode: \"1338\",\n\t\t\t\tIntermediaryBankCode: 1338,\n\t\t\t\tIntermediaryIBAN: \"1338\",\n\t\t\t\tWireCurrency: \"1338\",\n\t\t\t}\n\t\t}\n\t\tinput = reflect.ValueOf(req)\n\tcase argGenerator.MethodInputType.AssignableTo(orderSubmitParam):\n\t\tinput = reflect.ValueOf(&order.Submit{\n\t\t\tExchange: exchName,\n\t\t\tType: order.Limit,\n\t\t\tSide: order.Buy,\n\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\tPrice: 1337,\n\t\t\tAmount: 1,\n\t\t\tClientID: \"1337\",\n\t\t\tClientOrderID: \"13371337\",\n\t\t\tImmediateOrCancel: true,\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(orderModifyParam):\n\t\tinput = reflect.ValueOf(&order.Modify{\n\t\t\tExchange: exchName,\n\t\t\tType: order.Limit,\n\t\t\tSide: order.Buy,\n\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\tPrice: 1337,\n\t\t\tAmount: 1,\n\t\t\tClientOrderID: \"13371337\",\n\t\t\tOrderID: \"1337\",\n\t\t\tImmediateOrCancel: true,\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(orderCancelParam):\n\t\tinput = reflect.ValueOf(&order.Cancel{\n\t\t\tExchange: exchName,\n\t\t\tType: order.Limit,\n\t\t\tSide: order.Buy,\n\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\tOrderID: \"1337\",\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(orderCancelsParam):\n\t\tinput = reflect.ValueOf([]order.Cancel{\n\t\t\t{\n\t\t\t\tExchange: exchName,\n\t\t\t\tType: order.Market,\n\t\t\t\tSide: order.Buy,\n\t\t\t\tPair: argGenerator.AssetParams.Pair,\n\t\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\t\tOrderID: \"1337\",\n\t\t\t},\n\t\t})\n\tcase argGenerator.MethodInputType.AssignableTo(getOrdersRequestParam):\n\t\tinput = reflect.ValueOf(&order.MultiOrderRequest{\n\t\t\tType: order.AnyType,\n\t\t\tSide: order.AnySide,\n\t\t\tFromOrderID: \"1337\",\n\t\t\tAssetType: argGenerator.AssetParams.Asset,\n\t\t\tPairs: currency.Pairs{argGenerator.AssetParams.Pair},\n\t\t})\n\tdefault:\n\t\tinput = reflect.Zero(argGenerator.MethodInputType)\n\t}\n\targGenerator.argNum++\n\n\treturn &input\n}",
"func (app ApplicationArguments) Argument(name, description string, shorts ...rune) *kingpin.FlagClause {\n\treturn app.add(name, description, false, shorts...)\n}",
"func (s *BasePhpParserListener) EnterAttributeNamedArgList(ctx *AttributeNamedArgListContext) {}",
"func (s *BasePhpParserListener) EnterAttributeNamedArg(ctx *AttributeNamedArgContext) {}",
"func (s *BaseLangListener) EnterArg(ctx *ArgContext) {}",
"func GenAST(program []Statement) AST {\n\tvar ast AST\n\tfor _, stmt := range program {\n\t\tv, err := ParseVerb(stmt)\n\t\tif err != nil { //TODO\n\t\t\t//panic(ParserError{stmtIndex: stmtIndex, tok: stmt[0], message: fmt.Sprintf(\"First token in statement must be a word, was %s\", stmt[0].tokType.toString())})\n\t\t\tpanic(err)\n\t\t}\n\t\tast = append(ast, v)\n\t}\n\treturn ast\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AddReceipt adds a receipt for the user. | func (client Client) AddReceipt(userId string, text string) error {
	addReceiptUrl := client.backendUrl + "/internal/receipt"
	request := addReceiptRequest{ReceiptString: text, UserId: userId}
	reader, err := getReader(request)
	if err != nil {
		return err
	}
	response, err := http.Post(addReceiptUrl, "text/javascript", reader)
	if err != nil {
		return err
	}
	defer response.Body.Close() // close the body to avoid leaking the connection
	switch response.StatusCode {
	case http.StatusOK:
		return nil
	default:
		return errors.New(response.Status)
	}
} | [
"func (ptu *PaymentTypeUpdate) AddReceipt(r ...*Receipt) *PaymentTypeUpdate {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn ptu.AddReceiptIDs(ids...)\n}",
"func (cc *CustomerCreate) AddReceipt(r ...*Receipt) *CustomerCreate {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn cc.AddReceiptIDs(ids...)\n}",
"func (puo *ProductUpdateOne) AddReceipt(r ...*Receipt) *ProductUpdateOne {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn puo.AddReceiptIDs(ids...)\n}",
"func (pu *ProductUpdate) AddReceipt(r ...*Receipt) *ProductUpdate {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn pu.AddReceiptIDs(ids...)\n}",
"func (service *Service) AddUser(accountId types.ID) error {\n\t// you can be delegate of a user after the user designate you as a delegate.\n\tif isDelegate, err := service.accounts.IsDelegateOf(service.addr, accountId); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to call Accounts.IsDelegateOf\")\n\t} else if !isDelegate {\n\t\treturn ErrDelegationNotAllowed\n\t}\n\tservice.accountIds = append(service.accountIds, accountId)\n\treturn nil\n}",
"func (r *Receipt) AddItem(item *Item) {\n r.Items = append(r.Items, item)\n r.Taxes += float64(item.Quantity) * item.Taxes\n r.Total += float64(item.Quantity) * (item.Price + item.Taxes)\n}",
"func (c *BasicClaimManager) AddReceipt(seqNo int64,\n\tbDataFile string, bData []byte, bSig []byte,\n\ttData map[ffmpeg.VideoProfile][]byte, tStart time.Time, tEnd time.Time) ([]byte, error) {\n\n\t_, ok := c.segClaimMap[seqNo]\n\tif ok {\n\t\treturn []byte{}, fmt.Errorf(\"Receipt for %v:%v already exists\", c.jobID.String(), seqNo)\n\t}\n\n\t// ensure that all our profiles match up: check that lengths match\n\tif len(c.pLookup) != len(tData) {\n\t\treturn []byte{}, fmt.Errorf(\"Job %v Mismatched profiles in segment; not claiming\", c.jobID)\n\t\t// XXX record error in db\n\t}\n\n\t// ensure profiles match up, part 2: check for unknown profiles in the list\n\thashes := make([][]byte, len(tData))\n\tfor profile, td := range tData {\n\t\ti, ok := c.pLookup[profile]\n\t\tif !ok {\n\t\t\treturn []byte{}, fmt.Errorf(\"Job %v cannot find profile: %v\", c.jobID, profile)\n\t\t\t// XXX record error in db\n\t\t}\n\t\thashes[i] = crypto.Keccak256(td) // set index based on profile ordering\n\t}\n\ttHash := crypto.Keccak256(hashes...)\n\tbHash := crypto.Keccak256(bData)\n\n\tcd := &claimData{\n\t\tseqNo: seqNo,\n\t\tbFile: bDataFile,\n\t\tdataHash: bHash,\n\t\tbSig: bSig,\n\t\tclaimConcatTDatahash: tHash,\n\t}\n\n\tif err := c.db.InsertReceipt(c.jobID, seqNo, bDataFile, bHash, bSig, tHash, tStart, tEnd); err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tc.cost = new(big.Int).Add(c.cost, c.totalSegCost)\n\tc.segClaimMap[seqNo] = cd\n\tc.unclaimedSegs[seqNo] = true\n\t// glog.Infof(\"Added %v. unclaimSegs: %v\", seqNo, c.unclaimedSegs)\n\n\treturn tHash, nil\n}",
"func InsertReceipt(r *Receipt) (int64, error) {\n\tvar tid = int64(0)\n\tres, err := RRdb.Prepstmt.InsertReceipt.Exec(r.PRCPTID, r.BID, r.TCID, r.PMTID, r.DEPID, r.DID, r.RAID, r.Dt, r.DocNo, r.Amount, r.AcctRuleReceive, r.ARID, r.AcctRuleApply, r.FLAGS, r.Comment, r.OtherPayorName, r.CreateBy, r.LastModBy)\n\tif nil == err {\n\t\tid, err := res.LastInsertId()\n\t\tif err == nil {\n\t\t\ttid = int64(id)\n\t\t\tr.RCPTID = tid\n\t\t}\n\t} else {\n\t\terr = insertError(err, \"Receipt\", *r)\n\t}\n\treturn tid, err\n}",
"func (ptu *PaymentTypeUpdate) AddReceiptIDs(ids ...int) *PaymentTypeUpdate {\n\tptu.mutation.AddReceiptIDs(ids...)\n\treturn ptu\n}",
"func (_UsersData *UsersDataTransactor) AddUser(opts *bind.TransactOpts, uuid [16]byte, userAddress common.Address, orgUuid [16]byte, publicKey [2][32]byte, idCartNoHash [32]byte, time *big.Int) (*types.Transaction, error) {\n\treturn _UsersData.contract.Transact(opts, \"addUser\", uuid, userAddress, orgUuid, publicKey, idCartNoHash, time)\n}",
"func (base *Base) AddReminder(user int, reminder Reminder) error {\n userPath := fmt.Sprintf(\"%s/%d\", base.Source, user)\n\n // Adding user to memory if necessary\n flag := false\n for _, u := range base.Users {\n if u == user {\n flag = true\n }\n }\n if !flag {\n base.Users = append(base.Users, user)\n oops := os.Mkdir(userPath, os.ModeDir)\n if oops != nil {\n return oops\n }\n }\n\n // Saving reminder to memory\n reminderPath := fmt.Sprintf(\"%s/%d.json\", userPath, time.Now().Unix())\n raw, oops := json.Marshal(reminder)\n if oops != nil {\n return oops\n }\n oops = ioutil.WriteFile(reminderPath, raw, 0644)\n if oops != nil {\n return oops\n }\n\n return nil\n}",
"func (_AnchorChain *AnchorChainTransactor) AddUser(opts *bind.TransactOpts, user common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.contract.Transact(opts, \"addUser\", user)\n}",
"func (puo *ProductUpdateOne) AddReceiptIDs(ids ...int) *ProductUpdateOne {\n\tpuo.mutation.AddReceiptIDs(ids...)\n\treturn puo\n}",
"func (_UsersData *UsersDataTransactorSession) AddUser(uuid [16]byte, userAddress common.Address, orgUuid [16]byte, publicKey [2][32]byte, idCartNoHash [32]byte, time *big.Int) (*types.Transaction, error) {\n\treturn _UsersData.Contract.AddUser(&_UsersData.TransactOpts, uuid, userAddress, orgUuid, publicKey, idCartNoHash, time)\n}",
"func NewReceipt() *Receipt {\n\treturn &Receipt{}\n}",
"func (d *Dao) TxAddUserDiscount(tx *sql.Tx, r *model.VipUserDiscountHistory) (eff int64, err error) {\n\tvar res xsql.Result\n\tif res, err = tx.Exec(_addUserDiscount, r.Mid, r.DiscountID, r.OrderNo, r.Status); err != nil {\n\t\terr = errors.WithStack(err)\n\t\treturn\n\t}\n\teff, err = res.RowsAffected()\n\treturn\n}",
"func (a *Client) AddStockReceipts(params *AddStockReceiptsParams, authInfo runtime.ClientAuthInfoWriter) (*AddStockReceiptsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddStockReceiptsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"addStockReceipts\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/accounts/{koronaAccountId}/stockReceipts\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &AddStockReceiptsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*AddStockReceiptsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for addStockReceipts: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func NewReceipt(values map[string]string, contentID uuid.UUID, userID uuid.UUID) *Receipt {\n\tif values == nil {\n\t\tvalues = make(map[string]string, 0)\n\t}\n\treturn &Receipt{\n\t\tID: uuid.NewUUID(),\n\t\tValues: values,\n\t\tSendState: READY,\n\t\tCreated: time.Now(),\n\t\tContentID: contentID,\n\t\tUserID: userID,\n\t}\n}",
"func (vu *VaultUsers) AddUser(email string, publicKeyString string, masterPassphrase []byte) error {\n\tif vu.users[email] != nil {\n\t\treturn errors.New(\"User already exists in vault:\" + email)\n\t}\n\n\tuser, err := NewVaultUser(vu.path, email, publicKeyString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := user.SetEncryptedMasterKey(masterPassphrase); err != nil {\n\t\treturn err\n\t}\n\n\tif err := user.Save(); err != nil {\n\t\treturn err\n\t}\n\tvu.users[email] = user\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
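A minimal usage sketch for the AddReceipt call above, from inside the same package; the Client literal is an assumption, since only the backendUrl field is visible in the snippet:

	// Hypothetical construction: the snippet above only shows that Client
	// carries a backendUrl; field visibility and any constructor are assumed.
	client := Client{backendUrl: "http://localhost:8080"}
	if err := client.AddReceipt("user-42", "receipt payload"); err != nil {
		log.Fatalf("AddReceipt failed: %v", err) // needs "log" imported
	}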
MergeUnique merges the `source` string slice into `dest` and returns the result. It inserts from `source` only when `dest` does not `Contain` the given string. | func MergeUnique(dest, source []string) []string {
	for _, str := range source {
		if !Contain(dest, str) {
			dest = append(dest, str)
		}
	}
	return dest
} | [
"func MergeAndDeduplicateSlice(src []string, target []string) []string {\n\tm := make(map[string]bool)\n\tfor i := range src {\n\t\tm[src[i]] = true\n\t}\n\n\tfor i := range target {\n\t\tif _, ok := m[target[i]]; !ok {\n\t\t\tsrc = append(src, target[i])\n\t\t}\n\t}\n\n\treturn src\n}",
"func mergeStrings(tgt *[]string, src []string) bool {\n\tif t := util.NewStringSet(*tgt, src); len(t) != len(*tgt) {\n\t\t*tgt = t.Keys()\n\t\treturn true\n\t}\n\treturn false\n}",
"func UniqueAppend(slice []string, s ...string) []string {\n\tfor i := range s {\n\t\tif IndexOf(slice, s[i]) != -1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tslice = append(slice, s[i])\n\t}\n\n\treturn slice\n}",
"func DedupStrings(src []string) []string {\n\tm := make(map[string]struct{}, len(src))\n\tdst := make([]string, 0, len(src))\n\n\tfor _, v := range src {\n\t\t// Skip empty items\n\t\tif len(v) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Skip duplicates\n\t\tif _, ok := m[v]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tm[v] = struct{}{}\n\t\tdst = append(dst, v)\n\t}\n\n\treturn dst\n}",
"func StringUniqueAppend(slice []string, s string) []string {\n\treturn strings.UniqueAppend(slice, s)\n}",
"func mergeStrings(v1, v2 *[]string) *[]string {\n\tswitch {\n\tcase v2 == nil && v1 == nil:\n\t\treturn &[]string{}\n\tcase v2 == nil:\n\t\treturn v1\n\tcase v1 == nil:\n\t\treturn v2\n\t}\n\t// merge the two uniquely\n\tvar ret []string\n\tm := make(map[string]bool)\n\tfor _, s := range *v1 {\n\t\tif m[s] {\n\t\t\tcontinue\n\t\t}\n\t\tret = append(ret, s)\n\t}\n\tfor _, s := range *v2 {\n\t\tif m[s] {\n\t\t\tcontinue\n\t\t}\n\t\tret = append(ret, s)\n\t}\n\treturn &ret\n}",
"func MergeUnique(left, right []string) []string {\n\treturn CollectVariety(left, right, GetUnique, GetUnique, GetUnique)\n}",
"func MergeStringSlices(slice1 []string, slice2 []string) []string {\n\tfor _, item := range slice2 {\n\t\tif !IsStringPresent(slice1, item) {\n\t\t\tslice1 = append(slice1, item)\n\t\t}\n\t}\n\treturn slice1\n}",
"func AppendUniqueSlices(a, b []string) []string {\n\tfor _, e := range a {\n\t\tif !SliceContainsString(e, b) {\n\t\t\tb = append(b, e)\n\t\t}\n\t}\n\treturn b\n}",
"func appendUnique(s []string, e string) []string {\n\tif !contains(s, e) {\n\t\treturn append(s, e)\n\t}\n\treturn s\n}",
"func UniqueStringSlice(in []string) (out []string) {\n\tvar inVal, outVal string\n\n\t// if in nil or empty...\n\tif nil == in || 0 == len(in) {\n\t\treturn\n\t}\n\nUniqueLoop:\n\tfor _, inVal = range in {\n\t\t// see if empty, but don't change values for non-empty\n\t\tinVal = strings.TrimSpace(inVal)\n\t\tif \"\" == inVal {\n\t\t\tcontinue UniqueLoop\n\t\t}\n\n\t\tfor _, outVal = range out {\n\t\t\tif outVal == inVal {\n\t\t\t\tcontinue UniqueLoop\n\t\t\t}\n\t\t}\n\n\t\tout = append(out, inVal)\n\t}\n\n\treturn\n}",
"func (c StringArrayCollection) Merge(i interface{}) Collection {\n\tm := i.([]string)\n\tvar d = make([]string, len(c.value))\n\tcopy(d, c.value)\n\n\tfor i := 0; i < len(m); i++ {\n\t\texist := false\n\t\tfor j := 0; j < len(d); j++ {\n\t\t\tif d[j] == m[i] {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exist {\n\t\t\td = append(d, m[i])\n\t\t}\n\t}\n\n\treturn StringArrayCollection{\n\t\tvalue: d,\n\t}\n}",
"func AddStringIfMissing(slice []string, s string) (bool, []string) {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn false, slice\n\t\t}\n\t}\n\treturn true, append(slice, s)\n}",
"func CombineStringSlices(ins ...[]string) (out []string, delta int) {\n\tvar i int\n\tvar in []string\n\tvar inVal, outVal string\n\n\t// if there was no input...\n\tif 0 == len(ins) {\n\t\treturn\n\t}\n\n\tfor i, in = range ins {\n\t\t// if \"empty\", just move on\n\t\tif nil == in || 0 == len(in) {\n\t\t\tcontinue\n\t\t}\n\n\tValueLoop:\n\t\tfor _, inVal = range UniqueStringSlice(in) {\n\t\t\tfor _, outVal = range out {\n\t\t\t\tif outVal == inVal {\n\t\t\t\t\tcontinue ValueLoop\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tout = append(out, inVal)\n\t\t\tif 0 < i {\n\t\t\t\tdelta++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}",
"func appendIfMissing(slice []string, s string) []string {\n\tfor _, e := range slice {\n\t\tif e == s {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, s)\n}",
"func StringDeduplicate(ss []string) []string {\n\tresult := make([]string, 0)\n\tseen := make(map[string]string)\n\tfor i := range ss {\n\t\tif _, ok := seen[ss[i]]; !ok {\n\t\t\tresult = append(result, ss[i])\n\t\t\tseen[ss[i]] = ss[i]\n\t\t}\n\t}\n\treturn result\n}",
"func mergeTags(existing string, tags []string) string {\n\tif existing == \"\" {\n\t\treturn strings.Join(tags, \",\")\n\t}\n\told := strings.Split(existing, \",\")\n\tvar merged []string\n\tfor _, o := range old {\n\t\tfound := false\n\t\tfor _, tag := range tags {\n\t\t\tif tag == o {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmerged = append(merged, o)\n\t\t}\n\t}\n\tfor _, tag := range tags {\n\t\tfound := false\n\t\tfor _, o := range merged {\n\t\t\tif tag == o {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmerged = append(merged, tag)\n\t\t}\n\t}\n\treturn strings.Join(merged, \",\")\n}",
"func appendIfUnique(slice []string, elem string) []string {\r\n\tfor _, m := range slice {\r\n\t\tif m == elem {\r\n\t\t\treturn slice\r\n\t\t}\r\n\t}\r\n\r\n\treturn append(slice, elem)\r\n}",
"func unique(slice []string, s string) []string {\n\tfor _, el := range slice {\n\t\tif el == s {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, s)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
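A quick sketch of the merge semantics above, assuming Contain is a plain membership test over the slice:

	dest := []string{"a", "b"}
	src := []string{"b", "c", "a"}
	dest = MergeUnique(dest, src)
	fmt.Println(dest) // [a b c]: "b" and "a" from src are skipped as duplicates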
Empty reports whether kv is not set | func (kv BatchKeyRotateKV) Empty() bool {
return kv.Key == "" && kv.Value == ""
} | [
"func hasNonEmptyKV(kvMap map[string]string) bool {\n\tfor k, v := range kvMap {\n\t\tif strings.TrimSpace(k) == \"\" && strings.TrimSpace(v) == \"\" {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (kv BatchJobReplicateKV) Empty() bool {\n\treturn kv.Key == \"\" && kv.Value == \"\"\n}",
"func (k Kind) Empty() bool {\n\treturn k == \"\"\n}",
"func (h headerUtil) setIfEmpty(key, value string) {\n\tif v := h.Get(key); len(v) == 0 {\n\t\th.Set(key, value)\n\t}\n}",
"func (e EmptyKVStore) Has(key []byte) (bool, error) { return false, nil }",
"func Empty(v interface{}) error {\n\tif !isZero(v) {\n\t\treturn errFieldSet.New()\n\t}\n\treturn nil\n}",
"func kvSet(L *lua.LState) int {\n\tkv := checkKeyValue(L) // arg 1\n\tkey := L.ToString(2)\n\tvalue := L.ToString(3)\n\tL.Push(lua.LBool(nil == kv.Set(key, value)))\n\treturn 1 // Number of returned values\n}",
"func (q QuizKey) Empty() bool {\n\treturn string(q) == \"\"\n}",
"func (tag Tag) IsEmpty() bool {\n\treturn tag.Key == \"\"\n}",
"func (k FlagsEmptyKey) String() string {\n return FlagsEmpty(k).String()\n}",
"func (k Key) IsEmpty() bool {\n\treturn len(k) == 0\n}",
"func IsEmptyKey(observe byte) bool {\n\treturn observe == byte(EmptyKey)\n}",
"func (kl KeyLocator) Empty() bool {\n\treturn len(kl.Name)+len(kl.Digest) == 0\n}",
"func (md Metrics) isEmpty() bool {\n\n\tconst noData = \"METRIC DATA NOT FOUND\"\n\tbEmpty := false\n\n\tif 0 == strings.Compare(md.MetricName, noData) {\n\t\tbEmpty = true\n\t} else if 0 == len(md.Values) {\n\t\tbEmpty = true\n\t}\n\treturn bEmpty\n}",
"func (n KeyReference[T]) IsEmpty() bool {\n\treturn n.KeyNode == nil\n}",
"func IsSet(key string) bool { return viper.IsSet(key) }",
"func kvRemove(L *lua.LState) int {\n\tkv := checkKeyValue(L) // arg 1\n\tL.Push(lua.LBool(nil == kv.Remove()))\n\treturn 1 // Number of returned values\n}",
"func Empty() Optional {\n\treturn emtpy\n}",
"func (d *ResourceData) IsEmpty(key string) bool {\n\tg := d.RawValues.Get(key)\n\treturn g.Type == gjson.Null || len(g.Raw) == 0 || g.Raw == \"\\\"\\\"\" || emptyObjectOrArray(g)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
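A short sketch of the Empty semantics above: the zero value counts as unset.

	var kv BatchKeyRotateKV
	fmt.Println(kv.Empty()) // true: both Key and Value are ""
	kv.Key = "classification"
	fmt.Println(kv.Empty()) // false: setting either field makes kv non-empty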
Match matches the input kv against kv; the value is wildcard-matched according to the user input | func (kv BatchKeyRotateKV) Match(ikv BatchKeyRotateKV) bool {
	if kv.Empty() {
		return true
	}
	if strings.EqualFold(kv.Key, ikv.Key) {
		return wildcard.Match(kv.Value, ikv.Value)
	}
	return false
} | [
"func Match(goos, kv, key string) (value string, ok bool) {\n\tif len(kv) <= len(key) || kv[len(key)] != '=' {\n\t\treturn \"\", false\n\t}\n\n\tif goos == \"windows\" {\n\t\t// Case insensitive.\n\t\tif !strings.EqualFold(kv[:len(key)], key) {\n\t\t\treturn \"\", false\n\t\t}\n\t} else {\n\t\t// Case sensitive.\n\t\tif kv[:len(key)] != key {\n\t\t\treturn \"\", false\n\t\t}\n\t}\n\n\treturn kv[len(key)+1:], true\n}",
"func (kv BatchJobReplicateKV) Match(ikv BatchJobReplicateKV) bool {\n\tif kv.Empty() {\n\t\treturn true\n\t}\n\tif strings.EqualFold(kv.Key, ikv.Key) {\n\t\treturn wildcard.Match(kv.Value, ikv.Value)\n\t}\n\treturn false\n}",
"func matches(key, pattern string, split rune) error {\n\tkf := strings.FieldsFunc(key, func(r rune) bool { return r == split })\n\tpf := strings.FieldsFunc(pattern, func(r rune) bool { return r == split })\n\n\tif len(kf) > len(pf) {\n\t\treturn errFieldsMismatch\n\t}\n\n\tfor i, k := range kf {\n\t\tif !strings.HasPrefix(pf[i], k) {\n\t\t\treturn errMismatch\n\t\t}\n\t}\n\n\t// I guess everything is ok\n\treturn nil\n}",
"func (m AllKeysMatcher) Match(key string, attributes map[string]interface{}, bucketingKey *string) bool {\n\treturn true\n}",
"func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {\n\tfor k, v := range toCheck {\n\t\t// Check if key exists.\n\t\tif canonicalKey {\n\t\t\tk = http.CanonicalHeaderKey(k)\n\t\t}\n\t\tif values := toMatch[k]; values == nil {\n\t\t\treturn false\n\t\t} else if v != nil {\n\t\t\t// If value was defined as an empty string we only check that the\n\t\t\t// key exists. Otherwise we also check for equality.\n\t\t\tvalueExists := false\n\t\t\tfor _, value := range values {\n\t\t\t\tif v.MatchString(value) {\n\t\t\t\t\tvalueExists = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !valueExists {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}",
"func (f filters) matchAny(k string, v []byte) bool {\n\tfor _, filter := range f {\n\t\tif filter(k, v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (kt KeyToken) Match(okt KeyToken) bool {\n\tif kt.Tok.IsKeyword() && kt.Key != \"\" {\n\t\treturn kt.Tok.Match(okt.Tok) && kt.Key == okt.Key\n\t}\n\treturn kt.Tok.Match(okt.Tok)\n}",
"func (s Service) Match(pre string) Service {\n\tservice, lpre := Service{}, len(pre)\n\tfor k, v := range s {\n\t\tif len(k) > lpre && k[:lpre] == pre {\n\t\t\tservice[k[lpre:]] = v\n\t\t}\n\t}\n\treturn service\n}",
"func (l *Label) Match(labelStr string) bool {\n\tpair := strings.Split(labelStr, \"=\")\n\t// fmt.Printf(\"Label - pair: %v\\n\", pair)\n\t// fmt.Printf(\"Label - l: %v\\n\", l)\n\n\tif l.Key == pair[0] && l.Value == pair[1] {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (kl KeyTokenList) Match(okt KeyToken) bool {\n\tfor _, kt := range kl {\n\t\tif kt.Match(okt) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func KeyMatchFunc(args ...interface{}) (interface{}, error) {\n\tname1 := args[0].(string)\n\tname2 := args[1].(string)\n\n\treturn (bool)(KeyMatch(name1, name2)), nil\n}",
"func ExtCaseInsensitiveMatch(mval interface{}, sval map[string]interface{}) bool {\n\tspecif, ok := sval[\"value\"]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tspecval, ok := specif.(string)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tswitch mcast := mval.(type) {\n\tcase string:\n\t\tif strings.ToLower(specval) == strings.ToLower(mcast) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (r *Key) matcher(c *Client) func([]byte) bool {\n\treturn func(b []byte) bool {\n\t\tcr, err := unmarshalKey(b, c, r)\n\t\tif err != nil {\n\t\t\tc.Config.Logger.Warning(\"failed to unmarshal provided resource in matcher.\")\n\t\t\treturn false\n\t\t}\n\t\tnr := r.urlNormalized()\n\t\tncr := cr.urlNormalized()\n\t\tc.Config.Logger.Infof(\"looking for %v\\nin %v\", nr, ncr)\n\n\t\tif nr.Project == nil && ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Both Project fields null - considering equal.\")\n\t\t} else if nr.Project == nil || ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Project field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Project != *ncr.Project {\n\t\t\treturn false\n\t\t}\n\t\tif nr.Name == nil && ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Both Name fields null - considering equal.\")\n\t\t} else if nr.Name == nil || ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Name field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Name != *ncr.Name {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}",
"func (ef *Filter) ExactMatch(key, source string) bool {\n\tfieldValues, ok := ef.filter[key]\n\t//do not filter if there is no filter set or cannot determine filter\n\tif !ok || len(fieldValues) == 0 {\n\t\treturn true\n\t}\n\t// try to match full name value to avoid O(N) regular expression matching\n\treturn fieldValues[source]\n}",
"func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {\n\tfor k, v := range toCheck {\n\t\t// Check if key exists.\n\t\tif canonicalKey {\n\t\t\tk = http.CanonicalHeaderKey(k)\n\t\t}\n\t\tif values := toMatch[k]; values == nil {\n\t\t\treturn false\n\t\t} else if v != \"\" {\n\t\t\t// If value was defined as an empty string we only check that the\n\t\t\t// key exists. Otherwise we also check for equality.\n\t\t\tvalueExists := false\n\t\t\tfor _, value := range values {\n\t\t\t\tif v == value {\n\t\t\t\t\tvalueExists = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !valueExists {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}",
"func (a AnyValue) Match(v driver.Value) bool {\n\treturn true\n}",
"func (s *setting) Match(exists string) (bool, bool) {\n\tfor _, o := range s.Options {\n\t\tif o == exists {\n\t\t\treturn true, false\n\t\t} else if o == exists+\":\" {\n\t\t\treturn true, true\n\t\t}\n\t}\n\treturn false, false\n}",
"func (f RabinKarp) MatchAll(p string, v []string) Matches {\n\tvar matches Matches\n\tfor _, value := range v {\n\t\tif ok, match := f(p, value); ok {\n\t\t\tmatches = append(matches, match)\n\t\t}\n\t}\n\treturn matches\n}",
"func KeyMatch(key1 string, key2 string) bool {\n\ti := strings.Index(key2, \"*\")\n\tif i == -1 {\n\t\treturn key1 == key2\n\t}\n\n\tif len(key1) > i {\n\t\treturn key1[:i] == key2[:i]\n\t}\n\treturn key1 == key2[:i]\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
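A sketch of the matching rules above: an empty filter matches everything, keys compare case-insensitively, and values go through wildcard.Match:

	filter := BatchKeyRotateKV{Key: "classification", Value: "conf*"}
	obj := BatchKeyRotateKV{Key: "Classification", Value: "confidential"}
	fmt.Println(filter.Match(obj))             // true: case-insensitive key, wildcard value
	fmt.Println(BatchKeyRotateKV{}.Match(obj)) // true: an empty filter matches anything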
Validate validates input key rotation retries. | func (r BatchKeyRotateRetry) Validate() error {
	if r.Attempts < 0 {
		return errInvalidArgument
	}
	if r.Delay < 0 {
		return errInvalidArgument
	}
	return nil
} | [
"func (r BatchReplicateRetry) Validate() error {\n\tif r.Attempts < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Delay < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\treturn nil\n}",
"func (o *RetryOptions) Validate() {\n\tif o.MaxAttempts <= 0 {\n\t\to.MaxAttempts = 1\n\t}\n\n\tconst floor = 100 * time.Millisecond\n\tif o.MinDelay < floor {\n\t\to.MinDelay = floor\n\t}\n\n\tif o.MaxDelay <= 0 {\n\t\to.MaxDelay = time.Duration(float64(o.MinDelay) * math.Pow(backoffFactor, float64(o.MaxAttempts)))\n\t}\n}",
"func validateReadinessEndpointWithRetries(caBundle []*x509.Certificate, proxy, endpoint *url.URL, retries int) error {\n\tvar err error\n\tfor i := 0; i < retries; i++ {\n\t\terr = runReadinessProbe(caBundle, proxy, endpoint)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(proxyProbeWaitTime)\n\t}\n\n\treturn err\n}",
"func validateGenerateResyncInputs(auts, key, opc, rand []byte) error {\n\tif len(auts) != ExpectedAutsBytes {\n\t\treturn fmt.Errorf(\"incorrect auts size. Expected %v bytes, but got %v bytes\", ExpectedAutsBytes, len(auts))\n\t}\n\tif len(key) != ExpectedKeyBytes {\n\t\treturn fmt.Errorf(\"incorrect key size. Expected %v bytes, but got %v bytes\", ExpectedKeyBytes, len(key))\n\t}\n\tif len(opc) != ExpectedOpcBytes {\n\t\treturn fmt.Errorf(\"incorrect opc size. Expected %v bytes, but got %v bytes\", ExpectedOpcBytes, len(opc))\n\t}\n\tif len(rand) != RandChallengeBytes {\n\t\treturn fmt.Errorf(\"incorrect rand size. Expected %v bytes, but got %v bytes\", RandChallengeBytes, len(rand))\n\t}\n\treturn nil\n}",
"func (kv BatchJobReplicateKV) Validate() error {\n\tif kv.Key == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}",
"func repeatIsValid(re *Regexp, n int) bool {\n\tif re.Op == OpRepeat {\n\t\tm := re.Max\n\t\tif m == 0 {\n\t\t\treturn true\n\t\t}\n\t\tif m < 0 {\n\t\t\tm = re.Min\n\t\t}\n\t\tif m > n {\n\t\t\treturn false\n\t\t}\n\t\tif m > 0 {\n\t\t\tn /= m\n\t\t}\n\t}\n\tfor _, sub := range re.Sub {\n\t\tif !repeatIsValid(sub, n) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (k Keeper) ValidateRepay(ctx sdk.Context, sender sdk.AccAddress, coins sdk.Coins) error {\n\tsenderAcc := k.accountKeeper.GetAccount(ctx, sender)\n\tsenderCoins := senderAcc.SpendableCoins(ctx.BlockTime())\n\n\tfor _, coin := range coins {\n\t\tif senderCoins.AmountOf(coin.Denom).LT(coin.Amount) {\n\t\t\treturn sdkerrors.Wrapf(types.ErrInsufficientBalanceForRepay, \"account can only repay up to %s%s\", senderCoins.AmountOf(coin.Denom), coin.Denom)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func CheckReReplicate(failChan <-chan string) {\n\tfor {\n\t\tip := <-failChan\n\t\tif Master == \"\" {\n\t\t\tfileList := Metas[ip]\n\t\t\tdelete(Metas, ip)\n\t\t\t// re-replicate each file\n\t\t\tvar dst []string\n\t\t\tvar src []string\n\t\t\tfor _, reFile := range fileList {\n\t\t\t\tdst, src = checkFileReplica(reFile)\n\t\t\t\tif len(dst) > 0 && len(src) < replica {\n\t\t\t\t\treReplicate(reFile.FileName, dst, src)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func ValidateReplicas(pl *PartitionList, _ RebalanceConfig) (*PartitionList, error) {\n\tfor _, p := range pl.Partitions {\n\t\treplicaset := toBrokerSet(p.Replicas)\n\t\tif len(replicaset) != len(p.Replicas) {\n\t\t\treturn nil, fmt.Errorf(\"partition %v has duplicated replicas\", p)\n\t\t}\n\t}\n\n\treturn nil, nil\n}",
"func (b Takuzu) Validate() (bool, error) {\n\tfinished := true\n\n\tcomputeVal := func(cells []Cell) (val int) {\n\t\tfor i := 0; i < len(cells); i++ {\n\t\t\tval += cells[i].Value * 1 << uint(i)\n\t\t}\n\t\treturn\n\t}\n\n\tlineVals := make(map[int]bool)\n\tcolVals := make(map[int]bool)\n\n\tfor i := 0; i < b.Size; i++ {\n\t\tvar d []Cell\n\t\tvar full bool\n\t\tvar err error\n\n\t\t// Let's check line i\n\t\td = b.GetLine(i)\n\t\tfull, err = checkRange(d)\n\t\tif err != nil {\n\t\t\terr := err.(validationError)\n\t\t\terr.LineNumber = &i\n\t\t\treturn false, err\n\t\t}\n\t\tif full {\n\t\t\thv := computeVal(d)\n\t\t\tif lineVals[hv] {\n\t\t\t\terr := validationError{\n\t\t\t\t\tErrorType: ErrorDuplicate,\n\t\t\t\t\tLineNumber: &i,\n\t\t\t\t}\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tlineVals[hv] = true\n\t\t} else {\n\t\t\tfinished = false\n\t\t}\n\n\t\t// Let's check column i\n\t\td = b.GetColumn(i)\n\t\tfull, err = checkRange(d)\n\t\tif err != nil {\n\t\t\terr := err.(validationError)\n\t\t\terr.ColumnNumber = &i\n\t\t\treturn false, err\n\t\t}\n\t\tif full {\n\t\t\thv := computeVal(d)\n\t\t\tif colVals[hv] {\n\t\t\t\terr := validationError{\n\t\t\t\t\tErrorType: ErrorDuplicate,\n\t\t\t\t\tColumnNumber: &i,\n\t\t\t\t}\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tcolVals[hv] = true\n\t\t} else {\n\t\t\tfinished = false\n\t\t}\n\t}\n\treturn finished, nil\n}",
"func (err *WSError) Retry() bool {\n\tfor _, unretriable := range err.Get() {\n\t\tif reflect.TypeOf(err.ErrObj) == reflect.TypeOf(unretriable) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (r *Restore) Validate() error {\n\tlogrus.Trace(\"validating restore action configuration\")\n\n\t// verify bucket is provided\n\tif len(r.Bucket) == 0 {\n\t\treturn fmt.Errorf(\"no bucket provided\")\n\t}\n\n\t// verify filename is provided\n\tif len(r.Filename) == 0 {\n\t\treturn fmt.Errorf(\"no filename provided\")\n\t}\n\n\t// verify timeout is provided\n\tif r.Timeout == 0 {\n\t\treturn fmt.Errorf(\"timeout must be greater than 0\")\n\t}\n\n\treturn nil\n}",
"func (r *Retrier) RunRetry() error {\n\t// Start signal handler.\n\tsigHandler := signals.NewSignalHandler(10)\n\tgo sigHandler.Register()\n\n\tfinish := make(chan bool, 1)\n\tgo func() {\n\t\tselect {\n\t\tcase <-finish:\n\t\t\treturn\n\t\tcase <-time.After(10 * time.Second):\n\t\t\treturn\n\t\tdefault:\n\t\t\tfor {\n\t\t\t\tif sigHandler.GetState() != 0 {\n\t\t\t\t\tlogger.Critical(\"detected signal. retry failed.\")\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i := 0; i < r.retries; i++ {\n\t\terr := r.retryable.Try()\n\t\tif err != nil {\n\t\t\tlogger.Info(\"Retryable error: %v\", err)\n\t\t\ttime.Sleep(time.Duration(r.sleepSeconds) * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tfinish <- true\n\t\treturn nil\n\t}\n\n\tfinish <- true\n\treturn fmt.Errorf(\"unable to succeed at retry after %d attempts at %d seconds\", r.retries, r.sleepSeconds)\n}",
"func (m *ReplicaStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (t BatchJobReplicateResourceType) Validate() error {\n\tswitch t {\n\tcase BatchJobReplicateResourceMinIO:\n\tdefault:\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}",
"func ValidateInputLength(cepRaw interface{}) observable.Observable {\n\treturn observable.Create(func(emitter *observer.Observer, disposed bool) {\n\t\tcep, _ := cepRaw.(string)\n\t\tcepLength := len(cep)\n\t\tif cepLength <= cepSize {\n\t\t\temitter.OnNext(cep)\n\t\t\temitter.OnDone()\n\t\t} else {\n\t\t\temitter.OnError(errors.New(\"Cep length is less than 8 characters\"))\n\t\t}\n\t})\n}",
"func (sv *StubbedValidator) StubSuccessValidateRestart() {\n\tsv.revalidationError = nil\n}",
"func (c *Client) ShouldRetry(rpcName string, err error) bool {\n\treturn false\n}",
"func (ut *Client) ShouldRetry(name string, err error) bool {\n\treturn true\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
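A sketch of the retry validation above; Delay is assumed to be a time.Duration, as the comparison against zero suggests:

	ok := BatchKeyRotateRetry{Attempts: 3, Delay: 25 * time.Millisecond}
	fmt.Println(ok.Validate()) // <nil>
	bad := BatchKeyRotateRetry{Attempts: -1}
	fmt.Println(bad.Validate()) // errInvalidArgument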
Validate validates input key rotation encryption options. | func (e BatchJobKeyRotateEncryption) Validate() error {
	if e.Type != sses3 && e.Type != ssekms {
		return errInvalidArgument
	}
	spaces := strings.HasPrefix(e.Key, " ") || strings.HasSuffix(e.Key, " ")
	if e.Type == ssekms && spaces {
		return crypto.ErrInvalidEncryptionKeyID
	}
	if e.Type == ssekms && GlobalKMS != nil {
		ctx := kms.Context{}
		if e.Context != "" {
			b, err := base64.StdEncoding.DecodeString(e.Context)
			if err != nil {
				return err
			}
			json := jsoniter.ConfigCompatibleWithStandardLibrary
			if err := json.Unmarshal(b, &ctx); err != nil {
				return err
			}
		}
		e.kmsContext = kms.Context{}
		for k, v := range ctx {
			e.kmsContext[k] = v
		}
		ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
		if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
			return err
		}
	}
	return nil
} | [
"func (k EncryptionKeyDerived) Validate() error {\n\tif k.Length == 0 {\n\t\treturn ErrMissingField(\"length\")\n\t}\n\tif k.Length <= 0 {\n\t\treturn ErrInvalidKeyLength\n\t}\n\tif k.Algorithm == \"\" {\n\t\treturn ErrMissingField(\"algorithm\")\n\t}\n\tif k.Algorithm != KeyDerivationAlgorithmScrypt {\n\t\treturn ErrInvalidKeyDerivationAlgorithm\n\t}\n\n\tif k.Parameters == nil {\n\t\treturn ErrMissingField(\"parameters\")\n\t}\n\tparameters, ok := k.Parameters.(validator)\n\tif ok {\n\t\tif err := parameters.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif k.Metadata == nil {\n\t\treturn ErrMissingField(\"metadata\")\n\t}\n\tmetadata, ok := k.Metadata.(validator)\n\tif ok {\n\t\tif err := metadata.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (redis *RedisOptions) validateOptions() error {\n\tif redis.HostPort == \"\" {\n\t\treturn nil\n\t}\n\n\tif redis.DBTokens == redis.DBKeys {\n\t\treturn ErrIdenticalRedisDBs\n\t}\n\treturn nil\n}",
"func (e aesGCMEncodedEncryptor) ConfiguredToRotate() bool {\n\treturn len(e.primaryKey) == requiredKeyLength && len(e.secondaryKey) == requiredKeyLength\n}",
"func (k Key) Validate() error {\n\n\t// check method\n\tif err := k.hasValidMethod(); err != nil {\n\t\treturn err\n\t}\n\n\t//check label\n\tif err := k.hasValidLabel(); err != nil {\n\t\treturn err\n\t}\n\n\t// check secret\n\tif err := k.hasValidSecret32(); err != nil {\n\t\treturn err\n\t}\n\n\t// check algo\n\tif err := k.hasValidAlgo(); err != nil {\n\t\treturn err\n\t}\n\n\t// check digits\n\tif err := k.hasValidDigits(); err != nil {\n\t\treturn err\n\t}\n\n\t// check period\n\tif err := k.hasValidPeriod(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (k EncryptionKeySecretKey) Validate() error {\n\tif k.Length == 0 {\n\t\treturn ErrMissingField(\"length\")\n\t}\n\tif k.Length <= 0 {\n\t\treturn ErrInvalidKeyLength\n\t}\n\tif k.ID.IsZero() {\n\t\treturn ErrMissingField(\"id\")\n\t}\n\treturn nil\n}",
"func (o *Options) Verify() error {\n\tif o.GC != \"\" {\n\t\tvalid := isInArray(validGCOptions, o.GC)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid gc option '%s': valid values are %s`,\n\t\t\t\to.GC,\n\t\t\t\tstrings.Join(validGCOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.Scheduler != \"\" {\n\t\tvalid := isInArray(validSchedulerOptions, o.Scheduler)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid scheduler option '%s': valid values are %s`,\n\t\t\t\to.Scheduler,\n\t\t\t\tstrings.Join(validSchedulerOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.Serial != \"\" {\n\t\tvalid := isInArray(validSerialOptions, o.Serial)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid serial option '%s': valid values are %s`,\n\t\t\t\to.Serial,\n\t\t\t\tstrings.Join(validSerialOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.PrintSizes != \"\" {\n\t\tvalid := isInArray(validPrintSizeOptions, o.PrintSizes)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid size option '%s': valid values are %s`,\n\t\t\t\to.PrintSizes,\n\t\t\t\tstrings.Join(validPrintSizeOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.PanicStrategy != \"\" {\n\t\tvalid := isInArray(validPanicStrategyOptions, o.PanicStrategy)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid panic option '%s': valid values are %s`,\n\t\t\t\to.PanicStrategy,\n\t\t\t\tstrings.Join(validPanicStrategyOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.Opt != \"\" {\n\t\tif !isInArray(validOptOptions, o.Opt) {\n\t\t\treturn fmt.Errorf(\"invalid -opt=%s: valid values are %s\", o.Opt, strings.Join(validOptOptions, \", \"))\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *Rotation) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Rx\n\n\t// no validation rules for Ry\n\n\t// no validation rules for Rz\n\n\treturn nil\n}",
"func (a KeyAlgorithm) ValidKeySize(size int) error {\n\tswitch a {\n\tcase ECDSAKey:\n\t\tif !(size == 0 || size == 256 || size == 384 || size == 521) {\n\t\t\treturn fmt.Errorf(\"invalid ecdsa key size %d - key size must be either 256, 384 or 521\", size)\n\t\t}\n\t\treturn nil\n\tcase RSAKey:\n\t\tif !(size == 0 || (size >= minRSAKeySize && size <= maxRSAKeySize)) {\n\t\t\treturn fmt.Errorf(\"invalid rsa key size %d - key size must be between %d and %d\", size, minRSAKeySize, maxRSAKeySize)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"invalid key algorithm\")\n}",
"func (o *CryptoHandlerOpts) ValidateForEncryptDecrypt() error {\n\tif o.EncProvider == ThEncryptProviderSimple && o.SimpleKey == \"\" {\n\t\treturn fmt.Errorf(\"You must supply a valid simple-key when using the simply provider. \" +\n\t\t\t\"The simple provider uses AES and so the AES key should be either 16 or 32 byte to select AES-128 or AES-256 encryption\")\n\n\t}\n\tif (o.EncProvider == ThEncryptProviderVault || o.EncProvider == ThEncryptProviderVaultCli) && o.NamedEncKey == \"\" {\n\t\treturn fmt.Errorf(\"You must supply a vault-namedkey when using the vault provider \")\n\t}\n\treturn nil\n}",
"func (m *LicenseKeys) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLicenseKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateEncryptedDEK(encryptedDEK []byte) error {\n\tif len(encryptedDEK) == 0 {\n\t\treturn fmt.Errorf(\"encrypted DEK is empty\")\n\t}\n\tif len(encryptedDEK) > encryptedDEKMaxSize {\n\t\treturn fmt.Errorf(\"encrypted DEK is %d bytes, which exceeds the max size of %d\", len(encryptedDEK), encryptedDEKMaxSize)\n\t}\n\treturn nil\n}",
"func (m *MultiClusterLicenseKeys) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateVsan(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateKeyLength(key string) error {\n\tdata, err := base64.StdEncoding.DecodeString(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(data) != EARKeyLength {\n\t\treturn fmt.Errorf(\"key length should be 32 it is %d\", len(data))\n\t}\n\n\treturn nil\n}",
"func (c *Config) validateKeys() error {\n\tfor _, key := range c.keys {\n\t\tswitch len(key) {\n\t\tcase 5, 13, 16: // These are 'ASCII' strings, or at least N-byte strings of the right size.\n\t\t\t// No need to check.\n\t\tcase 10, 26, 32: // These are hex encoded byte strings.\n\t\t\t// Just to validate it is a valid hex string, don't need the result.\n\t\t\tif _, err := hex.DecodeString(key); err != nil {\n\t\t\t\treturn errors.Errorf(\"key with length 10, 26, or 32 should only contain hexadecimal digits: %q\", key)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn errors.Errorf(\"invalid key length: %q\", key)\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *Rotation) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateSchedule(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (a *Service) validateEncryptionAlgorithm(encryptionAlgorithm string) error {\n\n\t// convert to Jose content type\n\tencAlgorithm := jose.ContentEncryption(encryptionAlgorithm)\n\n\tif encAlgorithm == jose.A128CBC_HS256 ||\n\t\tencAlgorithm == jose.A192CBC_HS384 ||\n\t\tencAlgorithm == jose.A256CBC_HS512 ||\n\t\tencAlgorithm == jose.A128GCM ||\n\t\tencAlgorithm == jose.A192GCM ||\n\t\tencAlgorithm == jose.A256GCM {\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"invalid encryption algorithm\")\n}",
"func (o FlexvolumeToCSIOpts) Validate() error {\n\tif o.SourceStorageClass == \"\" {\n\t\treturn errors.New(\"source storage class is required\")\n\t}\n\tif o.DestinationStorageClass == \"\" {\n\t\treturn errors.New(\"destination storage class is required\")\n\t}\n\tif o.NodeName == \"\" {\n\t\treturn errors.New(\"node name is required\")\n\t}\n\tif o.PVMigratorBinPath == \"\" {\n\t\treturn errors.New(\"pv migrator binary path is required\")\n\t}\n\tif o.CephMigratorImage == \"\" {\n\t\treturn errors.New(\"ceph migrator image is required\")\n\t}\n\tif _, err := exec.LookPath(o.PVMigratorBinPath); err != nil {\n\t\treturn errors.Wrapf(err, \"which %s\", o.PVMigratorBinPath)\n\t}\n\treturn nil\n}",
"func (m *EncryptionAtRestConfig) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKmsConfigUUID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOpType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (r BatchKeyRotateRetry) Validate() error {\n\tif r.Attempts < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Delay < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
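A sketch of preparing sse-kms options for the validation above; the key name and KMS context are illustrative values, and the test GenerateKey round-trip only runs when GlobalKMS is configured:

	ctxJSON, _ := json.Marshal(map[string]string{"tenant": "acme"}) // hypothetical KMS context, stdlib encoding/json
	enc := BatchJobKeyRotateEncryption{
		Type:    ssekms,
		Key:     "my-kms-key", // illustrative key name
		Context: base64.StdEncoding.EncodeToString(ctxJSON),
	}
	if err := enc.Validate(); err != nil {
		// a Key with leading/trailing spaces, a malformed base64/JSON context,
		// or a failed test GenerateKey against GlobalKMS all surface here
	}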
KeyRotate rotates the encryption key of an object | func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, objInfo ObjectInfo) error {
	srcBucket := r.Bucket
	srcObject := objInfo.Name
	if objInfo.DeleteMarker || !objInfo.VersionPurgeStatus.Empty() {
		return nil
	}
	sseKMS := crypto.S3KMS.IsEncrypted(objInfo.UserDefined)
	sseS3 := crypto.S3.IsEncrypted(objInfo.UserDefined)
	if !sseKMS && !sseS3 { // object is neither sse-kms nor sse-s3 encrypted; unencrypted objects cannot be key-rotated
		return errInvalidEncryptionParameters
	}
	if sseKMS && r.Encryption.Type == sses3 { // downgrading from sse-kms to sse-s3 is disallowed
		return errInvalidEncryptionParameters
	}
	versioned := globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject)
	versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject)
	lock := api.NewNSLock(r.Bucket, objInfo.Name)
	lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lkctx.Context()
	defer lock.Unlock(lkctx)
	opts := ObjectOptions{
		VersionID:        objInfo.VersionID,
		Versioned:        versioned,
		VersionSuspended: versionSuspended,
		NoLock:           true,
	}
	obj, err := api.GetObjectInfo(ctx, r.Bucket, objInfo.Name, opts)
	if err != nil {
		return err
	}
	oi := obj.Clone()
	var (
		newKeyID      string
		newKeyContext kms.Context
	)
	// Carry over only internal (reserved-prefix) metadata; rotateKey updates
	// the sealed-key entries within this map in place.
	encMetadata := make(map[string]string)
	for k, v := range oi.UserDefined {
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			encMetadata[k] = v
		}
	}
	if (sseKMS || sseS3) && r.Encryption.Type == ssekms {
		if err = r.Encryption.Validate(); err != nil {
			return err
		}
		newKeyID = strings.TrimPrefix(r.Encryption.Key, crypto.ARNPrefix)
		newKeyContext = r.Encryption.kmsContext
	}
	if err = rotateKey(ctx, []byte{}, newKeyID, []byte{}, r.Bucket, oi.Name, encMetadata, newKeyContext); err != nil {
		return err
	}
	// Since we are rotating the keys, make sure to update the metadata.
	oi.metadataOnly = true
	oi.keyRotation = true
	for k, v := range encMetadata {
		oi.UserDefined[k] = v
	}
	if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
		VersionID: oi.VersionID,
	}, ObjectOptions{
		VersionID: oi.VersionID,
		NoLock:    true,
	}); err != nil {
		return err
	}
	return nil
} | [
"func rotateKey(oldKey []byte, newKey []byte, metadata map[string]string) error {\n\tdelete(metadata, SSECustomerKey) // make sure we do not save the key by accident\n\n\tif metadata[ServerSideEncryptionSealAlgorithm] != SSESealAlgorithmDareSha256 { // currently DARE-SHA256 is the only option\n\t\treturn errObjectTampered\n\t}\n\tiv, err := base64.StdEncoding.DecodeString(metadata[ServerSideEncryptionIV])\n\tif err != nil || len(iv) != SSEIVSize {\n\t\treturn errObjectTampered\n\t}\n\tsealedKey, err := base64.StdEncoding.DecodeString(metadata[ServerSideEncryptionSealedKey])\n\tif err != nil || len(sealedKey) != 64 {\n\t\treturn errObjectTampered\n\t}\n\n\tsha := sha256.New() // derive key encryption key\n\tsha.Write(oldKey)\n\tsha.Write(iv)\n\tkeyEncryptionKey := sha.Sum(nil)\n\n\tobjectEncryptionKey := bytes.NewBuffer(nil) // decrypt object encryption key\n\tn, err := sio.Decrypt(objectEncryptionKey, bytes.NewReader(sealedKey), sio.Config{\n\t\tKey: keyEncryptionKey,\n\t})\n\tif n != 32 || err != nil { // Either the provided key does not match or the object was tampered.\n\t\tif subtle.ConstantTimeCompare(oldKey, newKey) == 1 {\n\t\t\treturn errInvalidSSEParameters // AWS returns special error for equal but invalid keys.\n\t\t}\n\t\treturn errSSEKeyMismatch // To provide strict AWS S3 compatibility we return: access denied.\n\t}\n\tif subtle.ConstantTimeCompare(oldKey, newKey) == 1 {\n\t\treturn nil // we don't need to rotate keys if newKey == oldKey\n\t}\n\n\tnonce := make([]byte, 32) // generate random values for key derivation\n\tif _, err = io.ReadFull(rand.Reader, nonce); err != nil {\n\t\treturn err\n\t}\n\n\tniv := sha256.Sum256(nonce[:]) // derive key encryption key\n\tsha = sha256.New()\n\tsha.Write(newKey)\n\tsha.Write(niv[:])\n\tkeyEncryptionKey = sha.Sum(nil)\n\n\tsealedKeyW := bytes.NewBuffer(nil) // sealedKey := 16 byte header + 32 byte payload + 16 byte tag\n\tn, err = sio.Encrypt(sealedKeyW, bytes.NewReader(objectEncryptionKey.Bytes()), sio.Config{\n\t\tKey: keyEncryptionKey,\n\t})\n\tif n != 64 || err != nil {\n\t\treturn errors.New(\"failed to seal object encryption key\") // if this happens there's a bug in the code (may panic ?)\n\t}\n\n\tmetadata[ServerSideEncryptionIV] = base64.StdEncoding.EncodeToString(niv[:])\n\tmetadata[ServerSideEncryptionSealAlgorithm] = SSESealAlgorithmDareSha256\n\tmetadata[ServerSideEncryptionSealedKey] = base64.StdEncoding.EncodeToString(sealedKeyW.Bytes())\n\treturn nil\n}",
"func (b *jwtBackend) rotateKey(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n keyName := d.Get(\"name\").(string)\n\n lock := locksutil.LockForKey(b.locks, keyName)\n lock.Lock()\n defer lock.Unlock()\n\n key, err := b.readKey(ctx, req, keyName)\n if err != nil {\n return nil, err\n }\n if key == nil {\n return logical.ErrorResponse(\"no such key\"), nil\n }\n\n b.logger.Info(\"rotating key\", \"key\", keyName, \"strength\", key.KeySize)\n\n currentVer, ok := key.Versions[key.CurrentVersion]\n if ok {\n currentVer.PrivateKey = nil\n currentVer.DeletedAt = ptypes.TimestampNow()\n } else {\n b.logger.Warn(\"current key version is missing from storage\", \"key\", keyName, \"version\", key.CurrentVersion)\n }\n\n sortedVersions := make([]uint32, 0)\n for key := range key.Versions {\n sortedVersions = append(sortedVersions, key)\n }\n\n if len(sortedVersions) >= int(key.MaxVersions) {\n sort.Slice(sortedVersions, func(i, j int) bool {\n creationI, _ := ptypes.Timestamp(key.Versions[sortedVersions[i]].CreatedAt)\n creationJ, _ := ptypes.Timestamp(key.Versions[sortedVersions[j]].CreatedAt)\n\n return creationI.Before(creationJ)\n })\n\n remaining := (uint32(len(sortedVersions)) - key.MaxVersions) + 1\n for i := 0; i < len(sortedVersions) && remaining > 0; i++ {\n delete(key.Versions, sortedVersions[i])\n b.logger.Debug(\"deleted key version\", \"key\", keyName, \"version\", sortedVersions[i])\n remaining--\n }\n }\n\n keyVersion := key.CurrentVersion + 1\n if keyVersion < key.CurrentVersion {\n b.logger.Warn(\"keyId has wrapped around\", \"key\", keyName, \"version\", key.CurrentVersion)\n }\n\n ver, err := b.generateKeyVersion(int(key.KeySize))\n\n key.Versions[keyVersion] = ver\n key.CurrentVersion = keyVersion\n b.writeKey(ctx, req, keyName, key)\n\n response := &logical.Response{\n Data: encodeKey(key),\n }\n return logical.RespondWithStatusCode(response, req, http.StatusOK)\n}",
"func (b *jwtBackend) rotatePath() *framework.Path {\n return &framework.Path{\n Pattern: \"keys/\" + framework.GenericNameRegex(\"name\") + \"/rotate\",\n HelpSynopsis: \"Rotates a signing key\",\n HelpDescription: `\n\nExchanges an existing private/public key pair and replaces it with a new version. This endpoint will\nautomatically remove old key versions and delete any prior stored private keys.\n\n`,\n Fields: map[string]*framework.FieldSchema{\n \"name\": {\n Type: framework.TypeString,\n Description: \"Signing key identifier\",\n },\n },\n Callbacks: map[logical.Operation]framework.OperationFunc{\n logical.UpdateOperation: b.rotateKey,\n },\n }\n}",
"func (s HTTPStore) RotateKey(role data.RoleName) ([]byte, error) {\n\turl, err := s.buildKeyURL(role)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := http.NewRequest(\"POST\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := s.roundTrip.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, NetworkError{Wrapped: err}\n\t}\n\tdefer resp.Body.Close()\n\tif err := translateStatusToError(resp, role.String()+\" key\"); err != nil {\n\t\treturn nil, err\n\t}\n\tb := io.LimitReader(resp.Body, MaxKeySize)\n\tbody, err := ioutil.ReadAll(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}",
"func (k keyCredential) Rotate(tx transaction.Transaction) (*msgraph.KeyCredential, *crypto.Jwk, error) {\n\tkeysInUse, err := k.filterRevokedKeys(tx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tkeyCredential, jwk, err := k.new(tx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tkeysInUse = append(keysInUse, *keyCredential)\n\n\tapp := util.EmptyApplication().Keys(keysInUse).Build()\n\tif err := k.Application().Patch(tx.Ctx, tx.Instance.GetObjectId(), app); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"updating application with keycredential: %w\", err)\n\t}\n\n\treturn keyCredential, jwk, nil\n}",
"func (m *MetadataSwizzler) RotateKey(role data.RoleName, key data.PublicKey) error {\n\troleSpecifier := data.CanonicalRootRole\n\tif data.IsDelegation(role) {\n\t\troleSpecifier = role.Parent()\n\t}\n\n\tb, err := m.MetadataCache.GetSized(roleSpecifier.String(), store.NoSizeLimit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsignedThing := &data.Signed{}\n\tif err := json.Unmarshal(b, signedThing); err != nil {\n\t\treturn err\n\t}\n\n\t// get keys before the keys are rotated\n\tpubKeys, err := getPubKeys(m.CryptoService, signedThing, roleSpecifier)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif roleSpecifier == data.CanonicalRootRole {\n\t\tsignedRoot, err := data.RootFromSigned(signedThing)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsignedRoot.Signed.Roles[role].KeyIDs = []string{key.ID()}\n\t\tsignedRoot.Signed.Keys[key.ID()] = key\n\t\tif signedThing, err = signedRoot.ToSigned(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsignedTargets, err := data.TargetsFromSigned(signedThing, roleSpecifier)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, roleObject := range signedTargets.Signed.Delegations.Roles {\n\t\t\tif roleObject.Name == role {\n\t\t\t\troleObject.KeyIDs = []string{key.ID()}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tsignedTargets.Signed.Delegations.Keys[key.ID()] = key\n\t\tif signedThing, err = signedTargets.ToSigned(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tmetaBytes, err := serializeMetadata(m.CryptoService, signedThing, roleSpecifier, pubKeys...)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn m.MetadataCache.Set(roleSpecifier.String(), metaBytes)\n}",
"func (km *KeysetManager) Rotate(kt *tinkpb.KeyTemplate) error {\n\tif kt == nil {\n\t\treturn fmt.Errorf(\"keyset_manager: cannot rotate, need key template\")\n\t}\n\tkeyData, err := NewKeyData(kt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"keyset_manager: cannot create KeyData: %s\", err)\n\t}\n\tkeyID := km.newKeyID()\n\toutputPrefixType := kt.OutputPrefixType\n\tif outputPrefixType == tinkpb.OutputPrefixType_UNKNOWN_PREFIX {\n\t\toutputPrefixType = tinkpb.OutputPrefixType_TINK\n\t}\n\tkey := &tinkpb.Keyset_Key{\n\t\tKeyData: keyData,\n\t\tStatus: tinkpb.KeyStatusType_ENABLED,\n\t\tKeyId: keyID,\n\t\tOutputPrefixType: outputPrefixType,\n\t}\n\t// Set the new key as the primary key\n\tkm.ks.Key = append(km.ks.Key, key)\n\tkm.ks.PrimaryKeyId = keyID\n\treturn nil\n}",
"func (c *Client) Rotate(tokenBytes []byte) error {\n\n\tnewPriv, newPub, err := RotateClientKeys(c.serverPublicKeyBytes, c.clientPrivateKeyBytes, tokenBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpub, err := PointUnmarshal(newPub)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.clientPrivateKeyBytes = newPriv\n\tc.clientPrivateKey = new(big.Int).SetBytes(newPriv)\n\tc.serverPublicKeyBytes = newPub\n\tc.serverPublicKey = pub\n\tc.negKey = gf.Neg(c.clientPrivateKey)\n\tc.invKey = gf.Inv(c.clientPrivateKey)\n\n\treturn nil\n}",
"func (e aesGCMEncodedEncryptor) RotateEncryption(ciphertext string) (string, error) {\n\tif !e.ConfiguredToRotate() {\n\t\treturn \"\", &EncryptionError{errors.New(\"key rotation not configured\")}\n\t}\n\n\tplaintext, err := e.Decrypt(ciphertext)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn e.Encrypt(plaintext)\n}",
"func TestEncryptionRotation(t *testing.T, scenario RotationScenario) {\n\t// test data\n\tns := scenario.Namespace\n\tlabelSelector := scenario.LabelSelector\n\n\t// step 1: create the desired resource\n\te := NewE(t)\n\tclientSet := GetClients(e)\n\tscenario.CreateResourceFunc(e, GetClients(e), ns)\n\n\t// step 2: run provided encryption scenario\n\tTestEncryptionType(t, scenario.BasicScenario, scenario.EncryptionProvider)\n\n\t// step 3: take samples\n\trawEncryptedResourceWithKey1 := scenario.GetRawResourceFunc(e, clientSet, ns)\n\n\t// step 4: force key rotation and wait for migration to complete\n\tlastMigratedKeyMeta, err := GetLastKeyMeta(t, clientSet.Kube, ns, labelSelector)\n\trequire.NoError(e, err)\n\trequire.NoError(e, ForceKeyRotation(e, scenario.UnsupportedConfigFunc, fmt.Sprintf(\"test-key-rotation-%s\", rand.String(4))))\n\tWaitForNextMigratedKey(e, clientSet.Kube, lastMigratedKeyMeta, scenario.TargetGRs, ns, labelSelector)\n\tscenario.AssertFunc(e, clientSet, scenario.EncryptionProvider, ns, labelSelector)\n\n\t// step 5: verify if the provided resource was encrypted with a different key (step 2 vs step 4)\n\trawEncryptedResourceWithKey2 := scenario.GetRawResourceFunc(e, clientSet, ns)\n\tif rawEncryptedResourceWithKey1 == rawEncryptedResourceWithKey2 {\n\t\tt.Errorf(\"expected the resource to has a different content after a key rotation,\\ncontentBeforeRotation %s\\ncontentAfterRotation %s\", rawEncryptedResourceWithKey1, rawEncryptedResourceWithKey2)\n\t}\n\n\t// TODO: assert conditions - operator and encryption migration controller must report status as active not progressing, and not failing for all scenarios\n}",
"func rotate(r float64) string { return fmt.Sprintf(`rotate(%g)`, r) }",
"func (g *Generator) rekey() error {\n\tfor i := keySize / g.cipher.BlockSize(); i > 0; i-- {\n\t\tg.readBlock(g.key[g.cipher.BlockSize()*i:])\n\t}\n\n\treturn g.updateCipher()\n}",
"func RotateAccessKeys() string {\n\tsess := session.Must(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: \"default\",\n\t}))\n\tclient := iam.New(sess)\n\tdeleteCurrentIamKey(client)\n\tnewKeyOutput, err := client.CreateAccessKey(&iam.CreateAccessKeyInput{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcfg := readCredentialsFile()\n\tfmt.Println(\"new IAM key is \", *newKeyOutput.AccessKey.AccessKeyId)\n\tcfg.Section(\"default_original\").Key(\"aws_access_key_id\").SetValue(*newKeyOutput.AccessKey.AccessKeyId)\n\tcfg.Section(\"default_original\").Key(\"aws_secret_access_key\").SetValue(*newKeyOutput.AccessKey.SecretAccessKey)\n\tlocation := writeCredentialsFile(cfg)\n\n\treturn location\n}",
"func lattigo_genRotationKeysForRotations(keygenHandle Handle5, skHandle Handle5, ks *C.int64_t, ksLen uint64) Handle5 {\n\tvar keygen *rlwe.KeyGenerator\n\tkeygen = getStoredKeyGenerator(keygenHandle)\n\n\tvar sk *rlwe.SecretKey\n\tsk = getStoredSecretKey(skHandle)\n\n\trotations := make([]int, ksLen)\n\tsize := unsafe.Sizeof(uint64(0))\n\tbasePtrIn := uintptr(unsafe.Pointer(ks))\n\tfor i := range rotations {\n\t\trotations[i] = int(*(*int64)(unsafe.Pointer(basePtrIn + size*uintptr(i))))\n\t}\n\n\tvar rotKeys *rlwe.RotationKeySet\n\t// The second argument determines if conjugation keys are generated or not. This wrapper API does\n\t// not support generating a conjugation key.\n\trotKeys = (*keygen).GenRotationKeysForRotations(rotations, false, sk)\n\treturn marshal.CrossLangObjMap.Add(unsafe.Pointer(rotKeys))\n}",
"func rotEncipher(text string, key int, alphabet string) string {\n\tsize := len(alphabet)\n\talphaRunes := []rune(alphabet)\n\trunes := []rune(text)\n\tfor i, char := range runes {\n\t\tif pos := strings.IndexRune(alphabet, char); pos != -1 {\n\t\t\trunes[i] = alphaRunes[mod(pos+key, size)]\n\t\t}\n\t}\n\treturn string(runes)\n}",
"func k8sRotate(t *testing.T, dir string) {\n\tk8sUpdate(t, dir, rotatedHubbleServerCertificate, rotatedHubbleServerPrivkey, rotatedHubbleServerCA)\n}",
"func RotateEncryptionKeys(dbp zesty.DBProvider) (err error) {\n\tdefer errors.DeferredAnnotatef(&err, \"Failed to rotate encrypted callbacks to new key\")\n\n\tvar last string\n\tfor {\n\t\tvar lastID *string\n\t\tif last != \"\" {\n\t\t\tlastID = &last\n\t\t}\n\t\t// load all callbacks\n\t\tcallbacks, err := listCallbacks(dbp, utask.MaxPageSize, lastID, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(callbacks) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tlast = callbacks[len(callbacks)-1].PublicID\n\n\t\tfor _, c := range callbacks {\n\t\t\tsp, err := dbp.TxSavepoint()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// load callback locked\n\t\t\tcb, err := loadFromPublicID(dbp, c.PublicID, true)\n\t\t\tif err != nil {\n\t\t\t\tdbp.RollbackTo(sp)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// update callback (encrypt)\n\t\t\tif err := cb.update(dbp); err != nil {\n\t\t\t\tdbp.RollbackTo(sp)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// commit\n\t\t\tif err := dbp.Commit(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func RotateEncryption(ciphertext string) (string, error) {\n\treturn defaultEncryptor.RotateEncryption(ciphertext)\n}",
"func (b *backend) pathConfigRotate() *framework.Path {\n return &framework.Path{\n\tPattern: fmt.Sprintf(\"config/rotate/?$\"),\n\tHelpSynopsis: \"Use the existing key to generate a set a new key\",\n\tHelpDescription: \"Use this endpoint to use the current key to generate a new key, and use that\",\n\n\tFields: map[string]*framework.FieldSchema{\n\t \"key_name\": &framework.FieldSchema{\n\t\tType: framework.TypeString,\n\t\tDescription: \"The name for the newly generated key.\",\n\t },\n\t},\n\n\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t logical.UpdateOperation: b.pathRotateKey,\n\t},\n }\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start the batch key rotation job; resumes a pending job, if any, via "job.ID" | func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
ri := &batchJobInfo{
JobID: job.ID,
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
return err
}
globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object
delay := job.KeyRotate.Flags.Retry.Delay
if delay == 0 {
delay = batchKeyRotateJobDefaultRetryDelay
}
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
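	// skip is used as the walk filter: it returns true only for objects that
	// match every configured filter and should therefore be key-rotated.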
skip := func(info FileInfo) (ok bool) {
if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
			// skip all objects that are newer than the specified OlderThan duration
return false
}
if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
			// skip all objects that are older than the specified NewerThan duration
return false
}
if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {
// skip all objects that are created before the specified time.
return false
}
if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {
// skip all objects that are created after the specified time.
return false
}
if len(r.Flags.Filter.Tags) > 0 {
// Only parse object tags if tags filter is specified.
tagMap := map[string]string{}
tagStr := info.Metadata[xhttp.AmzObjectTagging]
if len(tagStr) != 0 {
t, err := tags.ParseObjectTags(tagStr)
if err != nil {
return false
}
tagMap = t.ToMap()
}
for _, kv := range r.Flags.Filter.Tags {
for t, v := range tagMap {
if kv.Match(BatchKeyRotateKV{Key: t, Value: v}) {
return true
}
}
}
			// None of the provided tag filters match; skip the object
return false
}
if len(r.Flags.Filter.Metadata) > 0 {
for _, kv := range r.Flags.Filter.Metadata {
for k, v := range info.Metadata {
if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
continue
}
// We only need to match x-amz-meta or standardHeaders
if kv.Match(BatchKeyRotateKV{Key: k, Value: v}) {
return true
}
}
}
			// None of the provided metadata filters match; skip the object.
return false
}
if r.Flags.Filter.KMSKeyID != "" {
if v, ok := info.Metadata[xhttp.AmzServerSideEncryptionKmsID]; ok && strings.TrimPrefix(v, crypto.ARNPrefix) != r.Flags.Filter.KMSKeyID {
return false
}
}
return true
}
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_KEYROTATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
return err
}
wk, err := workers.New(workerSize)
if err != nil {
// invalid worker size.
return err
}
retryAttempts := ri.RetryAttempts
ctx, cancel := context.WithCancel(ctx)
results := make(chan ObjectInfo, 100)
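	// Walk the bucket under the configured prefix, resuming from the last
	// object recorded in the persisted job state.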
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, ObjectOptions{
WalkMarker: lastObject,
WalkFilter: skip,
}); err != nil {
cancel()
// Do not need to retry if we can't list objects on source.
return err
}
for result := range results {
result := result
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
		if !sseKMS && !sseS3 { // skip objects encrypted with neither sse-s3 nor sse-kms
continue
}
wk.Take()
go func() {
defer wk.Give()
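			// Attempt the key rotation up to retryAttempts times, persisting
			// job progress after every attempt.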
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
stopFn := globalBatchJobsMetrics.trace(batchKeyRotationMetricObject, job.ID, attempts, result)
success := true
if err := r.KeyRotate(ctx, api, result); err != nil {
stopFn(err)
logger.LogIf(ctx, err)
success = false
} else {
stopFn(nil)
}
ri.trackCurrentBucketObject(r.Bucket, result, success)
ri.RetryAttempts = attempts
globalBatchJobsMetrics.save(job.ID, ri)
				// persist in-memory state to disk after every 10 seconds.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if success {
break
}
}
}()
}
wk.Wait()
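	// All workers are done; record the final job outcome.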
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
buf, _ := json.Marshal(ri)
if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
if ri.Failed {
ri.ObjectsFailed = 0
ri.Bucket = ""
ri.Object = ""
ri.Objects = 0
time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
}
return nil
} | [
"func (r *JobRunner) startJob(ctx context.Context, startedAt time.Time) error {\n\tr.conf.Job.StartedAt = startedAt.UTC().Format(time.RFC3339Nano)\n\n\treturn roko.NewRetrier(\n\t\troko.WithMaxAttempts(7),\n\t\troko.WithStrategy(roko.Exponential(2*time.Second, 0)),\n\t).DoWithContext(ctx, func(rtr *roko.Retrier) error {\n\t\tresponse, err := r.apiClient.StartJob(ctx, r.conf.Job)\n\n\t\tif err != nil {\n\t\t\tif response != nil && api.IsRetryableStatus(response) {\n\t\t\t\tr.logger.Warn(\"%s (%s)\", err, rtr)\n\t\t\t} else if api.IsRetryableError(err) {\n\t\t\t\tr.logger.Warn(\"%s (%s)\", err, rtr)\n\t\t\t} else {\n\t\t\t\tr.logger.Warn(\"Buildkite rejected the call to start the job (%s)\", err)\n\t\t\t\trtr.Break()\n\t\t\t}\n\t\t}\n\n\t\treturn err\n\t})\n}",
"func (c *Client) JobStartAction(\n\tjobID string,\n\tresourceVersion uint64,\n\tinstanceRanges []*task.InstanceRange,\n\tbatchSize uint32,\n) error {\n\tvar response *job.StartResponse\n\tvar err error\n\tid := &peloton.JobID{\n\t\tValue: jobID,\n\t}\n\terr = c.retryUntilConcurrencyControlSucceeds(\n\t\tid,\n\t\tresourceVersion,\n\t\tfunc(resourceVersionParam uint64) error {\n\t\t\trequest := &job.StartRequest{\n\t\t\t\tId: id,\n\t\t\t\tRanges: instanceRanges,\n\t\t\t\tResourceVersion: resourceVersionParam,\n\t\t\t\tStartConfig: &job.StartConfig{\n\t\t\t\t\tBatchSize: batchSize,\n\t\t\t\t},\n\t\t\t}\n\t\t\tresponse, err = c.jobClient.Start(c.ctx, request)\n\t\t\treturn err\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprintResponseJSON(response)\n\treturn nil\n}",
"func StartJob(\n\tctx context.Context,\n\tconn sqlexec.SQLExecutor,\n\tjobID int64,\n) error {\n\tctx = util.WithInternalSourceType(ctx, kv.InternalLoadData)\n\t_, err := conn.ExecuteInternal(ctx,\n\t\t`UPDATE mysql.load_data_jobs\n\t\tSET start_time = CURRENT_TIMESTAMP(6), update_time = CURRENT_TIMESTAMP(6)\n\t\tWHERE job_id = %? AND start_time IS NULL;`,\n\t\tjobID)\n\treturn err\n}",
"func (driver *rclone) startJob(ctx context.Context, transferID string, srcRemote string, srcPath string, srcToken string, destRemote string, destPath string, destToken string) (*datatx.TxInfo, error) {\n\tlogger := appctx.GetLogger(ctx)\n\n\tdriver.pDriver.Lock()\n\tdefer driver.pDriver.Unlock()\n\n\tvar txID string\n\tvar cTime *typespb.Timestamp\n\n\tif transferID == \"\" {\n\t\ttxID = uuid.New().String()\n\t\tcTime = &typespb.Timestamp{Seconds: uint64(time.Now().Unix())}\n\t} else { // restart existing transfer if transferID is specified\n\t\tlogger.Debug().Msgf(\"Restarting transfer (txID: %s)\", transferID)\n\t\ttxID = transferID\n\t\ttransfer, err := driver.pDriver.model.getTransfer(txID)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"rclone: error retrying transfer (transferID: \"+txID+\")\")\n\t\t\treturn &datatx.TxInfo{\n\t\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\t\tStatus: datatx.Status_STATUS_INVALID,\n\t\t\t\tCtime: nil,\n\t\t\t}, err\n\t\t}\n\t\tseconds, _ := strconv.ParseInt(transfer.Ctime, 10, 64)\n\t\tcTime = &typespb.Timestamp{Seconds: uint64(seconds)}\n\t\t_, endStatusFound := txEndStatuses[transfer.TransferStatus.String()]\n\t\tif !endStatusFound {\n\t\t\terr := errors.New(\"rclone: transfer still running, unable to restart\")\n\t\t\treturn &datatx.TxInfo{\n\t\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\t\tStatus: transfer.TransferStatus,\n\t\t\t\tCtime: cTime,\n\t\t\t}, err\n\t\t}\n\t\tsrcToken = transfer.SrcToken\n\t\tsrcRemote = transfer.SrcRemote\n\t\tsrcPath = transfer.SrcPath\n\t\tdestToken = transfer.DestToken\n\t\tdestRemote = transfer.DestRemote\n\t\tdestPath = transfer.DestPath\n\t\tdelete(driver.pDriver.model.Transfers, txID)\n\t}\n\n\ttransferStatus := datatx.Status_STATUS_TRANSFER_NEW\n\n\ttransfer := &transfer{\n\t\tTransferID: txID,\n\t\tJobID: int64(-1),\n\t\tTransferStatus: transferStatus,\n\t\tSrcToken: srcToken,\n\t\tSrcRemote: srcRemote,\n\t\tSrcPath: srcPath,\n\t\tDestToken: destToken,\n\t\tDestRemote: destRemote,\n\t\tDestPath: destPath,\n\t\tCtime: fmt.Sprint(cTime.Seconds), // TODO do we need nanos here?\n\t}\n\n\tdriver.pDriver.model.Transfers[txID] = transfer\n\n\ttype rcloneAsyncReqJSON struct {\n\t\tSrcFs string `json:\"srcFs\"`\n\t\t// SrcToken string `json:\"srcToken\"`\n\t\tDstFs string `json:\"dstFs\"`\n\t\t// DstToken string `json:\"destToken\"`\n\t\tAsync bool `json:\"_async\"`\n\t}\n\tsrcFs := fmt.Sprintf(\":webdav,headers=\\\"x-access-token,%v\\\",url=\\\"%v\\\":%v\", srcToken, srcRemote, srcPath)\n\tdstFs := fmt.Sprintf(\":webdav,headers=\\\"x-access-token,%v\\\",url=\\\"%v\\\":%v\", destToken, destRemote, destPath)\n\trcloneReq := &rcloneAsyncReqJSON{\n\t\tSrcFs: srcFs,\n\t\tDstFs: dstFs,\n\t\tAsync: true,\n\t}\n\tdata, err := json.Marshal(rcloneReq)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"rclone: error pulling transfer: error marshalling rclone req data\")\n\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: datatx.Status_STATUS_INVALID,\n\t\t\tCtime: cTime,\n\t\t}, driver.pDriver.model.saveTransfer(err)\n\t}\n\n\ttransferFileMethod := \"/sync/copy\"\n\tremotePathIsFolder, err := driver.remotePathIsFolder(srcRemote, srcPath, srcToken)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"rclone: error pulling transfer: error stating src path\")\n\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: 
datatx.Status_STATUS_INVALID,\n\t\t\tCtime: cTime,\n\t\t}, driver.pDriver.model.saveTransfer(err)\n\t}\n\tif !remotePathIsFolder {\n\t\terr = errors.Wrap(err, \"rclone: error pulling transfer: path is a file, only folder transfer is implemented\")\n\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: datatx.Status_STATUS_INVALID,\n\t\t\tCtime: cTime,\n\t\t}, driver.pDriver.model.saveTransfer(err)\n\t}\n\n\tu, err := url.Parse(driver.config.Endpoint)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"rclone: error pulling transfer: error parsing driver endpoint\")\n\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: datatx.Status_STATUS_INVALID,\n\t\t\tCtime: cTime,\n\t\t}, driver.pDriver.model.saveTransfer(err)\n\t}\n\tu.Path = path.Join(u.Path, transferFileMethod)\n\trequestURL := u.String()\n\treq, err := http.NewRequest(\"POST\", requestURL, bytes.NewReader(data))\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"rclone: error pulling transfer: error framing post request\")\n\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_FAILED\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: transfer.TransferStatus,\n\t\t\tCtime: cTime,\n\t\t}, driver.pDriver.model.saveTransfer(err)\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.SetBasicAuth(driver.config.AuthUser, driver.config.AuthPass)\n\tres, err := driver.client.Do(req)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"rclone: error pulling transfer: error sending post request\")\n\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_FAILED\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: transfer.TransferStatus,\n\t\t\tCtime: cTime,\n\t\t}, driver.pDriver.model.saveTransfer(err)\n\t}\n\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\tvar errorResData rcloneHTTPErrorRes\n\t\tif err = json.NewDecoder(res.Body).Decode(&errorResData); err != nil {\n\t\t\terr = errors.Wrap(err, \"rclone driver: error decoding response data\")\n\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_FAILED\n\t\t\treturn &datatx.TxInfo{\n\t\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\t\tStatus: transfer.TransferStatus,\n\t\t\t\tCtime: cTime,\n\t\t\t}, driver.pDriver.model.saveTransfer(err)\n\t\t}\n\t\te := errors.New(\"rclone: rclone request responded with error, \" + fmt.Sprintf(\" status: %v, error: %v\", errorResData.Status, errorResData.Error))\n\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_FAILED\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: transfer.TransferStatus,\n\t\t\tCtime: cTime,\n\t\t}, driver.pDriver.model.saveTransfer(e)\n\t}\n\n\ttype rcloneAsyncResJSON struct {\n\t\tJobID int64 `json:\"jobid\"`\n\t}\n\tvar resData rcloneAsyncResJSON\n\tif err = json.NewDecoder(res.Body).Decode(&resData); err != nil {\n\t\terr = errors.Wrap(err, \"rclone: error decoding response data\")\n\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_FAILED\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: transfer.TransferStatus,\n\t\t\tCtime: cTime,\n\t\t}, driver.pDriver.model.saveTransfer(err)\n\t}\n\n\ttransfer.JobID = resData.JobID\n\n\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\terr = errors.Wrap(err, \"rclone: error pulling 
transfer\")\n\t\treturn &datatx.TxInfo{\n\t\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\t\tStatus: datatx.Status_STATUS_INVALID,\n\t\t\tCtime: cTime,\n\t\t}, err\n\t}\n\n\t// start separate dedicated process to periodically check the transfer progress\n\tgo func() {\n\t\t// runs for as long as no end state or time out has been reached\n\t\tstartTimeMs := time.Now().Nanosecond() / 1000\n\t\ttimeout := driver.config.JobTimeout\n\n\t\tdriver.pDriver.Lock()\n\t\tdefer driver.pDriver.Unlock()\n\n\t\tfor {\n\t\t\ttransfer, err := driver.pDriver.model.getTransfer(txID)\n\t\t\tif err != nil {\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\t\t\terr = driver.pDriver.model.saveTransfer(err)\n\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: unable to retrieve transfer with id: %v\", txID)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// check for end status first\n\t\t\t_, endStatusFound := txEndStatuses[transfer.TransferStatus.String()]\n\t\t\tif endStatusFound {\n\t\t\t\tlogger.Info().Msgf(\"rclone driver: transfer endstatus reached: %v\", transfer.TransferStatus)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// check for possible timeout and if true were done\n\t\t\tcurrentTimeMs := time.Now().Nanosecond() / 1000\n\t\t\ttimePastMs := currentTimeMs - startTimeMs\n\n\t\t\tif timePastMs > timeout {\n\t\t\t\tlogger.Info().Msgf(\"rclone driver: transfer timed out: %vms (timeout = %v)\", timePastMs, timeout)\n\t\t\t\t// set status to EXPIRED and save\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_EXPIRED\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: save transfer failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tjobID := transfer.JobID\n\t\t\ttype rcloneStatusReqJSON struct {\n\t\t\t\tJobID int64 `json:\"jobid\"`\n\t\t\t}\n\t\t\trcloneStatusReq := &rcloneStatusReqJSON{\n\t\t\t\tJobID: jobID,\n\t\t\t}\n\n\t\t\tdata, err := json.Marshal(rcloneStatusReq)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: marshalling request failed: %v\", err)\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: save transfer failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ttransferFileMethod := \"/job/status\"\n\n\t\t\tu, err := url.Parse(driver.config.Endpoint)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: could not parse driver endpoint: %v\", err)\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: save transfer failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tu.Path = path.Join(u.Path, transferFileMethod)\n\t\t\trequestURL := u.String()\n\n\t\t\treq, err := http.NewRequest(\"POST\", requestURL, bytes.NewReader(data))\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: error framing post request: %v\", err)\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: save transfer failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t\t\treq.SetBasicAuth(driver.config.AuthUser, driver.config.AuthPass)\n\t\t\tres, err := 
driver.client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: error sending post request: %v\", err)\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: save transfer failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tdefer res.Body.Close()\n\n\t\t\tif res.StatusCode != http.StatusOK {\n\t\t\t\tvar errorResData rcloneHTTPErrorRes\n\t\t\t\tif err = json.NewDecoder(res.Body).Decode(&errorResData); err != nil {\n\t\t\t\t\terr = errors.Wrap(err, \"rclone driver: error decoding response data\")\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: error reading response body: %v\", err)\n\t\t\t\t}\n\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: rclone request responded with error, status: %v, error: %v\", errorResData.Status, errorResData.Error)\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_INVALID\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: save transfer failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ttype rcloneStatusResJSON struct {\n\t\t\t\tFinished bool `json:\"finished\"`\n\t\t\t\tSuccess bool `json:\"success\"`\n\t\t\t\tID int64 `json:\"id\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t\tGroup string `json:\"group\"`\n\t\t\t\tStartTime string `json:\"startTime\"`\n\t\t\t\tEndTime string `json:\"endTime\"`\n\t\t\t\tDuration float64 `json:\"duration\"`\n\t\t\t\t// think we don't need this\n\t\t\t\t// \"output\": {} // output of the job as would have been returned if called synchronously\n\t\t\t}\n\t\t\tvar resData rcloneStatusResJSON\n\t\t\tif err = json.NewDecoder(res.Body).Decode(&resData); err != nil {\n\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: error decoding response data: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif resData.Error != \"\" {\n\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: rclone responded with error: %v\", resData.Error)\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_FAILED\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: error saving transfer: %v\", err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// transfer complete\n\t\t\tif resData.Finished && resData.Success {\n\t\t\t\tlogger.Info().Msg(\"rclone driver: transfer job finished\")\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_COMPLETE\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: error saving transfer: %v\", err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// transfer completed unsuccessfully without error\n\t\t\tif resData.Finished && !resData.Success {\n\t\t\t\tlogger.Info().Msgf(\"rclone driver: transfer job failed\")\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_FAILED\n\t\t\t\tif err := driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: error saving transfer: %v\", err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// transfer not yet finished: continue\n\t\t\tif !resData.Finished {\n\t\t\t\tlogger.Info().Msgf(\"rclone driver: transfer job in progress\")\n\t\t\t\ttransfer.TransferStatus = datatx.Status_STATUS_TRANSFER_IN_PROGRESS\n\t\t\t\tif err := 
driver.pDriver.model.saveTransfer(nil); err != nil {\n\t\t\t\t\tlogger.Error().Err(err).Msgf(\"rclone driver: error saving transfer: %v\", err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t<-time.After(time.Millisecond * time.Duration(driver.config.JobStatusCheckInterval))\n\t\t}\n\t}()\n\n\treturn &datatx.TxInfo{\n\t\tId: &datatx.TxId{OpaqueId: txID},\n\t\tStatus: transferStatus,\n\t\tCtime: cTime,\n\t}, nil\n}",
"func (server *WorkerServer) StartJob(ctx context.Context, cmd *proto.Command) (*proto.Job, error) {\n\tmd, ok := metadata.FromIncomingContext(ctx)\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"User identity unknown\")\n\t}\n\n\tif len(md[\"user\"]) == 0 {\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"User identity unknown\")\n\t}\n\n\tjobID, err := server.jobsManager.CreateJob(cmd.Cmd, cmd.Args, md[\"user\"][0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &proto.Job{\n\t\tId: jobID,\n\t}, nil\n}",
"func (m *Manager) Start(ID string) error {\n\te, ok := m.Entries[ID]\n\tif !ok {\n\t\treturn ErrorInvalidJobID\n\t}\n\te.Start(m.ctx)\n\treturn nil\n}",
"func (c *client) startNewJob(ctx context.Context, opts launcher.LaunchOptions, jobInterface v12.JobInterface, ns string, safeName string, safeSha string) ([]runtime.Object, error) {\n\tlog.Logger().Infof(\"about to create a new job for name %s and sha %s\", safeName, safeSha)\n\n\t// lets see if we are using a version stream to store the git operator configuration\n\tfolder := filepath.Join(opts.Dir, \"versionStream\", \"git-operator\")\n\texists, err := files.DirExists(folder)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check if folder exists %s\", folder)\n\t}\n\tif !exists {\n\t\t// lets try the original location\n\t\tfolder = filepath.Join(opts.Dir, \".jx\", \"git-operator\")\n\t}\n\n\tjobFileName := \"job.yaml\"\n\n\tfileNamePath := filepath.Join(opts.Dir, \".jx\", \"git-operator\", \"filename.txt\")\n\texists, err = files.FileExists(fileNamePath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check for file %s\", fileNamePath)\n\t}\n\tif exists {\n\t\tdata, err := ioutil.ReadFile(fileNamePath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to load file %s\", fileNamePath)\n\t\t}\n\t\tjobFileName = strings.TrimSpace(string(data))\n\t\tif jobFileName == \"\" {\n\t\t\treturn nil, errors.Errorf(\"the job name file %s is empty\", fileNamePath)\n\t\t}\n\t}\n\n\tfileName := filepath.Join(folder, jobFileName)\n\texists, err = files.FileExists(fileName)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to find file %s in repository %s\", fileName, safeName)\n\t}\n\tif !exists {\n\t\treturn nil, errors.Errorf(\"repository %s does not have a Job file: %s\", safeName, fileName)\n\t}\n\n\tresource := &v1.Job{}\n\terr = yamls.LoadFile(fileName, resource)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load Job file %s in repository %s\", fileName, safeName)\n\t}\n\n\tif !opts.NoResourceApply {\n\t\t// now lets check if there is a resources dir\n\t\tresourcesDir := filepath.Join(folder, \"resources\")\n\t\texists, err = files.DirExists(resourcesDir)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to check if resources directory %s exists in repository %s\", resourcesDir, safeName)\n\t\t}\n\t\tif exists {\n\t\t\tabsDir, err := filepath.Abs(resourcesDir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get absolute resources dir %s\", resourcesDir)\n\t\t\t}\n\n\t\t\tcmd := &cmdrunner.Command{\n\t\t\t\tName: \"kubectl\",\n\t\t\t\tArgs: []string{\"apply\", \"-f\", absDir},\n\t\t\t}\n\t\t\tlog.Logger().Infof(\"running command: %s\", cmd.CLI())\n\t\t\t_, err = c.runner(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to apply resources in dir %s\", absDir)\n\t\t\t}\n\t\t}\n\t}\n\n\t// lets try use a maximum of 31 characters and a minimum of 10 for the sha\n\tnamePrefix := trimLength(safeName, 20)\n\n\tid := uuid.New().String()\n\tresourceName := namePrefix + \"-\" + id\n\n\tresource.Name = resourceName\n\n\tif resource.Labels == nil {\n\t\tresource.Labels = map[string]string{}\n\t}\n\tresource.Labels[constants.DefaultSelectorKey] = constants.DefaultSelectorValue\n\tresource.Labels[launcher.RepositoryLabelKey] = safeName\n\tresource.Labels[launcher.CommitShaLabelKey] = safeSha\n\n\tr2, err := jobInterface.Create(ctx, resource, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create Job %s in namespace %s\", resourceName, ns)\n\t}\n\tlog.Logger().Infof(\"created Job %s in namespace %s\", resourceName, 
ns)\n\treturn []runtime.Object{r2}, nil\n}",
"func (b *CreateBatchesWorker) Process(message *workers.Msg) {\n\n\tvar msg BatchPart\n\tdata := message.Args().ToJson()\n\terr := json.Unmarshal([]byte(data), &msg)\n\tcheckErr(b.Logger, err)\n\n\tl := b.Logger.With(\n\t\tzap.String(\"worker\", nameCreateBatches),\n\t\tzap.Int(\"part\", msg.Part),\n\t\tzap.Int(\"totalParts\", msg.TotalParts),\n\t)\n\n\tb.Workers.Statsd.Incr(CreateBatchesWorkerStart, msg.Job.Labels(), 1)\n\n\terr = b.Workers.MarathonDB.Model(&msg.Job).Column(\"job.status\", \"App\").Where(\"job.id = ?\", msg.Job.ID).Select()\n\tb.checkErr(&msg.Job, err)\n\n\tif msg.Job.Status == stoppedJobStatus {\n\t\tl.Info(\"stopped job\")\n\t\tb.Workers.Statsd.Incr(CreateBatchesWorkerCompleted, msg.Job.Labels(), 1)\n\t\treturn\n\t}\n\tl.Info(\"starting\")\n\n\t// if is the first element\n\tif msg.Part == 0 {\n\t\tmsg.Job.TagRunning(b.Workers.MarathonDB, nameCreateBatches, \"starting\")\n\t}\n\n\tstart := time.Now()\n\t_, buffer, err := b.Workers.S3Client.DownloadChunk(int64(msg.Start), int64(msg.Size), msg.Job.CSVPath)\n\tlabels := msg.Job.Labels()\n\tlabels = append(labels, fmt.Sprintf(\"error:%t\", err != nil))\n\tb.Workers.Statsd.Timing(GetCsvFromS3Timing, time.Now().Sub(start), labels, 1)\n\tb.checkErr(&msg.Job, err)\n\n\tids := b.getIDs(buffer, &msg)\n\n\t// pull from db, send to control and send to kafka\n\tb.processIDs(ids, &msg)\n\n\tcompletedParts := b.setAsComplete(msg.Part, &msg.Job)\n\n\tif completedParts == msg.TotalParts {\n\t\tids = b.getSplitedIds(msg.TotalParts, &msg.Job)\n\n\t\tb.processIDs(ids, &msg)\n\n\t\tif msg.Job.TotalUsers == 0 {\n\t\t\t_, err := b.Workers.MarathonDB.Model(&msg.Job).Set(\"status = 'stopped', updated_at = ?, completed_at = ?\", time.Now().UnixNano(), time.Now().UnixNano()).Where(\"id = ?\", msg.Job.ID).Update()\n\t\t\tb.checkErr(&msg.Job, err)\n\t\t\t//b.updateCompletedAt(time.Now().UnixNano(), &msg.Job)\n\t\t\tmsg.Job.TagError(b.Workers.MarathonDB, nameCreateBatches, \"the job has finished without finding any valid user ids\")\n\t\t\tb.Workers.Statsd.Incr(CreateBatchesWorkerError, msg.Job.Labels(), 1)\n\t\t} else {\n\t\t\tmsg.Job.TagSuccess(b.Workers.MarathonDB, nameCreateBatches, \"finished\")\n\t\t\tb.Workers.Statsd.Incr(CreateBatchesWorkerCompleted, msg.Job.Labels(), 1)\n\t\t}\n\n\t\t// TODO: schedule a job to run after send all messages. This job will check\n\t\t// for errors and delete waste if a error happen\n\t} else {\n\t\tstr := fmt.Sprintf(\"complete part %d of %d\", completedParts, msg.TotalParts)\n\t\tmsg.Job.TagRunning(b.Workers.MarathonDB, nameCreateBatches, str)\n\t}\n\tids = nil\n\n\tl.Info(\"finished\")\n}",
"func (r *Runner) Start(job Job) error {\n\tselect {\n\tcase r.jobs <- job:\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"jobs queue overflowed\")\n\t}\n}",
"func (jr *JobRunner) runJob(id string, cmd *exec.Cmd) error {\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutput := io.MultiReader(stdout, stderr)\n\n\tlb, err := NewLogBuffer(id)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer lb.Close()\n\n\tjr.store.UpdateRecordOutput(id, lb)\n\n\terr = cmd.Start()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjr.store.UpdateRecordState(id, JobState(Running))\n\n\tif _, err := io.Copy(lb, output); err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.Wait()\n}",
"func (j *AuroraJob) JobKey() *aurora.JobKey {\n\treturn j.jobConfig.Key\n}",
"func Enqueue(job *models.Job) {\n\n\tsession, err := mgo.Dial(os.Getenv(\"MONGODB_URI\"))\n\tif err != nil {\n\t\tlogInternalError(\"DB SESSION\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfailed(session, job, fmt.Errorf(\"%v\", err))\n\t\t}\n\t\tsession.Close()\n\t}()\n\n\tc := models.Jobs(session)\n\n\tif err = c.UpdateId(job.ID, bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"status\": models.Running,\n\t\t\t\"started_at\": time.Now(),\n\t\t},\n\t}); err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\tif err = c.FindId(job.ID).One(job); err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\tmachine, err := fetchMachineConfig()\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\tif len(job.Workflow) == 0 {\n\t\tfailed(session, job, fmt.Errorf(\"No any workflow specified\"))\n\t\treturn\n\t}\n\timg := job.Workflow[0]\n\n\tenv := []string{\n\t\tfmt.Sprintf(\"REFERENCE=%s\", \"GRCh37.fa\"),\n\t}\n\tfor key, input := range job.Resource.Inputs {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", key, input))\n\t}\n\tfor key, param := range job.Parameters {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", key, param))\n\t}\n\n\t// Ensure outputs directory exsits.\n\tos.MkdirAll(filepath.Join(job.Resource.URL, \"out\"), os.ModePerm)\n\n\targ := daap.Args{\n\t\tMachine: machine,\n\t\tMounts: []daap.Mount{\n\t\t\t// Mount inputs and outpus directory.\n\t\t\tdaap.Volume(job.Resource.URL, \"/var/data\"),\n\t\t},\n\t\tEnv: env,\n\t}\n\n\tprocess := daap.NewProcess(img, arg)\n\n\tctx := context.Background()\n\tif err = process.Run(ctx); err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\tout, err := ioutil.ReadAll(process.Stdout)\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\tserr, err := ioutil.ReadAll(process.Stderr)\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\tapplog, err := ioutil.ReadAll(process.Log)\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\terr = models.Jobs(session).UpdateId(job.ID, bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"stdout\": string(out),\n\t\t\t\"stderr\": string(serr),\n\t\t\t\"applog\": string(applog),\n\t\t},\n\t})\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\t// TODO: Use \"Salamander\"\n\tresults, err := detectResultFiles(job)\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\tif err := c.UpdateId(job.ID, bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"status\": models.Completed,\n\t\t\t\"results\": results,\n\t\t\t\"finished_at\": time.Now(),\n\t\t},\n\t}); err != nil {\n\t\tfailed(session, job, err)\n\t}\n\n}",
"func (self *JobPipeline) Start() {\n\tself.host, _ = os.Hostname()\n\tself.id = fmt.Sprintf(\"%s|%s\", self.TaskName, self.host)\n\tself.refresh_interval = time.Second\n\tself.last_update = time.Now().Add(-1 * self.refresh_interval)\n\tself.queue = []*Document{}\n\tself.running_jobs = map[string]bool{}\n\n\tlog.Printf(\"Starting %s\", self.TaskName)\n\tself.workers = make([]*Worker, self.NumWorkers)\n\tfor i := 0; i < self.NumWorkers; i++ {\n\t\tself.workers[i] = &Worker{Client: self.Client, Task: self.Task}\n\t\tself.workers[i].Start()\n\t}\n}",
"func startNewSession(job Job, initial State, \n seshExp time.Duration, storage Storage, out chan<- Job) {\n id, err := GenerateRandomString(32)\n if (err != nil) {\n // The random generator failed.\n job.SetResult(errors.New(\"Session-Id generation failed\"))\n out <- job\n return\n }\n\n // Generate the current state.\n state, err := initial.Next(job)\n if (err != nil) {\n job.SetResult(err)\n out <- job\n return\n }\n\n // Store the mapping from the id to the current state.\n storage.Set(id, StorageValue{state, time.Now().Add(seshExp)})\n job.SetResult(state.Result())\n job.SetHeader(\"Session-Id\", id)\n out <- job\n}",
"func (s *Store) Start() error {\n\ts.stmtOnce.Do(s.initStmt)\n\n\tctx := context.Background()\n\terr := internal.RunInTxWithRetry(ctx, s.db, func(ctx context.Context, tx *sql.Tx) error {\n\t\t_, err := tx.ExecContext(\n\t\t\tctx,\n\t\t\t`UPDATE jobqueue_jobs SET state = ?, completed = ? WHERE state = ?`,\n\t\t\tjobqueue.Failed,\n\t\t\ttime.Now().UnixNano(),\n\t\t\tjobqueue.Working,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}, func(err error) bool {\n\t\treturn internal.IsDeadlock(err)\n\t})\n\tif err != nil {\n\t\treturn s.wrapError(err)\n\t}\n\treturn nil\n}",
"func (j *Job) Start() {\n\tj.status.Set(StatusReady)\n}",
"func (fc *FederatedController) syncFLJob(key string) (bool, error) {\n\tstartTime := time.Now()\n\tdefer func() {\n\t\tklog.V(4).Infof(\"Finished syncing federatedlearning job %q (%v)\", key, time.Since(startTime))\n\t}()\n\n\tns, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif len(ns) == 0 || len(name) == 0 {\n\t\treturn false, fmt.Errorf(\"invalid federatedlearning job key %q: either namespace or name is missing\", key)\n\t}\n\tsharedFLJob, err := fc.jobLister.FederatedLearningJobs(ns).Get(name)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tklog.V(4).Infof(\"FLJob has been deleted: %v\", key)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tflJob := *sharedFLJob\n\t// set kind for flJob in case that the kind is None\n\tflJob.SetGroupVersionKind(neptunev1.SchemeGroupVersion.WithKind(\"FederatedLearningJob\"))\n\t// if flJob was finished previously, we don't want to redo the termination\n\tif IsFLJobFinished(&flJob) {\n\t\treturn true, nil\n\t}\n\tselector, _ := GenerateSelector(&flJob)\n\tpods, err := fc.podStore.Pods(flJob.Namespace).List(selector)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tactivePods := k8scontroller.FilterActivePods(pods)\n\tactive := int32(len(activePods))\n\tsucceeded, failed := getStatus(pods)\n\tconditions := len(flJob.Status.Conditions)\n\t// flJob first start\n\tif flJob.Status.StartTime == nil {\n\t\tnow := metav1.Now()\n\t\tflJob.Status.StartTime = &now\n\t}\n\n\tvar manageJobErr error\n\tjobFailed := false\n\tvar failureReason string\n\tvar failureMessage string\n\tphase := flJob.Status.Phase\n\n\tif failed > 0 {\n\t\tjobFailed = true\n\t\tfailureReason = \"workerFailed\"\n\t\tfailureMessage = \"the worker of FLJob failed\"\n\t}\n\n\tif jobFailed {\n\t\tflJob.Status.Conditions = append(flJob.Status.Conditions, NewFLJobCondition(neptunev1.FLJobCondFailed, failureReason, failureMessage))\n\t\tflJob.Status.Phase = neptunev1.FLJobFailed\n\t\tfc.recorder.Event(&flJob, v1.EventTypeWarning, failureReason, failureMessage)\n\t} else {\n\t\t// in the First time, we create the pods\n\t\tif len(pods) == 0 {\n\t\t\tactive, manageJobErr = fc.createPod(&flJob)\n\t\t}\n\t\tcomplete := false\n\t\tif succeeded > 0 && active == 0 {\n\t\t\tcomplete = true\n\t\t}\n\t\tif complete {\n\t\t\tflJob.Status.Conditions = append(flJob.Status.Conditions, NewFLJobCondition(neptunev1.FLJobCondComplete, \"\", \"\"))\n\t\t\tnow := metav1.Now()\n\t\t\tflJob.Status.CompletionTime = &now\n\t\t\tfc.recorder.Event(&flJob, v1.EventTypeNormal, \"Completed\", \"FLJob completed\")\n\t\t\tflJob.Status.Phase = neptunev1.FLJobSucceeded\n\t\t} else {\n\t\t\tflJob.Status.Phase = neptunev1.FLJobRunning\n\t\t}\n\t}\n\n\tforget := false\n\t// Check if the number of jobs succeeded increased since the last check. 
If yes \"forget\" should be true\n\t// This logic is linked to the issue: https://github.com/kubernetes/kubernetes/issues/56853 that aims to\n\t// improve the FLJob backoff policy when parallelism > 1 and few FLJobs failed but others succeed.\n\t// In this case, we should clear the backoff delay.\n\tif flJob.Status.Succeeded < succeeded {\n\t\tforget = true\n\t}\n\n\t// no need to update the flJob if the status hasn't changed since last time\n\tif flJob.Status.Active != active || flJob.Status.Succeeded != succeeded || flJob.Status.Failed != failed || len(flJob.Status.Conditions) != conditions || flJob.Status.Phase != phase {\n\t\tflJob.Status.Active = active\n\t\tflJob.Status.Succeeded = succeeded\n\t\tflJob.Status.Failed = failed\n\n\t\tif jobFailed && !IsFLJobFinished(&flJob) {\n\t\t\t// returning an error will re-enqueue FLJob after the backoff period\n\t\t\treturn forget, fmt.Errorf(\"failed pod(s) detected for flJob key %q\", key)\n\t\t}\n\n\t\tforget = true\n\t}\n\n\treturn forget, manageJobErr\n}",
"func (tc *testContext) runJob(name string, command []string) (string, error) {\n\t// Create a job which runs the provided command via SSH\n\tkeyMountDir := \"/private-key\"\n\tkeyMode := int32(0600)\n\tjob := &batch.Job{\n\t\tObjectMeta: meta.ObjectMeta{\n\t\t\tGenerateName: name + \"-job-\",\n\t\t},\n\t\tSpec: batch.JobSpec{\n\t\t\tTemplate: core.PodTemplateSpec{\n\t\t\t\tSpec: core.PodSpec{\n\t\t\t\t\tOS: &core.PodOS{Name: core.Linux},\n\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\tRestartPolicy: core.RestartPolicyNever,\n\t\t\t\t\tServiceAccountName: tc.workloadNamespace,\n\t\t\t\t\tContainers: []core.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\tImage: tc.toolsImage,\n\t\t\t\t\t\t\tImagePullPolicy: core.PullIfNotPresent,\n\t\t\t\t\t\t\tCommand: command,\n\t\t\t\t\t\t\tVolumeMounts: []core.VolumeMount{{\n\t\t\t\t\t\t\t\tName: \"private-key\",\n\t\t\t\t\t\t\t\tMountPath: keyMountDir,\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []core.Volume{{Name: \"private-key\", VolumeSource: core.VolumeSource{\n\t\t\t\t\t\tSecret: &core.SecretVolumeSource{\n\t\t\t\t\t\t\tSecretName: secrets.PrivateKeySecret,\n\t\t\t\t\t\t\tDefaultMode: &keyMode,\n\t\t\t\t\t\t},\n\t\t\t\t\t}}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tjobsClient := tc.client.K8s.BatchV1().Jobs(tc.workloadNamespace)\n\tjob, err := jobsClient.Create(context.TODO(), job, meta.CreateOptions{})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error creating job: %w\", err)\n\t}\n\n\t// Wait for the job to complete then gather and return the pod output\n\tif err = tc.waitUntilJobSucceeds(job.GetName()); err != nil {\n\t\treturn \"\", fmt.Errorf(\"error waiting for job to succeed: %w\", err)\n\t}\n\tlabelSelector := \"job-name=\" + job.Name\n\tlogs, err := tc.getLogs(labelSelector)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting logs from job pod: %w\", err)\n\t}\n\treturn logs, nil\n}",
"func (j *Job) begin() (Executor, error) {\n\tj.mu.Lock()\n\tdefer j.mu.Unlock()\n\n\tif j.st > monitor.PENDING {\n\t\treturn nil, JobBegunError{}\n\t}\n\n\tj.st = monitor.ACTIVE\n\n\tgo j.finish()\n\n\treturn j.exec, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
toGA is a utility method to return the baseInstance data as a GA Instance object | func (bi *baseInstance) toGA() *ga.Instance {
inst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}
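	// Attach the alias IP range to the primary interface when one is configured.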
if bi.aliasRange != "" {
inst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{
{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
}
}
return inst
} | [
"func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func (conf GAConfig) NewGA() (*GA, error) {\n\t// Check for default values\n\tif conf.RNG == nil {\n\t\tconf.RNG = rand.New(rand.NewSource(time.Now().UnixNano()))\n\t}\n\t// Check the configuration is valid\n\tif conf.NPops == 0 {\n\t\treturn nil, errors.New(\"NPops has to be strictly higher than 0\")\n\t}\n\tif conf.PopSize == 0 {\n\t\treturn nil, errors.New(\"PopSize has to be strictly higher than 0\")\n\t}\n\tif conf.NGenerations == 0 {\n\t\treturn nil, errors.New(\"NGenerations has to be strictly higher than 0\")\n\t}\n\tif conf.HofSize == 0 {\n\t\treturn nil, errors.New(\"HofSize has to be strictly higher than 0\")\n\t}\n\tif conf.Model == nil {\n\t\treturn nil, errors.New(\"Model has to be provided\")\n\t}\n\tif modelErr := conf.Model.Validate(); modelErr != nil {\n\t\treturn nil, modelErr\n\t}\n\tif conf.Migrator != nil {\n\t\tif migErr := conf.Migrator.Validate(); migErr != nil {\n\t\t\treturn nil, migErr\n\t\t}\n\t\tif conf.MigFrequency == 0 {\n\t\t\treturn nil, errors.New(\"MigFrequency should be higher than 0\")\n\t\t}\n\t}\n\tif conf.Speciator != nil {\n\t\tif specErr := conf.Speciator.Validate(); specErr != nil {\n\t\t\treturn nil, specErr\n\t\t}\n\t}\n\t// Initialize the GA\n\treturn &GA{GAConfig: conf}, nil\n}",
"func ToBaseFiatPeg(fiatPeg FiatPeg) BaseFiatPeg {\n\tvar baseFiatPeg BaseFiatPeg\n\tbaseFiatPeg.Owners = fiatPeg.GetOwners()\n\tbaseFiatPeg.PegHash = fiatPeg.GetPegHash()\n\tbaseFiatPeg.RedeemedAmount = fiatPeg.GetRedeemedAmount()\n\tbaseFiatPeg.TransactionAmount = fiatPeg.GetTransactionAmount()\n\tbaseFiatPeg.TransactionID = fiatPeg.GetTransactionID()\n\treturn baseFiatPeg\n}",
"func getPSQLDataStoreObj() *postgreSqlDataStore {\n dbOnce.Do(func() {\n sqlObj.dblogger = logging.GetAppLoggerObj()\n sqlObj.dblogger.Trace(\"PostgreSql DB Object is created successfully\")\n return\n })\n return sqlObj\n}",
"func ToGObject(p unsafe.Pointer) *C.GObject {\n\treturn (*C.GObject)(p)\n}",
"func SomeGraphToJSONable(\n\tinstance *SomeGraph) (\n\ttarget map[string]interface{}, err error) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttarget = nil\n\t\t}\n\t}()\n\t////\n\t// Serialize instance registry of SomeClass\n\t////\n\n\tif len(instance.SomeClasses) > 0 {\n\t\ttargetSomeClasses := make(map[string]interface{})\n\t\tfor id := range instance.SomeClasses {\n\t\t\tsomeClassInstance := instance.SomeClasses[id]\n\n\t\t\tif id != someClassInstance.ID {\n\t\t\t\terr = fmt.Errorf(\n\t\t\t\t\t\"expected the instance of SomeClass to have the ID %s according to the registry, but got: %s\",\n\t\t\t\t\tid, someClassInstance.ID)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttargetSomeClasses[id] = SomeClassToJSONable(\n\t\t\t\tsomeClassInstance)\n\t\t}\n\n\t\ttarget[\"some_classes\"] = targetSomeClasses\n\t}\n\n\t////\n\t// Serialize instance registry of OtherClass\n\t////\n\n\tif len(instance.OtherClasses) > 0 {\n\t\ttargetOtherClasses := make(map[string]interface{})\n\t\tfor id := range instance.OtherClasses {\n\t\t\totherClassInstance := instance.OtherClasses[id]\n\n\t\t\tif id != otherClassInstance.ID {\n\t\t\t\terr = fmt.Errorf(\n\t\t\t\t\t\"expected the instance of OtherClass to have the ID %s according to the registry, but got: %s\",\n\t\t\t\t\tid, otherClassInstance.ID)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttargetOtherClasses[id] = OtherClassToJSONable(\n\t\t\t\totherClassInstance)\n\t\t}\n\n\t\ttarget[\"other_classes\"] = targetOtherClasses\n\t}\n\n\treturn\n}",
"func (v *Variant) ToGVariant() *C.GVariant {\n\tif v == nil {\n\t\treturn nil\n\t}\n\treturn v.native()\n}",
"func NewGraph(base Base) {\n\n}",
"func ProtoToInstance(p *spannerpb.SpannerInstance) *spanner.Instance {\n\tobj := &spanner.Instance{\n\t\tName: dcl.StringOrNil(p.Name),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t\tConfig: dcl.StringOrNil(p.Config),\n\t\tDisplayName: dcl.StringOrNil(p.DisplayName),\n\t\tNodeCount: dcl.Int64OrNil(p.NodeCount),\n\t\tState: ProtoToSpannerInstanceStateEnum(p.GetState()),\n\t}\n\treturn obj\n}",
"func (s StatsGraph) construct() StatsGraphClass { return &s }",
"func GetPostgressInstance() *Postgres {\n\t// Singleton instance\n\tonce.Do(func() {\n\t\tinstance = connection()\n\t})\n\treturn instance\n}",
"func InstanceToProto(resource *spanner.Instance) *spannerpb.SpannerInstance {\n\tp := &spannerpb.SpannerInstance{\n\t\tName: dcl.ValueOrEmptyString(resource.Name),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t\tConfig: dcl.ValueOrEmptyString(resource.Config),\n\t\tDisplayName: dcl.ValueOrEmptyString(resource.DisplayName),\n\t\tNodeCount: dcl.ValueOrEmptyInt64(resource.NodeCount),\n\t\tState: SpannerInstanceStateEnumToProto(resource.State),\n\t}\n\n\treturn p\n}",
"func newDataInstance(repo datastore.Repo, t *testing.T, name dvid.DataString) *Data {\n\tconfig := dvid.NewConfig()\n\tconfig.SetVersioned(true)\n\tdataservice, err := repo.NewData(labelsT, name, config)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to create labels64 instance %q: %s\\n\", name, err.Error())\n\t}\n\tlabels, ok := dataservice.(*Data)\n\tif !ok {\n\t\tt.Errorf(\"Can't cast labels data service into Data\\n\")\n\t}\n\treturn labels\n}",
"func (track *Track) ToDb() interface{} {\n\treturn track.Id\n}",
"func OtherClassToJSONable(\n\tinstance *OtherClass) (\n\ttarget map[string]interface{}) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\n\t////\n\t// Serialize ReferenceSome\n\t////\n\n\ttarget[\"reference_some\"] = instance.ReferenceSome.ID\n\n\t////\n\t// Serialize ArrayOfSomes\n\t////\n\n\tcount0 := len(instance.ArrayOfSomes)\n\tslice0 := instance.ArrayOfSomes\n\ttarget0 := make([]interface{}, count0)\n\tfor i0 := 0; i0 < count0; i0++ {\n\t\ttarget0[i0] = slice0[i0].ID\n\t}\n\ttarget[\"array_of_somes\"] = target0\n\n\t////\n\t// Serialize MapOfSomes\n\t////\n\n\ttarget1 := make(map[string]interface{})\n\tmap1 := instance.MapOfSomes\n\tfor k1, v1 := range map1 {\n\t\ttarget1[k1] = v1.ID\n\t}\n\ttarget[\"map_of_somes\"] = target1\n\n\treturn\n}",
"func (dao *DaoPetMongodb) toBo(gbo godal.IGenericBo) *BoPet {\n\tif gbo == nil {\n\t\treturn nil\n\t}\n\treturn (&BoPet{}).fromGenericBo(gbo)\n}",
"func newdbBasePostgres() dbBaser {\n\tb := new(dbBasePostgres)\n\tb.ins = b\n\treturn b\n}",
"func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
toBeta is a utility method to return the baseInstance data as a beta Instance object | func (bi *baseInstance) toBeta() *beta.Instance {
inst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}
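	// As in toGA, attach the alias IP range to the primary interface when configured.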
if bi.aliasRange != "" {
inst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{
{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
}
}
return inst
} | [
"func (bi *baseInstance) toGA() *ga.Instance {\n\tinst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func InstanceToProto(resource *spanner.Instance) *spannerpb.SpannerInstance {\n\tp := &spannerpb.SpannerInstance{\n\t\tName: dcl.ValueOrEmptyString(resource.Name),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t\tConfig: dcl.ValueOrEmptyString(resource.Config),\n\t\tDisplayName: dcl.ValueOrEmptyString(resource.DisplayName),\n\t\tNodeCount: dcl.ValueOrEmptyInt64(resource.NodeCount),\n\t\tState: SpannerInstanceStateEnumToProto(resource.State),\n\t}\n\n\treturn p\n}",
"func newDataInstance(uuid dvid.UUID, t *testing.T, name dvid.InstanceName) *Data {\n\tconfig := dvid.NewConfig()\n\tdataservice, err := datastore.NewData(uuid, labelsT, name, config)\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to create labelblk instance %q: %v\\n\", name, err)\n\t}\n\tlabels, ok := dataservice.(*Data)\n\tif !ok {\n\t\tt.Fatalf(\"Can't cast labels data service into Data\\n\")\n\t}\n\treturn labels\n}",
"func newDataInstance(repo datastore.Repo, t *testing.T, name dvid.DataString) *Data {\n\tconfig := dvid.NewConfig()\n\tconfig.SetVersioned(true)\n\tdataservice, err := repo.NewData(labelsT, name, config)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to create labels64 instance %q: %s\\n\", name, err.Error())\n\t}\n\tlabels, ok := dataservice.(*Data)\n\tif !ok {\n\t\tt.Errorf(\"Can't cast labels data service into Data\\n\")\n\t}\n\treturn labels\n}",
"func ProtoToInstance(p *spannerpb.SpannerInstance) *spanner.Instance {\n\tobj := &spanner.Instance{\n\t\tName: dcl.StringOrNil(p.Name),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t\tConfig: dcl.StringOrNil(p.Config),\n\t\tDisplayName: dcl.StringOrNil(p.DisplayName),\n\t\tNodeCount: dcl.Int64OrNil(p.NodeCount),\n\t\tState: ProtoToSpannerInstanceStateEnum(p.GetState()),\n\t}\n\treturn obj\n}",
"func (b *Simple) Instance() *device.Instance { return b.To }",
"func InstanceToProto(resource *sql.Instance) *sqlpb.SqlInstance {\n\tp := &sqlpb.SqlInstance{\n\t\tBackendType: SqlInstanceBackendTypeEnumToProto(resource.BackendType),\n\t\tConnectionName: dcl.ValueOrEmptyString(resource.ConnectionName),\n\t\tDatabaseVersion: SqlInstanceDatabaseVersionEnumToProto(resource.DatabaseVersion),\n\t\tEtag: dcl.ValueOrEmptyString(resource.Etag),\n\t\tGceZone: dcl.ValueOrEmptyString(resource.GceZone),\n\t\tInstanceType: SqlInstanceInstanceTypeEnumToProto(resource.InstanceType),\n\t\tMasterInstanceName: dcl.ValueOrEmptyString(resource.MasterInstanceName),\n\t\tMaxDiskSize: SqlInstanceMaxDiskSizeToProto(resource.MaxDiskSize),\n\t\tName: dcl.ValueOrEmptyString(resource.Name),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t\tRegion: dcl.ValueOrEmptyString(resource.Region),\n\t\tRootPassword: dcl.ValueOrEmptyString(resource.RootPassword),\n\t\tCurrentDiskSize: SqlInstanceCurrentDiskSizeToProto(resource.CurrentDiskSize),\n\t\tDiskEncryptionConfiguration: SqlInstanceDiskEncryptionConfigurationToProto(resource.DiskEncryptionConfiguration),\n\t\tFailoverReplica: SqlInstanceFailoverReplicaToProto(resource.FailoverReplica),\n\t\tMasterInstance: SqlInstanceMasterInstanceToProto(resource.MasterInstance),\n\t\tReplicaConfiguration: SqlInstanceReplicaConfigurationToProto(resource.ReplicaConfiguration),\n\t\tScheduledMaintenance: SqlInstanceScheduledMaintenanceToProto(resource.ScheduledMaintenance),\n\t\tSettings: SqlInstanceSettingsToProto(resource.Settings),\n\t}\n\tfor _, r := range resource.IPAddresses {\n\t\tp.IpAddresses = append(p.IpAddresses, SqlInstanceIPAddressesToProto(&r))\n\t}\n\n\treturn p\n}",
"func instancesToProto(insts registry.Instances) []*instances.Instance {\n\tret := make([]*instances.Instance, 0)\n\tfor _, inst := range insts {\n\t\tprotoInst := &instances.Instance{\n\t\t\tInstanceId: proto.String(inst.Id),\n\t\t\tHostname: proto.String(inst.Hostname),\n\t\t\tMachineClass: proto.String(inst.MachineClass),\n\t\t\tServiceName: proto.String(inst.Name),\n\t\t\tServiceDescription: proto.String(inst.Description),\n\t\t\tServiceVersion: proto.Uint64(inst.Version),\n\t\t\tAzName: proto.String(inst.AzName),\n\t\t\tSubTopic: make([]string, 0),\n\t\t}\n\t\tfor _, ep := range inst.Endpoints {\n\t\t\tif ep.Subscribe != \"\" {\n\t\t\t\tprotoInst.SubTopic = append(protoInst.SubTopic, ep.Subscribe)\n\t\t\t}\n\t\t}\n\t\tret = append(ret, protoInst)\n\t}\n\treturn ret\n}",
"func GetInstance(ctx *pulumi.Context,\n\tname string, id pulumi.ID, state *InstanceState, opts ...pulumi.ResourceOpt) (*Instance, error) {\n\tinputs := make(map[string]interface{})\n\tif state != nil {\n\t\tinputs[\"agentVersion\"] = state.AgentVersion\n\t\tinputs[\"amiId\"] = state.AmiId\n\t\tinputs[\"architecture\"] = state.Architecture\n\t\tinputs[\"autoScalingType\"] = state.AutoScalingType\n\t\tinputs[\"availabilityZone\"] = state.AvailabilityZone\n\t\tinputs[\"createdAt\"] = state.CreatedAt\n\t\tinputs[\"deleteEbs\"] = state.DeleteEbs\n\t\tinputs[\"deleteEip\"] = state.DeleteEip\n\t\tinputs[\"ebsBlockDevices\"] = state.EbsBlockDevices\n\t\tinputs[\"ebsOptimized\"] = state.EbsOptimized\n\t\tinputs[\"ec2InstanceId\"] = state.Ec2InstanceId\n\t\tinputs[\"ecsClusterArn\"] = state.EcsClusterArn\n\t\tinputs[\"elasticIp\"] = state.ElasticIp\n\t\tinputs[\"ephemeralBlockDevices\"] = state.EphemeralBlockDevices\n\t\tinputs[\"hostname\"] = state.Hostname\n\t\tinputs[\"infrastructureClass\"] = state.InfrastructureClass\n\t\tinputs[\"installUpdatesOnBoot\"] = state.InstallUpdatesOnBoot\n\t\tinputs[\"instanceProfileArn\"] = state.InstanceProfileArn\n\t\tinputs[\"instanceType\"] = state.InstanceType\n\t\tinputs[\"lastServiceErrorId\"] = state.LastServiceErrorId\n\t\tinputs[\"layerIds\"] = state.LayerIds\n\t\tinputs[\"os\"] = state.Os\n\t\tinputs[\"platform\"] = state.Platform\n\t\tinputs[\"privateDns\"] = state.PrivateDns\n\t\tinputs[\"privateIp\"] = state.PrivateIp\n\t\tinputs[\"publicDns\"] = state.PublicDns\n\t\tinputs[\"publicIp\"] = state.PublicIp\n\t\tinputs[\"registeredBy\"] = state.RegisteredBy\n\t\tinputs[\"reportedAgentVersion\"] = state.ReportedAgentVersion\n\t\tinputs[\"reportedOsFamily\"] = state.ReportedOsFamily\n\t\tinputs[\"reportedOsName\"] = state.ReportedOsName\n\t\tinputs[\"reportedOsVersion\"] = state.ReportedOsVersion\n\t\tinputs[\"rootBlockDevices\"] = state.RootBlockDevices\n\t\tinputs[\"rootDeviceType\"] = state.RootDeviceType\n\t\tinputs[\"rootDeviceVolumeId\"] = state.RootDeviceVolumeId\n\t\tinputs[\"securityGroupIds\"] = state.SecurityGroupIds\n\t\tinputs[\"sshHostDsaKeyFingerprint\"] = state.SshHostDsaKeyFingerprint\n\t\tinputs[\"sshHostRsaKeyFingerprint\"] = state.SshHostRsaKeyFingerprint\n\t\tinputs[\"sshKeyName\"] = state.SshKeyName\n\t\tinputs[\"stackId\"] = state.StackId\n\t\tinputs[\"state\"] = state.State\n\t\tinputs[\"status\"] = state.Status\n\t\tinputs[\"subnetId\"] = state.SubnetId\n\t\tinputs[\"tenancy\"] = state.Tenancy\n\t\tinputs[\"virtualizationType\"] = state.VirtualizationType\n\t}\n\ts, err := ctx.ReadResource(\"aws:opsworks/instance:Instance\", name, id, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Instance{s: s}, nil\n}",
"func (dao *DaoPetMongodb) toBo(gbo godal.IGenericBo) *BoPet {\n\tif gbo == nil {\n\t\treturn nil\n\t}\n\treturn (&BoPet{}).fromGenericBo(gbo)\n}",
"func ProtoToInstanceTemplate(p *betapb.ComputeBetaInstanceTemplate) *beta.InstanceTemplate {\n\tobj := &beta.InstanceTemplate{\n\t\tCreationTimestamp: dcl.StringOrNil(p.GetCreationTimestamp()),\n\t\tDescription: dcl.StringOrNil(p.Description),\n\t\tId: dcl.Int64OrNil(p.Id),\n\t\tSelfLink: dcl.StringOrNil(p.SelfLink),\n\t\tName: dcl.StringOrNil(p.Name),\n\t\tProperties: ProtoToComputeBetaInstanceTemplateProperties(p.GetProperties()),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t}\n\treturn obj\n}",
"func gceInfo(inst *instance) error {\n\tvar err error\n\tinst.zone, err = metadata.Zone()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.name, err = metadata.InstanceName()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.hostname, err = metadata.Hostname()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.project, err = metadata.ProjectID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (i *Interface) Instance() interface{} {\n\treturn i.base.instance\n}",
"func ProtoToInstance(p *sqlpb.SqlInstance) *sql.Instance {\n\tobj := &sql.Instance{\n\t\tBackendType: ProtoToSqlInstanceBackendTypeEnum(p.GetBackendType()),\n\t\tConnectionName: dcl.StringOrNil(p.ConnectionName),\n\t\tDatabaseVersion: ProtoToSqlInstanceDatabaseVersionEnum(p.GetDatabaseVersion()),\n\t\tEtag: dcl.StringOrNil(p.Etag),\n\t\tGceZone: dcl.StringOrNil(p.GceZone),\n\t\tInstanceType: ProtoToSqlInstanceInstanceTypeEnum(p.GetInstanceType()),\n\t\tMasterInstanceName: dcl.StringOrNil(p.MasterInstanceName),\n\t\tMaxDiskSize: ProtoToSqlInstanceMaxDiskSize(p.GetMaxDiskSize()),\n\t\tName: dcl.StringOrNil(p.Name),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t\tRegion: dcl.StringOrNil(p.Region),\n\t\tRootPassword: dcl.StringOrNil(p.RootPassword),\n\t\tCurrentDiskSize: ProtoToSqlInstanceCurrentDiskSize(p.GetCurrentDiskSize()),\n\t\tDiskEncryptionConfiguration: ProtoToSqlInstanceDiskEncryptionConfiguration(p.GetDiskEncryptionConfiguration()),\n\t\tFailoverReplica: ProtoToSqlInstanceFailoverReplica(p.GetFailoverReplica()),\n\t\tMasterInstance: ProtoToSqlInstanceMasterInstance(p.GetMasterInstance()),\n\t\tReplicaConfiguration: ProtoToSqlInstanceReplicaConfiguration(p.GetReplicaConfiguration()),\n\t\tScheduledMaintenance: ProtoToSqlInstanceScheduledMaintenance(p.GetScheduledMaintenance()),\n\t\tSettings: ProtoToSqlInstanceSettings(p.GetSettings()),\n\t}\n\tfor _, r := range p.GetIpAddresses() {\n\t\tobj.IPAddresses = append(obj.IPAddresses, *ProtoToSqlInstanceIPAddresses(r))\n\t}\n\treturn obj\n}",
"func expectedNewInstance(jobID, datasetID string) *dataset.NewInstance {\n\tnewInstance := &dataset.NewInstance{\n\t\tLinks: &dataset.Links{\n\t\t\tDataset: dataset.Link{\n\t\t\t\tURL: \"http://localhost:22000/datasets/\" + datasetID,\n\t\t\t\tID: datasetID,\n\t\t\t},\n\t\t\tJob: dataset.Link{\n\t\t\t\tURL: \"http://import-api/jobs/\" + jobID,\n\t\t\t\tID: jobID,\n\t\t\t},\n\t\t},\n\t\tDimensions: []dataset.CodeList{},\n\t\tImportTasks: &dataset.InstanceImportTasks{\n\t\t\tImportObservations: &dataset.ImportObservationsTask{\n\t\t\t\tState: dataset.StateCreated.String(),\n\t\t\t},\n\t\t\tBuildHierarchyTasks: []*dataset.BuildHierarchyTask{},\n\t\t\tBuildSearchIndexTasks: []*dataset.BuildSearchIndexTask{},\n\t\t},\n\t\tType: \"cantabular_blob\",\n\t}\n\tif datasetID == \"dataset1\" {\n\t\tnewInstance.Dimensions = []dataset.CodeList{{ID: \"codelist11\"}, {ID: \"codelist12\"}}\n\t\tnewInstance.LowestGeography = \"lowest_geo\"\n\t} else if datasetID == \"dataset2\" {\n\t\tnewInstance.Dimensions = []dataset.CodeList{{ID: \"codelist21\"}, {ID: \"codelist22\"}, {ID: \"codelist23\"}}\n\t}\n\treturn newInstance\n}",
"func InstanceTemplateToProto(resource *beta.InstanceTemplate) *betapb.ComputeBetaInstanceTemplate {\n\tp := &betapb.ComputeBetaInstanceTemplate{\n\t\tCreationTimestamp: dcl.ValueOrEmptyString(resource.CreationTimestamp),\n\t\tDescription: dcl.ValueOrEmptyString(resource.Description),\n\t\tId: dcl.ValueOrEmptyInt64(resource.Id),\n\t\tSelfLink: dcl.ValueOrEmptyString(resource.SelfLink),\n\t\tName: dcl.ValueOrEmptyString(resource.Name),\n\t\tProperties: ComputeBetaInstanceTemplatePropertiesToProto(resource.Properties),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t}\n\n\treturn p\n}",
"func toBetaHealthCheck(hc *computealpha.HealthCheck) (*computebeta.HealthCheck, error) {\n\tret := &computebeta.HealthCheck{}\n\terr := copyViaJSON(ret, hc)\n\treturn ret, err\n}",
"func GetInstanceBD() *connection {\n\treturn instance\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
newBaseInstanceList is the baseInstanceList constructor | func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {
cidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)
return &baseInstanceList{
allocateCIDR: allocateCIDR,
clusterCIDR: clusterCIDR,
subnetMaskSize: subnetMaskSize,
cidrSet: cidrSet,
instances: make(map[meta.Key]*baseInstance),
}
} | [
"func newList(e exec.Executor) *chunk.List {\n\tbase := e.Base()\n\treturn chunk.NewList(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize())\n}",
"func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tsi := parent.module.subinstance\n\tif si == nil {\n\t\tsi = make(map[string][]*BaseInstance)\n\t\tparent.module.subinstance = si\n\t}\n\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tsubinstance: true,\n\t\tinstance: parent.instance,\n\t\tmodule: parent.module,\n\t}\n\n\tsi[parent.name] = append(si[parent.name], bi)\n\treturn bi\n}",
"func newList(ctx TransactionContextInterface) *list {\n\t stateList := new(ledgerapi.StateList)\n\t stateList.Ctx = ctx\n\t stateList.Class = \"Asset\"\n\t stateList.Deserialize = func(bytes []byte, state ledgerapi.StateInterface) error {\n\t\t return Deserialize(bytes, state.(*Asset))\n\t }\n \n\t list := new(list)\n\t list.stateList = stateList\n \n\t return list\n }",
"func newBaseRuntime(erp *ECALRuntimeProvider, node *parser.ASTNode) *baseRuntime {\n\tinstanceCounter++\n\treturn &baseRuntime{fmt.Sprint(instanceCounter), erp, node, false}\n}",
"func New() *List { return new(List).Init() }",
"func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tinstance: parent.instance,\n\t}\n\treturn bi\n}",
"func newIDList(p *idElementPool) *idList {\n\tl := &idList{Pool: p}\n\treturn l.Init()\n}",
"func newList(data interface{}) *List {\n\tnewL := new(List)\n\tnewL.Insert(data)\n\treturn newL\n}",
"func ListBase(base uint32) {\n\tsyscall.Syscall(gpListBase, 1, uintptr(base), 0, 0)\n}",
"func newList(vert bool, width, height float32) *List {\n\n\tli := new(List)\n\tli.initialize(vert, width, height)\n\treturn li\n}",
"func newList(rowType reflect.Type) []*Info {\n\tvar list columnList\n\tvar state = stateT{}\n\tlist.addFields(rowType, state)\n\treturn list\n}",
"func newList() *List {\n\tl := &List{\n\t\tch: make(chan sh.QData),\n\t}\n\treturn l\n}",
"func (s *BasevhdlListener) EnterInstantiation_list(ctx *Instantiation_listContext) {}",
"func New() *List {\n return &List{size:0}\n}",
"func newAccessList() accessList {\n\treturn make(map[common.Address]accessListSlots)\n}",
"func NewBase() Base {\r\n\treturn Base{\r\n\t\tActive: \"\",\r\n\t\tTitle: \"Lemonade Stand Supply\",\r\n\t}\r\n}",
"func newElementBase(ln *line, parent element) elementBase {\n\treturn elementBase{\n\t\tln: ln,\n\t\tparent: parent,\n\t}\n}",
"func newDeltaList() *deltaList {\n\treturn &deltaList{\n\t\tcount: 0,\n\t\thead: nil,\n\t}\n}",
"func newListProcessor(ctx context.Context, dynamicClient dynamic.Interface, workerFn workerFunc) *listProcessor {\n\treturn &listProcessor{\n\t\tconcurrency: defaultConcurrency,\n\t\tworkerFn: workerFn,\n\t\tdynamicClient: dynamicClient,\n\t\tctx: ctx,\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
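A minimal usage sketch for the constructor in the row above, hedged: it assumes the caller sits in the same package as the unexported baseInstanceList helpers (with net, log, and fmt imported), and the CIDR "10.64.0.0/14" and mask size 24 are illustrative values, not taken from this dataset.

// Build a list that will hand each lazily created instance a /24 alias range.
_, clusterCIDR, err := net.ParseCIDR("10.64.0.0/14") // assumed example range
if err != nil {
	log.Fatalf("parsing cluster CIDR: %v", err)
}
bil := newBaseInstanceList(true, clusterCIDR, 24)
fmt.Printf("allocating CIDRs: %v\n", bil.allocateCIDR)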
getOrCreateBaseInstance lazily creates a new base instance, assigning it an alias IP range if allocateCIDR is true | func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {
bil.lock.Lock()
defer bil.lock.Unlock()
inst, found := bil.instances[*key]
if !found {
inst = &baseInstance{name: key.Name, zone: key.Zone}
if bil.allocateCIDR {
nextRange, _ := bil.cidrSet.AllocateNext()
inst.aliasRange = nextRange.String()
}
bil.instances[*key] = inst
}
return inst
} | [
"func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {\n\tcidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)\n\treturn &baseInstanceList{\n\t\tallocateCIDR: allocateCIDR,\n\t\tclusterCIDR: clusterCIDR,\n\t\tsubnetMaskSize: subnetMaskSize,\n\t\tcidrSet: cidrSet,\n\t\tinstances: make(map[meta.Key]*baseInstance),\n\t}\n}",
"func newProcBase(name, bin, serviceAddr string, loggers []Logger) *procBase {\n\tlog.Infof(\"%s has addr %s\", name, serviceAddr)\n\treturn &procBase{\n\t\tname: name,\n\t\tbin: bin,\n\t\tserviceAddr: serviceAddr,\n\t\tloggers: loggers,\n\t}\n}",
"func (ipAddressStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {\n\t_ = obj.(*networking.IPAddress)\n\n}",
"func appNumOnUNetBaseCreate(baseID uuid.UUID) *types.Bitmap {\n\tif appNumOnUNetBaseGet(baseID) == nil {\n\t\tlog.Functionf(\"appNumOnUNetBaseCreate (%s)\", baseID.String())\n\t\tappNumBase[baseID.String()] = new(types.Bitmap)\n\t}\n\treturn appNumOnUNetBaseGet(baseID)\n}",
"func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tm, found := modules[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"No such module: %s\", moduleName)\n\t}\n\n\tif _, exists := m.instance[name]; exists {\n\t\treturn nil, fmt.Errorf(\"%s already exists in %s\", name, moduleName)\n\t}\n\n\tbi := &BaseInstance{name: name, module: m, subinstance: false}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, m.ringParam.Count, m.ringParam.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif m.ringParam.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, m.ringParam.Count, m.ringParam.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tif m.moduleType == TypeInterface || m.moduleType == TypeRIF {\n\t\tbi.counter = NewCounter()\n\t}\n\n\tinstance, err := m.factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\t// Set rule observer, if the module complies to RulesNotify.\n\tif rn, ok := instance.(RulesNotify); ok {\n\t\tbi.rules.setRulesNotify(rn)\n\t}\n\n\tm.instance[name] = bi\n\n\treturn bi, nil\n}",
"func newBaseRuntime(erp *ECALRuntimeProvider, node *parser.ASTNode) *baseRuntime {\n\tinstanceCounter++\n\treturn &baseRuntime{fmt.Sprint(instanceCounter), erp, node, false}\n}",
"func New(bridgeName, stateDir string, ipNet *net.IPNet) (*IPAllocator, error) {\n\tif err := os.MkdirAll(stateDir, 0666); err != nil {\n\t\treturn nil, fmt.Errorf(\"attempt to create state directory %s failed: %v\", stateDir, err)\n\t}\n\n\t// open the database\n\t// this will block until closed which is file for our use case of assigning\n\t// one IP and being done.\n\t// TODO: make this more graceful if someone else wants to use this as a lib.\n\tdbpath := path.Join(stateDir, DBFile)\n\tdb, err := bolt.Open(dbpath, 0666, nil)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, fmt.Errorf(\"You have not allocated any IPs\")\n\t\t}\n\t\treturn nil, fmt.Errorf(\"Opening database at %s failed: %v\", dbpath, err)\n\t}\n\n\t// create the ip allocator bucket if it does not exist\n\tif err := db.Update(func(tx *bolt.Tx) error {\n\t\tif _, err := tx.CreateBucketIfNotExists(IPBucket); err != nil {\n\t\t\treturn fmt.Errorf(\"Creating bucket %s failed: %v\", IPBucket, err)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbr, err := net.InterfaceByName(bridgeName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Getting bridge interface %s failed: %v\", bridgeName, err)\n\t}\n\n\tipAllocator := &IPAllocator{\n\t\tBridge: br,\n\t\tIPNet: ipNet,\n\t\tdb: db,\n\t}\n\n\treturn ipAllocator, nil\n}",
"func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tfactory, found := instanceFactories[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"Module '%s' doesn't exist.\\n\", moduleName)\n\t}\n\n\trp, ok := ringParams[moduleName]\n\tif !ok {\n\t\trp = defaultRingParam\n\t}\n\n\tbi := &BaseInstance{name: name}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif rp.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tinstance, err := factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\treturn bi, nil\n}",
"func MakeBase(name, key, owner string, defaultValue interface{}, lifetime Lifetime, expose bool) Base {\n\treturn Base{\n\t\tname: name,\n\t\tkey: key,\n\t\towner: owner,\n\t\tdefaultValue: defaultValue,\n\t\tlifetime: lifetime,\n\t\texpose: expose,\n\t}\n}",
"func NewBase(opt Opts) Dialer {\n\trv := &base{\n\t\tnetDialer: net.Dialer{\n\t\t\tTimeout: opt.GetTimeout(),\n\t\t\tControl: reuseport.Control,\n\t\t},\n\t\ttlsConfigs: cache.New(TLSConfigCacheSize,\n\t\t\tTLSConfigTTL,\n\t\t\tcache.NoopEvictCallback),\n\t\ttlsSkipVerify: opt.GetTLSSkipVerify(),\n\t}\n\n\treturn rv\n}",
"func NewBasePool() BasePool {\n\treturn BasePool{\n\t\tlastTuneTs: *atomicutil.NewTime(time.Now()),\n\t}\n}",
"func newCache(nbClient libovsdbclient.Client) (*LBCache, error) {\n\t// first, list all load balancers\n\tlbs, err := listLBs(nbClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := LBCache{}\n\tc.existing = make(map[string]*CachedLB, len(lbs))\n\n\tfor i := range lbs {\n\t\tc.existing[lbs[i].UUID] = &lbs[i]\n\t}\n\n\tps := func(item *nbdb.LogicalSwitch) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tswitches, err := libovsdbops.FindLogicalSwitchesWithPredicate(nbClient, ps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ls := range switches {\n\t\tfor _, lbuuid := range ls.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Switches.Insert(ls.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tpr := func(item *nbdb.LogicalRouter) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\trouters, err := libovsdbops.FindLogicalRoutersWithPredicate(nbClient, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, router := range routers {\n\t\tfor _, lbuuid := range router.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Routers.Insert(router.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Get non-empty LB groups\n\tpg := func(item *nbdb.LoadBalancerGroup) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tgroups, err := libovsdbops.FindLoadBalancerGroupsWithPredicate(nbClient, pg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, group := range groups {\n\t\tfor _, lbuuid := range group.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Groups.Insert(group.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &c, nil\n}",
"func newPrimary() *proxy {\n\tvar (\n\t\tp = &proxy{}\n\t\ttracker = mock.NewStatsTracker()\n\t\tsmap = newSmap()\n\t)\n\n\tp.owner.smap = newSmapOwner(cmn.GCO.Get())\n\tp.si = meta.NewSnode(\"primary\", apc.Proxy, meta.NetInfo{}, meta.NetInfo{}, meta.NetInfo{})\n\n\tsmap.addProxy(p.si)\n\tsmap.Primary = p.si\n\tp.owner.smap.put(smap)\n\n\tconfig := cmn.GCO.BeginUpdate()\n\tconfig.ConfigDir = \"/tmp/ais-tests\"\n\tconfig.Periodic.RetrySyncTime = cos.Duration(time.Millisecond * 100)\n\tconfig.Keepalive.Proxy.Name = \"heartbeat\"\n\tconfig.Keepalive.Proxy.Interval = cos.Duration(3 * time.Second)\n\tconfig.Timeout.CplaneOperation = cos.Duration(2 * time.Second)\n\tconfig.Timeout.MaxKeepalive = cos.Duration(4 * time.Second)\n\tconfig.Client.Timeout = cos.Duration(10 * time.Second)\n\tconfig.Client.TimeoutLong = cos.Duration(10 * time.Second)\n\tconfig.Cksum.Type = cos.ChecksumXXHash\n\tcmn.GCO.CommitUpdate(config)\n\tcmn.GCO.SetInitialGconfPath(\"/tmp/ais-tests/ais.config\")\n\n\tp.client.data = &http.Client{}\n\tp.client.control = &http.Client{}\n\tp.keepalive = newPalive(p, tracker, atomic.NewBool(true))\n\n\to := newBMDOwnerPrx(config)\n\to.put(newBucketMD())\n\tp.owner.bmd = o\n\n\te := newEtlMDOwnerPrx(config)\n\te.put(newEtlMD())\n\tp.owner.etl = e\n\n\tp.gmm = memsys.PageMM()\n\treturn p\n}",
"func AllocateInternalIPs() error {\n\t// Reserve the IPv4 router IP if it is part of the IPv4\n\t// allocation range to ensure that we do not hand out the\n\t// router IP to a container.\n\tallocRange := node.GetIPv4AllocRange()\n\tnodeIP := node.GetExternalIPv4()\n\tif allocRange.Contains(nodeIP) {\n\t\terr := ipamConf.IPv4Allocator.Allocate(nodeIP)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).WithField(logfields.IPAddr, nodeIP).Debug(\"Unable to reserve IPv4 router address\")\n\t\t}\n\t}\n\n\tinternalIP := node.GetInternalIPv4()\n\tif internalIP == nil {\n\t\tinternalIP = ip.GetNextIP(node.GetIPv4AllocRange().IP)\n\t}\n\terr := ipamConf.IPv4Allocator.Allocate(internalIP)\n\tif err != nil {\n\t\t// If the allocation fails here it is likely that, in a kubernetes\n\t\t// environment, cilium was not able to retrieve the node's pod-cidr\n\t\t// which will cause cilium to start with a default IPv4 allocation range\n\t\t// different from the previous running instance.\n\t\t// Since cilium_host IP is always automatically derived from the IPv4\n\t\t// allocation range it is safe to assume cilium_host IP will always\n\t\t// belong to the IPv4AllocationRange.\n\t\t// Unless of course the user manually specifies a different IPv4range\n\t\t// between restarts which he can only solve by deleting the IPv4\n\t\t// address from cilium_host as well deleting the node_config.h.\n\t\treturn ErrAllocation(fmt.Errorf(\"Unable to allocate internal IPv4 node IP %s: %s.\",\n\t\t\tinternalIP, err))\n\t}\n\tnode.SetInternalIPv4(internalIP)\n\n\t// Reserve the IPv6 router and node IP if it is part of the IPv6\n\t// allocation range to ensure that we do not hand out the router IP to\n\t// a container.\n\tallocRange = node.GetIPv6AllocRange()\n\tfor _, ip6 := range []net.IP{node.GetIPv6()} {\n\t\tif allocRange.Contains(ip6) {\n\t\t\terr := ipamConf.IPv6Allocator.Allocate(ip6)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).WithField(logfields.IPAddr, ip6).Debug(\"Unable to reserve IPv6 address\")\n\t\t\t}\n\t\t}\n\t}\n\n\trouterIP := node.GetIPv6Router()\n\tif routerIP == nil {\n\t\trouterIP = ip.GetNextIP(node.GetIPv6AllocRange().IP)\n\t}\n\tif !routerIP.Equal(node.GetIPv6()) {\n\t\terr = ipamConf.IPv6Allocator.Allocate(routerIP)\n\t\tif err != nil {\n\t\t\treturn ErrAllocation(fmt.Errorf(\"Unable to allocate internal IPv6 router IP %s: %s.\",\n\t\t\t\trouterIP, err))\n\t\t}\n\t}\n\tnode.SetIPv6Router(routerIP)\n\n\treturn nil\n}",
"func (b *BridgeNetworkDriver) Create(name string, subnet string) (*Network, error) {\n\t// 取到网段字符串中的网关ip地址和网络的ip段\n\tip, IPRange, _ := net.ParseCIDR(subnet)\n\tIPRange.IP = ip\n\n\tn := &Network{\n\t\tName: name,\n\t\tIPRange: IPRange,\n\t\tDriver: b.Name(),\n\t}\n\n\terr := b.initBridge(n)\n\treturn n, err\n}",
"func newRoundrobinBalanced(cfg Config) Picker {\n\tscs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))\n\tfor sc := range cfg.SubConnToResolverAddress {\n\t\tscs = append(scs, sc)\n\t}\n\treturn &rrBalanced{\n\t\tp: RoundrobinBalanced,\n\t\tscs: scs,\n\t\tscToAddr: cfg.SubConnToResolverAddress,\n\t}\n}",
"func (m *InstancePoolManagerImpl) GetInstancePoolForInstance(instanceDetails OciRef) (*InstancePoolNodeGroup, error) {\n\n\tif instanceDetails.CompartmentID == \"\" {\n\t\t// cfg.Global.CompartmentID would be set to tenancy OCID at runtime if compartment was not set.\n\t\tinstanceDetails.CompartmentID = m.cfg.Global.CompartmentID\n\t}\n\n\tif instanceDetails.AvailabilityDomain != \"\" && instanceDetails.InstanceID != \"\" && instanceDetails.PoolID != \"\" &&\n\t\tinstanceDetails.PrivateIPAddress != \"\" && instanceDetails.Shape != \"\" {\n\t\t// It's possible that this instance belongs to an instance pool that was not specified via --nodes argument.\n\t\treturn m.staticInstancePools[instanceDetails.PoolID], nil\n\t}\n\n\t// Details are missing from this instance.\n\t// Try to resolve them, though it may not be a member of an instance-pool we manage.\n\tresolvedInstanceDetails, err := m.instancePoolCache.findInstanceByDetails(instanceDetails)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if resolvedInstanceDetails == nil {\n\t\treturn nil, nil\n\t}\n\n\tkubeClient := m.staticInstancePools[resolvedInstanceDetails.PoolID].kubeClient\n\n\t// Optionally annotate & label the node so that it does not need to be searched for in subsequent iterations.\n\t_ = annotateNode(kubeClient, resolvedInstanceDetails.Name, ociInstanceIDAnnotation, resolvedInstanceDetails.InstanceID)\n\t_ = annotateNode(kubeClient, resolvedInstanceDetails.Name, ociInstancePoolIDAnnotation, resolvedInstanceDetails.PoolID)\n\t_ = annotateNode(kubeClient, resolvedInstanceDetails.Name, ociAnnotationCompartmentID, resolvedInstanceDetails.CompartmentID)\n\t_ = labelNode(kubeClient, resolvedInstanceDetails.Name, apiv1.LabelTopologyZone, resolvedInstanceDetails.AvailabilityDomain)\n\t_ = labelNode(kubeClient, resolvedInstanceDetails.Name, apiv1.LabelFailureDomainBetaZone, resolvedInstanceDetails.AvailabilityDomain)\n\t_ = labelNode(kubeClient, resolvedInstanceDetails.Name, apiv1.LabelInstanceType, resolvedInstanceDetails.Shape)\n\t_ = labelNode(kubeClient, resolvedInstanceDetails.Name, apiv1.LabelInstanceTypeStable, resolvedInstanceDetails.Shape)\n\t_ = setNodeProviderID(kubeClient, resolvedInstanceDetails.Name, resolvedInstanceDetails.InstanceID)\n\n\treturn m.staticInstancePools[resolvedInstanceDetails.PoolID], nil\n}",
"func (p *pool) AllocateBlock(ctx context.Context, nodeName, requestUID string) (*coilv2.AddressBlock, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tnextIndex, ok := p.allocated.NextClear(0)\n\tif !ok {\n\t\tnextIndex = p.allocated.Len()\n\t}\n\n\tap := &coilv2.AddressPool{}\n\terr := p.client.Get(ctx, client.ObjectKey{Name: p.name}, ap)\n\tif err != nil {\n\t\tp.log.Error(err, \"failed to get AddressPool\")\n\t\treturn nil, err\n\t}\n\tif ap.DeletionTimestamp != nil {\n\t\tp.log.Info(\"unable to curve out a block because pool is under deletion\")\n\t\treturn nil, ErrNoBlock\n\t}\n\n\tvar currentIndex uint\n\tfor _, ss := range ap.Spec.Subnets {\n\t\tvar ones, bits int\n\t\tif ss.IPv4 != nil {\n\t\t\t_, n, _ := net.ParseCIDR(*ss.IPv4) // ss was validated\n\t\t\tones, bits = n.Mask.Size()\n\t\t} else {\n\t\t\t_, n, _ := net.ParseCIDR(*ss.IPv6) // ss was validated\n\t\t\tones, bits = n.Mask.Size()\n\t\t}\n\t\tsize := uint(1) << (bits - ones - int(ap.Spec.BlockSizeBits))\n\t\tif nextIndex >= (currentIndex + size) {\n\t\t\tcurrentIndex += size\n\t\t\tcontinue\n\t\t}\n\n\t\tipv4, ipv6 := ss.GetBlock(nextIndex-currentIndex, int(ap.Spec.BlockSizeBits))\n\n\t\tr := &coilv2.AddressBlock{}\n\t\tr.Name = fmt.Sprintf(\"%s-%d\", p.name, nextIndex)\n\t\tif err := controllerutil.SetControllerReference(ap, r, p.scheme); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.Labels = map[string]string{\n\t\t\tconstants.LabelPool: p.name,\n\t\t\tconstants.LabelNode: nodeName,\n\t\t\tconstants.LabelRequest: requestUID,\n\t\t}\n\t\tcontrollerutil.AddFinalizer(r, constants.FinCoil)\n\t\tr.Index = int32(nextIndex)\n\t\tif ipv4 != nil {\n\t\t\ts := ipv4.String()\n\t\t\tr.IPv4 = &s\n\t\t}\n\t\tif ipv6 != nil {\n\t\t\ts := ipv6.String()\n\t\t\tr.IPv6 = &s\n\t\t}\n\t\tif err := p.client.Create(ctx, r); err != nil {\n\t\t\tp.log.Error(err, \"failed to create AddressBlock\", \"index\", nextIndex, \"node\", nodeName)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tp.log.Info(\"created AddressBlock\", \"index\", nextIndex, \"node\", nodeName)\n\t\tp.allocated.Set(nextIndex)\n\t\tp.allocatedBlocks.Inc()\n\t\treturn r, nil\n\t}\n\n\tp.log.Error(ErrNoBlock, \"no available blocks\")\n\treturn nil, ErrNoBlock\n}",
"func NewUnsafe(host string, port, db uint) (*Wredis, error) {\n\tw, err := NewPool(host, port, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tw.safe = false\n\treturn w, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
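The lookup in the row above is idempotent: the first call for a key allocates (optionally carving out a CIDR) and caches, and later calls return the cached value. A short sketch under the same in-package assumption; the node name and zone are hypothetical.

key := meta.ZonalKey("node-1", "us-central1-a") // hypothetical name and zone
a := bil.getOrCreateBaseInstance(key)
b := bil.getOrCreateBaseInstance(key)
// The second call hits bil.instances instead of allocating another range.
fmt.Printf("cached: %v, alias range: %q\n", a == b, a.aliasRange)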
newGAGetHook creates a new closure with the current baseInstanceList to be used as a MockInstances.GetHook | func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
return func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
m.Lock.Lock()
defer m.Lock.Unlock()
if _, found := m.Objects[*key]; !found {
m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}
}
return false, nil, nil
}
} | [
"func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}",
"func (f *AutoIndexingServiceGetIndexesFunc) PushHook(hook func(context.Context, shared.GetIndexesOptions) ([]types.Index, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func HookGet(hookList *HookList, hookId uint64) *Hook {\n\tc_hook_list := (*C.GHookList)(C.NULL)\n\tif hookList != nil {\n\t\tc_hook_list = (*C.GHookList)(hookList.ToC())\n\t}\n\n\tc_hook_id := (C.gulong)(hookId)\n\n\tretC := C.g_hook_get(c_hook_list, c_hook_id)\n\tretGo := HookNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}",
"func (f *ExtensionStoreGetFeaturedExtensionsFunc) PushHook(hook func(context.Context) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetIndexByIDFunc) PushHook(hook func(context.Context, int) (types.Index, bool, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetIndexesByIDsFunc) PushHook(hook func(context.Context, ...int) ([]types.Index, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetUnsafeDBFunc) PushHook(hook func() database.DB) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func newGoGetter() *goGetter {\n\treturn &goGetter{}\n}",
"func NewHookLister(indexer cache.Indexer) HookLister {\n\treturn &hookLister{indexer: indexer}\n}",
"func (f *ExternalServiceStoreListFunc) PushHook(hook func(context.Context, database.ExternalServicesListOptions) ([]*types.ExternalService, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceNumRepositoriesWithCodeIntelligenceFunc) PushHook(hook func(context.Context) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *UploadServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ReleaseStoreGetLatestFunc) PushHook(hook func(context.Context, int32, string, bool) (*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreGetUploadsFunc) PushHook(hook func(context.Context, dbstore.GetUploadsOptions) ([]dbstore.Upload, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ReleaseStoreGetLatestBatchFunc) PushHook(hook func(context.Context, []int32, string, bool) ([]*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ReleaseStoreGetArtifactsFunc) PushHook(hook func(context.Context, int64) ([]byte, []byte, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *UploadServiceGetUploadsFunc) PushHook(hook func(context.Context, shared1.GetUploadsOptions) ([]types.Upload, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
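Wiring sketch for the GA hook in the row above. Returning (false, nil, nil) tells the mock not to override the call, so the mock's stock Get then serves the object the hook just seeded into m.Objects; the instance name and zone are assumptions.

c := cloud.NewMockGCE(nil)
c.MockInstances.GetHook = bil.newGAGetHook()
// The first Get lazily creates the base instance; repeat Gets are served from m.Objects.
inst, err := c.Instances().Get(context.Background(), meta.ZonalKey("node-1", "us-central1-a"))
if err != nil {
	log.Fatalf("mock Get: %v", err)
}
fmt.Println("lazily created:", inst.Name)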
newBetaGetHook creates a new closure with the current baseInstanceList to be used as a MockBetaInstances.GetHook | func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
return func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
m.Lock.Lock()
defer m.Lock.Unlock()
if _, found := m.Objects[*key]; !found {
m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}
}
return false, nil, nil
}
} | [
"func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}",
"func NewHookLister(indexer cache.Indexer) HookLister {\n\treturn &hookLister{indexer: indexer}\n}",
"func HookGet(hookList *HookList, hookId uint64) *Hook {\n\tc_hook_list := (*C.GHookList)(C.NULL)\n\tif hookList != nil {\n\t\tc_hook_list = (*C.GHookList)(hookList.ToC())\n\t}\n\n\tc_hook_id := (C.gulong)(hookId)\n\n\tretC := C.g_hook_get(c_hook_list, c_hook_id)\n\tretGo := HookNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}",
"func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func (t *T) Beta(name string, f interface{}) bool {\n\tt.Helper()\n\treturn t.invokeFeature(feature.Beta, name, f)\n}",
"func newLoadBalancerBackendPoolUpdater(az *Cloud, interval time.Duration) *loadBalancerBackendPoolUpdater {\n\treturn &loadBalancerBackendPoolUpdater{\n\t\taz: az,\n\t\tinterval: interval,\n\t\toperations: make([]batchOperation, 0),\n\t}\n}",
"func (f *AutoIndexingServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func NewLifeHook(e *Engine) Hook {\n\treturn Hook{\n\t\tOnStart: OnStart(e),\n\t\tOnStop: OnStop(e),\n\t}\n}",
"func (f *ReleaseStoreGetLatestBatchFunc) PushHook(hook func(context.Context, []int32, string, bool) ([]*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func HookAlloc(hookList *HookList) *Hook {\n\tc_hook_list := (*C.GHookList)(C.NULL)\n\tif hookList != nil {\n\t\tc_hook_list = (*C.GHookList)(hookList.ToC())\n\t}\n\n\tretC := C.g_hook_alloc(c_hook_list)\n\tretGo := HookNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}",
"func NewHook(peerID int, token string) *VkHook {\n\thook := &VkHook{\n\t\tPeerID: peerID,\n\t\tVK: api.NewVK(token),\n\t\tUseLevels: DefaultLevels,\n\t}\n\n\treturn hook\n}",
"func (f *AutoIndexingServiceGetIndexesFunc) PushHook(hook func(context.Context, shared.GetIndexesOptions) ([]types.Index, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func getHook(data *domain.Data, repo config.Repository) (config.Hook, bool, error) {\n\tfor _, hook := range repo.Hooks {\n\t\tf, err := matchHook(data, hook)\n\t\tif err != nil {\n\t\t\treturn config.Hook{}, false, err\n\t\t}\n\t\tif f {\n\t\t\treturn hook, true, nil\n\t\t}\n\t}\n\treturn config.Hook{}, false, nil\n}",
"func GetBindHook() BindHook {\n\treturn bindHook\n}",
"func NewHook(client *Client, levels []logrus.Level) *Hook {\n\n\treturn &Hook{client, levels}\n}",
"func newLabo(s *goquery.Selection, l *Labo) *Labo {\n\tfor _, fn := range laboFn {\n\t\tfn(s, l)\n\t}\n\treturn l\n}",
"func newLoadBalancerController() *LoadBalancerController {\n\tkubeClient := fake.NewSimpleClientset()\n\tbackendConfigClient := backendconfigclient.NewSimpleClientset()\n\tfakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())\n\n\t(fakeGCE.Compute().(*cloud.MockGCE)).MockGlobalForwardingRules.InsertHook = loadbalancers.InsertGlobalForwardingRuleHook\n\tnamer := namer_util.NewNamer(clusterUID, \"\")\n\n\tstopCh := make(chan struct{})\n\tctxConfig := context.ControllerContextConfig{\n\t\tNamespace: api_v1.NamespaceAll,\n\t\tResyncPeriod: 1 * time.Minute,\n\t\tDefaultBackendSvcPort: test.DefaultBeSvcPort,\n\t\tHealthCheckPath: \"/\",\n\t\tDefaultBackendHealthCheckPath: \"/healthz\",\n\t}\n\tctx := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, fakeGCE, namer, ctxConfig)\n\tlbc := NewLoadBalancerController(ctx, stopCh)\n\t// TODO(rramkumar): Fix this so we don't have to override with our fake\n\tlbc.instancePool = instances.NewNodePool(instances.NewFakeInstanceGroups(sets.NewString(), namer), namer)\n\tlbc.l7Pool = loadbalancers.NewLoadBalancerPool(fakeGCE, namer, events.RecorderProducerMock{})\n\tlbc.instancePool.Init(&instances.FakeZoneLister{Zones: []string{\"zone-a\"}})\n\n\tlbc.hasSynced = func() bool { return true }\n\n\t// Create the default-backend service.\n\tdefaultSvc := test.NewService(test.DefaultBeSvcPort.ID.Service, api_v1.ServiceSpec{\n\t\tType: api_v1.ServiceTypeNodePort,\n\t\tPorts: []api_v1.ServicePort{\n\t\t\t{\n\t\t\t\tName: \"http\",\n\t\t\t\tPort: 80,\n\t\t\t},\n\t\t},\n\t})\n\taddService(lbc, defaultSvc)\n\n\treturn lbc\n}",
"func NewChangelistLandedUpdater(t testing.TB) *ChangelistLandedUpdater {\n\tmock := &ChangelistLandedUpdater{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
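The beta hook follows the same contract as the GA one, differing only in the object it seeds (toBeta() instead of toGA()). A hedged mirror of the GA wiring, reusing the key from the earlier sketch:

c.MockBetaInstances.GetHook = bil.newBetaGetHook()
betaInst, err := c.BetaInstances().Get(context.Background(), key) // same lazy-create behavior as the GA path
if err == nil {
	fmt.Println("beta alias ranges:", betaInst.NetworkInterfaces[0].AliasIpRanges)
}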
newMockCloud returns a mock GCE instance with the appropriate handler hooks | func (bil *baseInstanceList) newMockCloud() cloud.Cloud {
c := cloud.NewMockGCE(nil)
// insert hooks to lazily create an instance when needed
c.MockInstances.GetHook = bil.newGAGetHook()
c.MockBetaInstances.GetHook = bil.newBetaGetHook()
return c
} | [
"func NewCloudMock() *CloudMock {\n\taddress, grpcServer, mockTrace := startMockServer()\n\n\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"did not connect: %s\", err)\n\t}\n\n\ttraceClient := cloudtrace.NewTraceServiceClient(conn)\n\tmetricClient := monitoring.NewMetricServiceClient(conn)\n\treturn &CloudMock{\n\t\tconn,\n\t\tgrpcServer,\n\t\tmockTrace,\n\t\ttraceClient,\n\t\tmetricClient,\n\t}\n}",
"func NewFakeGCECloud(vals TestClusterValues) *Cloud {\n\tservice, err := compute.NewService(context.Background(), option.WithoutAuthentication())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgce := &Cloud{\n\t\tregion: vals.Region,\n\t\tservice: service,\n\t\tmanagedZones: []string{vals.ZoneName},\n\t\tprojectID: vals.ProjectID,\n\t\tnetworkProjectID: vals.ProjectID,\n\t\tClusterID: fakeClusterID(vals.ClusterID),\n\t\tonXPN: vals.OnXPN,\n\t\tmetricsCollector: newLoadBalancerMetrics(),\n\t\tprojectsBasePath: getProjectsBasePath(service.BasePath),\n\t}\n\tc := cloud.NewMockGCE(&gceProjectRouter{gce})\n\tgce.c = c\n\treturn gce\n}",
"func newK8SCloud(opts Options) (CloudProvider, error) {\n\n\tif opts.Name == \"\" {\n\t\treturn nil, errors.New(\"K8SCloud: Invalid cloud name\")\n\t}\n\tif opts.Host == \"\" {\n\t\treturn nil, errors.New(\"K8SCloud: Invalid cloud host\")\n\t}\n\tif opts.K8SNamespace == \"\" {\n\t\topts.K8SNamespace = apiv1.NamespaceDefault\n\t}\n\n\tcloud := &K8SCloud{\n\t\tname: opts.Name,\n\t\thost: opts.Host,\n\t\tbearerToken: opts.K8SBearerToken,\n\t\tnamespace: opts.K8SNamespace,\n\t\tinsecure: opts.Insecure,\n\t}\n\tconfig := &rest.Config{\n\t\tHost: opts.Host,\n\t\tBearerToken: opts.K8SBearerToken,\n\t\tTLSClientConfig: rest.TLSClientConfig{\n\t\t\tInsecure: opts.Insecure,\n\t\t},\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloud.client = clientset\n\treturn cloud, nil\n}",
"func NewK8sClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *K8sClient {\n\tmock := &K8sClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewCloud(cfg CloudConfig, metricsRegisterer prometheus.Registerer) (Cloud, error) {\n\tmetadataSess := session.Must(session.NewSession(aws.NewConfig()))\n\tmetadata := services.NewEC2Metadata(metadataSess)\n\tif len(cfg.Region) == 0 {\n\t\tregion, err := metadata.Region()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to introspect region from EC2Metadata, specify --aws-region instead if EC2Metadata is unavailable\")\n\t\t}\n\t\tcfg.Region = region\n\t}\n\n\tif len(cfg.VpcID) == 0 {\n\t\tvpcId, err := metadata.VpcID()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to introspect vpcID from EC2Metadata, specify --aws-vpc-id instead if EC2Metadata is unavailable\")\n\t\t}\n\t\tcfg.VpcID = vpcId\n\t}\n\n\tawsCFG := aws.NewConfig().WithRegion(cfg.Region).WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint).WithMaxRetries(cfg.MaxRetries)\n\tsess := session.Must(session.NewSession(awsCFG))\n\tinjectUserAgent(&sess.Handlers)\n\n\tif cfg.ThrottleConfig != nil {\n\t\tthrottler := throttle.NewThrottler(cfg.ThrottleConfig)\n\t\tthrottler.InjectHandlers(&sess.Handlers)\n\t}\n\tif metricsRegisterer != nil {\n\t\tmetricsCollector, err := metrics.NewCollector(metricsRegisterer)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to initialize sdk metrics collector\")\n\t\t}\n\t\tmetricsCollector.InjectHandlers(&sess.Handlers)\n\t}\n\n\treturn &defaultCloud{\n\t\tcfg: cfg,\n\t\tec2: services.NewEC2(sess),\n\t\telbv2: services.NewELBV2(sess),\n\t\tacm: services.NewACM(sess),\n\t\twafv2: services.NewWAFv2(sess),\n\t\twafRegional: services.NewWAFRegional(sess, cfg.Region),\n\t\tshield: services.NewShield(sess),\n\t\trgt: services.NewRGT(sess),\n\t}, nil\n}",
"func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {\n\t// We have some state in the Cloud object - in particular the attaching map\n\t// Log so that if we are building multiple Cloud objects, it is obvious!\n\tglog.Infof(\"Building AWS cloudprovider\")\n\n\t// Register handler for ECR credentials\n\t// Register regions, in particular for ECR credentials\n\tonce.Do(func() {\n\t\tRecognizeWellKnownRegions()\n\t})\n\n\tmetadata, err := awsServices.Metadata()\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"error creating AWS metadata client: %v\", err)\n\t}\n\n\tcfg, err := readAWSCloudConfig(config, metadata)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"unable to read AWS cloud provider config file: %v\", err)\n\t}\n\n\tzone := cfg.Global.Zone\n\tif len(zone) <= 1 {\n\t\treturn nil, errors.Errorf(\"invalid AWS zone in config file: %s\", zone)\n\t}\n\tregionName, err := azToRegion(zone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !cfg.Global.DisableStrictZoneCheck {\n\t\tvalid := isRegionValid(regionName)\n\t\tif !valid {\n\t\t\treturn nil, errors.Errorf(\"not a valid AWS zone (unknown region): %s\", zone)\n\t\t}\n\t} else {\n\t\tglog.Warningf(\"Strict AWS zone checking is disabled. Proceeding with zone: %s\", zone)\n\t}\n\n\tec2, err := awsServices.Compute(regionName)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"error creating AWS EC2 client: %v\", err)\n\t}\n\n\telb, err := awsServices.LoadBalancing(regionName)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"error creating AWS ELB client: %v\", err)\n\t}\n\n\tawsCloud := &Cloud{\n\t\tec2: ec2,\n\t\telb: elb,\n\t\tmetadata: metadata,\n\t\tcfg: cfg,\n\t\tregion: regionName,\n\t}\n\n\tselfAWSInstance, err := awsCloud.buildSelfAWSInstance()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tawsCloud.selfAWSInstance = selfAWSInstance\n\tawsCloud.vpcID = selfAWSInstance.vpcID\n\n\tfilterTags := map[string]string{}\n\tif cfg.Global.KubernetesClusterTag != \"\" {\n\t\tfilterTags[TagNameKubernetesCluster] = cfg.Global.KubernetesClusterTag\n\t} else {\n\t\t// TODO: Clean up double-API query\n\t\tinfo, err := selfAWSInstance.describeInstance()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, tag := range info.Tags {\n\t\t\tif orEmpty(tag.Key) == TagNameKubernetesCluster {\n\t\t\t\tfilterTags[TagNameKubernetesCluster] = orEmpty(tag.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\tif filterTags[TagNameKubernetesCluster] == \"\" {\n\t\tglog.Errorf(\"Tag %q not found; Kubernetes may behave unexpectedly.\", TagNameKubernetesCluster)\n\t}\n\n\tawsCloud.filterTags = filterTags\n\tif len(filterTags) > 0 {\n\t\tglog.Infof(\"AWS cloud filtering on tags: %v\", filterTags)\n\t} else {\n\t\tglog.Infof(\"AWS cloud - no tag filtering\")\n\t}\n\n\treturn awsCloud, nil\n}",
"func NewCloudFormationAPI(t NewCloudFormationAPIT) *CloudFormationAPI {\n\tmock := &CloudFormationAPI{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewMockAzureCloud(location string) *MockAzureCloud {\n\treturn &MockAzureCloud{\n\t\tLocation: location,\n\t\tResourceGroupsClient: &MockResourceGroupsClient{\n\t\t\tRGs: map[string]resources.Group{},\n\t\t},\n\t\tVirtualNetworksClient: &MockVirtualNetworksClient{\n\t\t\tVNets: map[string]network.VirtualNetwork{},\n\t\t},\n\t\tSubnetsClient: &MockSubnetsClient{\n\t\t\tSubnets: map[string]network.Subnet{},\n\t\t},\n\t\tRouteTablesClient: &MockRouteTablesClient{\n\t\t\tRTs: map[string]network.RouteTable{},\n\t\t},\n\t\tNetworkSecurityGroupsClient: &MockNetworkSecurityGroupsClient{\n\t\t\tNSGs: map[string]network.SecurityGroup{},\n\t\t},\n\t\tApplicationSecurityGroupsClient: &MockApplicationSecurityGroupsClient{\n\t\t\tASGs: map[string]network.ApplicationSecurityGroup{},\n\t\t},\n\t\tVMScaleSetsClient: &MockVMScaleSetsClient{\n\t\t\tVMSSes: map[string]compute.VirtualMachineScaleSet{},\n\t\t},\n\t\tVMScaleSetVMsClient: &MockVMScaleSetVMsClient{\n\t\t\tVMs: map[string]compute.VirtualMachineScaleSetVM{},\n\t\t},\n\t\tDisksClient: &MockDisksClient{\n\t\t\tDisks: map[string]compute.Disk{},\n\t\t},\n\t\tRoleAssignmentsClient: &MockRoleAssignmentsClient{\n\t\t\tRAs: map[string]authz.RoleAssignment{},\n\t\t},\n\t\tNetworkInterfacesClient: &MockNetworkInterfacesClient{\n\t\t\tNIs: map[string]network.Interface{},\n\t\t},\n\t\tLoadBalancersClient: &MockLoadBalancersClient{\n\t\t\tLBs: map[string]network.LoadBalancer{},\n\t\t},\n\t\tPublicIPAddressesClient: &MockPublicIPAddressesClient{\n\t\t\tPubIPs: map[string]network.PublicIPAddress{},\n\t\t},\n\t\tNatGatewaysClient: &MockNatGatewaysClient{\n\t\t\tNGWs: map[string]network.NatGateway{},\n\t\t},\n\t}\n}",
"func NewGCEClient() *gce.Cloud {\n\tvar configReader func() io.Reader\n\tif flags.F.ConfigFilePath != \"\" {\n\t\tklog.Infof(\"Reading config from path %q\", flags.F.ConfigFilePath)\n\t\tconfig, err := os.Open(flags.F.ConfigFilePath)\n\t\tif err != nil {\n\t\t\tklog.Fatalf(\"%v\", err)\n\t\t}\n\t\tdefer config.Close()\n\n\t\tallConfig, err := io.ReadAll(config)\n\t\tif err != nil {\n\t\t\tklog.Fatalf(\"Error while reading config (%q): %v\", flags.F.ConfigFilePath, err)\n\t\t}\n\t\tklog.V(4).Infof(\"Cloudprovider config file contains: %q\", string(allConfig))\n\n\t\tconfigReader = generateConfigReaderFunc(allConfig)\n\t} else {\n\t\tklog.V(2).Infof(\"No cloudprovider config file provided, using default values.\")\n\t\tconfigReader = func() io.Reader { return nil }\n\t}\n\n\t// Creating the cloud interface involves resolving the metadata server to get\n\t// an oauth token. If this fails, the token provider assumes it's not on GCE.\n\t// No errors are thrown. So we need to keep retrying till it works because\n\t// we know we're on GCE.\n\tfor {\n\t\tprovider, err := cloudprovider.GetCloudProvider(\"gce\", configReader())\n\t\tif err == nil {\n\t\t\tcloud := provider.(*gce.Cloud)\n\t\t\t// Configure GCE rate limiting\n\t\t\trl, err := ratelimit.NewGCERateLimiter(flags.F.GCERateLimit.Values(), flags.F.GCEOperationPollInterval)\n\t\t\tif err != nil {\n\t\t\t\tklog.Fatalf(\"Error configuring rate limiting: %v\", err)\n\t\t\t}\n\t\t\tcloud.SetRateLimiter(rl)\n\t\t\t// If this controller is scheduled on a node without compute/rw\n\t\t\t// it won't be allowed to list backends. We can assume that the\n\t\t\t// user has no need for Ingress in this case. If they grant\n\t\t\t// permissions to the node they will have to restart the controller\n\t\t\t// manually to re-create the client.\n\t\t\t// TODO: why do we bail with success out if there is a permission error???\n\t\t\tif _, err = cloud.ListGlobalBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {\n\t\t\t\treturn cloud\n\t\t\t}\n\t\t\tklog.Warningf(\"Failed to list backend services, retrying: %v\", err)\n\t\t} else {\n\t\t\tklog.Warningf(\"Failed to get cloud provider, retrying: %v\", err)\n\t\t}\n\t\ttime.Sleep(cloudClientRetryInterval)\n\t}\n}",
"func NewCloudProvider(config *providercfg.Config) (cloudprovider.Interface, error) {\n\t// The global logger has been replaced with the logger we constructed in\n\t// cloud_provider_oci.go so capture it here and then pass it into all components.\n\tlogger := zap.L()\n\tlogger = logger.With(zap.String(\"component\", \"cloud-controller-manager\"))\n\n\tcp, err := providercfg.NewConfigurationProvider(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trateLimiter := client.NewRateLimiter(logger.Sugar(), config.RateLimiter)\n\n\tc, err := client.New(logger.Sugar(), cp, &rateLimiter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.CompartmentID == \"\" {\n\t\tlogger.Info(\"Compartment not supplied in config: attempting to infer from instance metadata\")\n\t\tmetadata, err := metadata.New().Get()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.CompartmentID = metadata.CompartmentID\n\t}\n\n\tif !config.LoadBalancer.Disabled && config.VCNID == \"\" {\n\t\tlogger.Info(\"No VCN provided in cloud provider config. Falling back to looking up VCN via LB subnet.\")\n\t\tsubnet, err := c.Networking().GetSubnet(context.Background(), config.LoadBalancer.Subnet1)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"get subnet for loadBalancer.subnet1\")\n\t\t}\n\t\tconfig.VCNID = *subnet.VcnId\n\t}\n\n\tmetricPusher, err := metrics.NewMetricPusher(logger.Sugar())\n\tif err != nil {\n\t\tlogger.Sugar().With(\"error\", err).Error(\"Metrics collection could not be enabled\")\n\t\t// disable metrics\n\t\tmetricPusher = nil\n\t}\n\n\tif metricPusher != nil {\n\t\tlogger.Info(\"Metrics collection has been enabled\")\n\t} else {\n\t\tlogger.Info(\"Metrics collection has not been enabled\")\n\t}\n\n\treturn &CloudProvider{\n\t\tclient: c,\n\t\tconfig: config,\n\t\tlogger: logger.Sugar(),\n\t\tinstanceCache: cache.NewTTLStore(instanceCacheKeyFn, time.Duration(24)*time.Hour),\n\t\tmetricPusher: metricPusher,\n\t}, nil\n}",
"func newHTTPCloud(config io.Reader) (*httpCloud, error) {\n\tif config != nil {\n\t\tvar cfg Config\n\t\tif err := gcfg.ReadInto(&cfg, config); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Couldn't read config: %v\", err)\n\t\t}\n\n\t\tinstancesURL := cfg.Global.InstancesURL\n\t\t// Validate URL\n\t\t_, err := url.ParseRequestURI(instancesURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Can't parse the instances-url provided: %s\", err)\n\t\t}\n\t\t// Handle Trailing slashes\n\t\tinstancesURL = strings.TrimRight(instancesURL, \"/\")\n\n\t\tschedulerExtensionURL := cfg.Global.SchedulerExtensionURL\n\t\t// Validate URL\n\t\t_, err = url.ParseRequestURI(schedulerExtensionURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Can't parse the scheduler-extension-url provided: %s\", err)\n\t\t}\n\t\t// Handle Trailing slashes\n\t\tschedulerExtensionURL = strings.TrimRight(schedulerExtensionURL, \"/\")\n\n\t\treturn &httpCloud{\n\t\t\tinstancesURL: instancesURL,\n\t\t\tinstancesSupported: cfg.Global.InstancesSupported,\n\t\t\ttcpLoadBalancerSupported: cfg.Global.TcpLoadBalancerSupported,\n\t\t\tzonesSupported: cfg.Global.ZonesSupported,\n\t\t\tclustersSupported: cfg.Global.ClustersSupported,\n\t\t\tschedulerExtensionURL: schedulerExtensionURL,\n\t\t\tschedulerExtensionSupported: cfg.Global.SchedulerExtensionSupported,\n\t\t}, nil\n\t}\n\treturn nil, fmt.Errorf(\"Config file is empty or is not provided\")\n}",
"func newClient() (*storage.Client, error) {\n\tctx := context.Background()\n\n\tbyteKey, err := gcloud.GetDecodedKey()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get gcp key, err: %w\", err)\n\t}\n\tclient, err := storage.NewClient(ctx, option.WithCredentialsJSON(byteKey))\n\tif err != nil {\n\t\tlog.Println(\"failed to login with GCP_KEY, trying with default application credentials...\")\n\t\tclient, err = storage.NewClient(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open Google Cloud Storage client: %w\", err)\n\t\t}\n\t}\n\n\treturn client, nil\n}",
"func NewMockCloudSpanner(t *testing.T, ts time.Time) *MockCloudSpanner {\n\tmcs := &MockCloudSpanner{\n\t\tt: t,\n\t\tmsgs: make(chan MockCtlMsg, 1000),\n\t\treadTs: ts,\n\t\tsessions: map[string]*sppb.Session{},\n\t}\n\treturn mcs\n}",
"func NewGCSUploader(t testing.TB) *GCSUploader {\n\tmock := &GCSUploader{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewGCSClient(t testing.TB) *GCSClient {\n\tmock := &GCSClient{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func New(d diag.Sink, cloudURL string, project *workspace.Project, insecure bool) (Backend, error) {\n\tcloudURL = ValueOrDefaultURL(cloudURL)\n\taccount, err := workspace.GetAccount(cloudURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting stored credentials: %w\", err)\n\t}\n\tapiToken := account.AccessToken\n\n\tclient := client.NewClient(cloudURL, apiToken, insecure, d)\n\tcapabilities := detectCapabilities(d, client)\n\n\treturn &cloudBackend{\n\t\td: d,\n\t\turl: cloudURL,\n\t\tclient: client,\n\t\tcapabilities: capabilities,\n\t\tcurrentProject: project,\n\t}, nil\n}",
"func newCloudlyckeClient() *http.Client {\n\treturn &http.Client{}\n}",
"func NewCloudCommunications()(*CloudCommunications) {\n m := &CloudCommunications{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewMockIonosCloudManager(t testing.TB) *MockIonosCloudManager {\n\tmock := &MockIonosCloudManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
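An end-to-end sketch tying the constructor, the hooks, and the mock cloud together, under the same in-package assumption; all concrete values are illustrative.

_, clusterCIDR, _ := net.ParseCIDR("10.64.0.0/14")
bil := newBaseInstanceList(true, clusterCIDR, 24)
mockCloud := bil.newMockCloud()
// Any Get against the mock lazily materializes an instance with an alias range.
inst, err := mockCloud.Instances().Get(context.Background(), meta.ZonalKey("inst-1", "us-central1-b"))
if err == nil {
	fmt.Println("materialized:", inst.Name, inst.Zone)
}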
GetTask returns a new task for the action | func GetTask(name, action string, conf *config.MountConfig) (iface.Task, error) {
switch action {
case "", "create":
return NewCreateTask(name, conf), nil
case "remove", "rm":
return NewRemoveTask(name, conf), nil
default:
return nil, fmt.Errorf("Invalid mount action %q for task %q", action, name)
}
} | [
"func GetTask(name, action string, conf *config.ComposeConfig) (iface.Task, error) {\n\tcomposeAction, err := getAction(action, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewTask(name, conf, composeAction), nil\n}",
"func GetTask(r *http.Request) *task.Task {\n\tif rv := r.Context().Value(model.ApiTaskKey); rv != nil {\n\t\tif t, ok := rv.(*task.Task); ok {\n\t\t\treturn t\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *RPCClient) GetTask(request rpc.RequestTaskPayload) (*rpc.NewTaskPayloadResponse, error) {\n\tvar resp rpc.NewTaskPayloadResponse\n\tif err := s.performJSONCall(\"POST\", \"/rpc/v1/task/payload\", request, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &resp, nil\n}",
"func (a *agent) GetTask(ctx context.Context, msg *api.AgentID) (*api.Task, error) {\n\tvar task *api.Task = new(api.Task)\n\tselect {\n\tcase task, ok := <-a.work[msg.GetAgentID()]:\n\t\tif ok {\n\t\t\treturn task, nil\n\t\t}\n\t\treturn task, errors.New(\"channel closed\")\n\tdefault:\n\t\treturn task, nil\n\t}\n}",
"func (ds *DNSSuite) GetTask() *boomer.Task {\n\tvar fn func()\n\n\tswitch ds.Type {\n\tcase dns.TypeA:\n\t\tfn = ds.doA\n\t}\n\n\treturn &boomer.Task{\n\t\tName: \"dns\",\n\t\tOnStart: func() {},\n\t\tOnStop: func() {},\n\t\tFn: fn,\n\t}\n}",
"func (d *dispatcher) Get(state string) *Task {\n\ttask, ok := d.Tasks[state]\n\tif !ok {\n\t\treturn &Task{\n\t\t\tHandler: NotFoundHandler,\n\t\t}\n\t}\n\treturn task\n}",
"func GetTask(c common.Client, uri string) (*Task, error) {\n\tvar task Task\n\treturn &task, task.Get(c, uri, &task)\n}",
"func (v1 *V1) GetTask(w http.ResponseWriter, r *http.Request) {\n\ttaskID := chi.URLParam(r, \"taskID\")\n\tshouldDeleteTask := false\n\tdeleteParam := r.URL.Query().Get(\"delete\")\n\tif deleteParam == \"1\" {\n\t\tshouldDeleteTask = true\n\t}\n\n\ttask := v1.metaCrawlSvc.TaskByID(taskID)\n\tif task == nil {\n\t\tv1.responseErrorJSON(w, \"task not found\", 404)\n\t\treturn\n\t}\n\n\ttaskStatus := task.Status()\n\tswitch taskStatus {\n\tcase metacrawl.TaskInProgress:\n\t\tv1.responseJSON(w, \"task in progress\", 204)\n\t\treturn\n\tcase metacrawl.TaskCompleted:\n\t\tif shouldDeleteTask {\n\t\t\tv1.metaCrawlSvc.DeleteTaskByID(taskID)\n\t\t}\n\n\t\tv1.responseCSV(w, taskID, task.Render(), 200)\n\t}\n}",
"func (_Contract *ContractCallerSession) GetTask(i *big.Int) (struct {\n\tActive bool\n\tAssignment *big.Int\n\tProposalID *big.Int\n}, error) {\n\treturn _Contract.Contract.GetTask(&_Contract.CallOpts, i)\n}",
"func GetTask(name string) *Task {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tlog.Info(\"Getting %s in %v\", name, tasksMap[name])\n\n\treturn tasksMap[name]\n}",
"func (r *OrbitRunner) getTask(name string) *orbitTask {\n\tfor _, task := range r.config.Tasks {\n\t\tif name == task.Use {\n\t\t\treturn task\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (ts *TaskService) Get(reqdata *TaskGetRequest) (*TaskGetResponse, *http.Response, error) {\n\n\tu := fmt.Sprintf(\"tasks/%s\", reqdata.UUID)\n\n\tu, err := addOptions(u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := ts.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar result *TaskGetResponse\n\tresp, err := ts.client.Do(req, &result)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn result, resp, nil\n}",
"func (cl *RedisClient) GetTask() (*RedisTask, error) {\n\tval, err := cl.client.Keys(\"tasks:*\").Result()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result *RedisTask\n\tvar key string\n\ttxf := func(tx *redis.Tx) error {\n\t\tresult = nil\n\t\tstate, err := tx.HGet(key, \"state\").Result()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif state == \"new\" {\n\t\t\tinputfile, err := cl.client.HGet(key, \"inputfile\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\toutfile, err := cl.client.HGet(key, \"outfile\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresult = &RedisTask{}\n\t\t\tresult.InputFile = inputfile\n\t\t\tresult.OutFile = outfile\n\t\t\tresult.TaskName = key\n\t\t\t_, err = tx.HSet(key, \"state\", \"holded\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, key = range val {\n\t\terr := cl.client.Watch(txf, key)\n\t\tif err == redis.TxFailedErr {\n\t\t\treturn nil, err\n\t\t}\n\t\tif result != nil {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\treturn nil, nil\n}",
"func (s *state) GetTask(exID string) (*mesos.Task, error) {\n\t// Check if task is in Launched Tasks list\n\tfor _, t := range s.st.GetTasks.LaunchedTasks {\n\t\tif s.isMatchingTask(&t, exID) {\n\t\t\treturn &t, nil\n\t\t}\n\t}\n\n\t// Check if task is in Queued Tasks list\n\tfor _, t := range s.st.GetTasks.QueuedTasks {\n\t\tif s.isMatchingTask(&t, exID) {\n\t\t\treturn &t, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"unable to find task matching executor id %s\", exID)\n}",
"func (_Contract *ContractCaller) GetTask(opts *bind.CallOpts, i *big.Int) (struct {\n\tActive bool\n\tAssignment *big.Int\n\tProposalID *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"getTask\", i)\n\n\toutstruct := new(struct {\n\t\tActive bool\n\t\tAssignment *big.Int\n\t\tProposalID *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool)\n\toutstruct.Assignment = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\toutstruct.ProposalID = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}",
"func GetUserTask(username string, id int64) (Task, error) {\n\t// TODO\n\treturn *&Task{}, nil\n}",
"func (svc *Service) Get(ctx context.Context, id uuid.UUID) (*domain.Task, error) {\n\tsvc.taskRequestPolled.Inc()\n\treturn svc.taskGateway.FindByID(ctx, id)\n}",
"func GetTask(db *sql.DB, taskuuid string) (*Task, error) {\n\tdbLogger.Debug(\"GetTask...\")\n\tvar task = new(Task)\n\tvar err error\n\tvar stmt *sql.Stmt\n\n\tif err := db.Ping(); err != nil {\n\t\tdbLogger.Fatal(ERROR_DB_NOT_CONNECTED)\n\t\treturn nil, errors.New(ERROR_DB_NOT_CONNECTED)\n\t}\n\n\tstmt, err = db.Prepare(\"SELECT rowid, taskuuid, useruuid, keyword, bc_txuuid, type, state, payload FROM task WHERE taskuuid = ? and deleted = 0\")\n\tif err != nil {\n\t\tdbLogger.Errorf(\"Failed preparing statement: %v\", err)\n\t\treturn nil, fmt.Errorf(ERROR_DB_PREPARED + \": %v\", err)\n\t}\n\tdefer stmt.Close()\n\n\tif err := stmt.QueryRow(taskuuid).Scan(&task.RowID, &task.TaskUUID, &task.UserUUID, &task.Keyword, &task.BC_txuuid, &task.Type, &task.State, &task.Payload); err != nil {\n\t\tdbLogger.Errorf(\"Failed getting task by taskuuid %s: %v\", taskuuid, err)\n\t\treturn nil, fmt.Errorf(ERROR_DB_QUERY + \": %v\", err)\n\t}\n\tdbLogger.Debugf(\"Get task by taskuuid %s: \\n%#v\", taskuuid, *task)\n\n\treturn task, nil\n}",
"func (t *ModTask) Task() task.Task { return t }"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewCommonTime returns a simple 4/4 meter at the specified tempo | func NewCommonTime(bpm float64) *Meter {
return &Meter{
BeatsPerMinute: bpm,
BeatsPerBar: 4,
BeatValue: notes.Quarter,
}
} | [
"func monotime() int64",
"func (t Time) Nanosecond() int {}",
"func newFakeTime() {\n\tfakeCurrentTime = fakeTime().Add(time.Hour * 24 * 2)\n}",
"func NanoTime() int64",
"func (t Time) Clock() (hour, min, sec int) {}",
"func New(h, m int) Time {\n\treturn minToTime(h * 60 + m)\n}",
"func newTime(year int, month time.Month, day int, hourMinSec ...int) time.Time {\n\tvar hour, min, sec int\n\n\tswitch len(hourMinSec) {\n\tcase 0:\n\t\t// nothing\n\tcase 3:\n\t\tsec = hourMinSec[2]\n\t\tfallthrough\n\tcase 2:\n\t\tmin = hourMinSec[1]\n\t\tfallthrough\n\tcase 1:\n\t\thour = hourMinSec[0]\n\tdefault:\n\t\tpanic(\"too many arguments\")\n\t}\n\n\treturn time.Date(year, month, day, hour, min, sec, 0, time.UTC)\n}",
"func new_time() time.Duration {\n\treturn time.Duration((rand.Intn(300) + 150)) * time.Millisecond\n}",
"func MinuteHourOfTheDay(minuteOfTheDay int16) int8 { return int8(minuteOfTheDay / 60) }",
"func NewMinute(t time.Time, lsw, dut1 int) (Minute, error) {\n\tt = t.UTC() // Don't care about local times\n\tmin := Minute{\n\t\tTime: t,\n\t\tlsw: lsw == 1,\n\t\tdut1: dut1,\n\t}\n\tbits := min.bits[:]\n\n\tmarkers := []int{9, 19, 29, 39, 49, 59} // P1-P6\n\tbits[0] = bitNone // Minute mark\n\tfor _, v := range markers {\n\t\tbits[v] = bitMarker\n\t}\n\n\tmidnight := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)\n\tendOfDay := midnight.AddDate(0, 0, 1)\n\n\tdst1 := 0 // DST status at 00:00Z today\n\tif isDST(midnight) {\n\t\tdst1 = 1\n\t}\n\tdst2 := 0 // DST status at 24:00Z today\n\tif isDST(endOfDay) {\n\t\tdst2 = 1\n\t}\n\n\tyear1s := t.Year() % 10\n\tyear10s := t.Year()%100 - year1s\n\n\tminute1s := t.Minute() % 10\n\tminute10s := t.Minute()%100 - minute1s\n\n\thour1s := t.Hour() % 10\n\thour10s := t.Hour()%100 - hour1s\n\n\tdayOfYear1s := t.YearDay() % 10\n\tdayOfYear10s := t.YearDay()%100 - dayOfYear1s\n\tdayOfYear100s := t.YearDay()%1000 - dayOfYear1s - dayOfYear10s\n\n\tdut1Sign, dut1Magnitude := 1, dut1 // dut1Sign is positive\n\tif dut1 < 0 {\n\t\tdut1Sign = 0\n\t\tdut1Magnitude *= -1\n\t}\n\tif dut1Magnitude > 7 {\n\t\tdut1Magnitude = 7 // Only 3 bits for this value.\n\t}\n\n\terr := minuteEncoder.encode(bits, []int{\n\t\t0, 0, dst1, lsw, year1s, 0, 0,\n\t\tminute1s, minute10s, 0, 0,\n\t\thour1s, hour10s, 0, 0,\n\t\tdayOfYear1s, dayOfYear10s, 0,\n\t\tdayOfYear100s, 0, 0,\n\t\tdut1Sign, year10s, dst2, dut1Magnitude, 0,\n\t})\n\tif err != nil {\n\t\treturn min, errors.Wrapf(err, \"Cannot encode minute %s\", t.Format(\"15:04\"))\n\t}\n\n\tmin.lastSecond = lastSecond(t, min.lsw)\n\n\treturn min, nil\n}",
"func timeDistaneToTempo(msecA, msecB int64) (bpm float64) {\n\treturn float64(60000000) / float64(msecB-msecA)\n}",
"func MinuteOfTheDay(checkTime *time.Time) int16 { return int16((checkTime.Hour() * 60) + checkTime.Minute()) }",
"func nanotime() int64",
"func PrintMetTime() {\n\tfmt.Printf(\"%02d:%02d\", metHour(), metMinute())\n}",
"func NewGTime() GTime {\n\treturn GTime{From: \"now-24h\", To: \"now\"}\n}",
"func (am *AutogitManager) commonTime(ctx context.Context) time.Time {\n\toffset, haveOffset := am.config.MDServer().OffsetFromServerTime()\n\tif !haveOffset {\n\t\tam.log.CDebugf(ctx, \"No offset, cannot use common time; \"+\n\t\t\t\"falling back to local time\")\n\t\treturn am.config.Clock().Now()\n\t}\n\treturn am.config.Clock().Now().Add(-offset)\n}",
"func (*Root) ModTime() time.Time { return time.Time{} }",
"func GetSignalTime(timeUnit int32, refDate time.Time) time.Time {\n\tvar t time.Time\n\tswitch timeUnit {\n\tcase SignalTimeUnit_NOW:\n\t\t{\n\t\t\treturn refDate.UTC().Truncate(time.Hour * 24)\n\t\t}\n\tcase SignalTimeUnit_MONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -30)\n\t\t}\n\tcase SignalTimeUnit_BIMONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -60)\n\t\t}\n\tcase SignalTimeUnit_QUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -90)\n\t\t}\n\tcase SignalTimeUnit_HALFYEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -180)\n\t\t}\n\tcase SignalTimeUnit_THIRDQUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -270)\n\t\t}\n\tcase SignalTimeUnit_YEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -365)\n\t\t}\n\t}\n\n\treturn t.Truncate(time.Hour * 24)\n}",
"func (f File) CTime() (uint32, uint32) {\n\treturn 0, 0\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
New returns a new meter with the specified parameters | func New(bpm, beatsPerBar float64, beatValue notes.Duration) *Meter {
return &Meter{
BeatsPerMinute: bpm,
BeatsPerBar: beatsPerBar,
BeatValue: beatValue,
}
} | [
"func NewMeter(t time.Time, interval int, staleThreshold int) Meter {\n\tm := &StandardMeter{\n\t\t0,\n\t\tNewEWMA1(t),\n\t\tNewEWMA5(t),\n\t\tNewEWMA15(t),\n\t\tt,\n\t\tt,\n\t\tinterval,\n\t\tstaleThreshold,\n\t}\n\n\treturn m\n}",
"func NewMeter(name string, options ...Option) Meter {\n\treturn newMeter(name, options...)\n}",
"func NewMeter(name string, snapshotInterval time.Duration) *Meter {\n\tm := Meter{}\n\tm.name = name\n\tm.printInterval = snapshotInterval\n\tm.Reset()\n\treturn &m\n}",
"func CreateMeter() Meter {\n\n\t/*\n\t * Create a new meter struct.\n\t */\n\tm := meterStruct{\n\t\tcurrentValue: 0.0,\n\t\tpeakValue: 0.0,\n\t\tsampleCounter: 0,\n\t}\n\n\treturn &m\n}",
"func NewMeter(name string) metics.Meter {\n\tif !Enabled {\n\t\treturn new(metics.NilMeter)\n\t}\n\treturn metics.GetOrRegisterMeter(name, metics.DefaultRegistry)\n}",
"func New(p Params) (*Worker, error) {\n\tif p.SampleDir == \"\" {\n\t\treturn nil, fmt.Errorf(\"no sample directory set\")\n\t}\n\tif p.MeterAddr == \"\" {\n\t\treturn nil, fmt.Errorf(\"no meter address set\")\n\t}\n\tif p.Now == nil {\n\t\tp.Now = time.Now\n\t}\n\tif p.Interval == 0 {\n\t\tp.Interval = DefaultInterval\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tw := &Worker{\n\t\tp: p,\n\t\tctx: ctx,\n\t\tclose: cancel,\n\t}\n\tw.wg.Add(1)\n\tgo func() {\n\t\tif err := w.run(); err != nil {\n\t\t\tlog.Printf(\"sample worker for meter at %q failed: %v\", w.p.MeterAddr, err)\n\t\t}\n\t}()\n\treturn w, nil\n}",
"func New(name string, rate float64, tags ...string) Metric {\n\treturn Metric{name, rate, tags}\n}",
"func NewMeter(options ...meterOption) *ProgressMeter {\n\tm := &ProgressMeter{\n\t\tlogger: &progressLogger{},\n\t\tstartTime: time.Now(),\n\t\tfileIndex: make(map[string]int64),\n\t\tfileIndexMutex: &sync.Mutex{},\n\t\tfinished: make(chan interface{}),\n\t}\n\n\tfor _, opt := range options {\n\t\topt(m)\n\t}\n\n\treturn m\n}",
"func NewMeter(name string) metrics.Meter {\n\treturn metrics.GetOrRegisterMeter(name, metrics.DefaultRegistry)\n}",
"func New() MME {\n\t// TODO: Implement this!\n\toperationCosts = make(map[rpcs.Operation]int)\n\toperationCosts[rpcs.SMS] = -1\n\toperationCosts[rpcs.Call] = -5\n\toperationCosts[rpcs.Load] = 10\n\tm := new(mme)\n\tm.state = make(map[uint64]rpcs.MMEState)\n\tm.stateMutex = new(sync.Mutex)\n\treturn m\n}",
"func NewMeter(id string, token string, expiration int, credits int) (*Meter, error) {\n\terr := db.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(id))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Token cannot be re-used, fill only if it is unseen\n\t\ttokens, err := b.CreateBucketIfNotExists([]byte(\"Tokens\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Check if token hasn't already been used\n\t\tif t := tokens.Get([]byte(token)); t == nil {\n\t\t\t// Add token to bucket\n\t\t\ttokens.Put([]byte(token), []byte(strconv.Itoa(expiration)))\n\t\t\t// Add credits to bucket\n\t\t\tc := b.Get([]byte(\"Credits\"))\n\t\t\tif c != nil {\n\t\t\t\tbalance, err := strconv.Atoi(string(c))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.WithField(\"balance\", balance).WithField(\"credits\", credits).Info(\"Inserting new credits\")\n\t\t\t\tb.Put([]byte(\"Credits\"), []byte(strconv.Itoa(balance+credits)))\n\t\t\t} else {\n\t\t\t\tlog.WithField(\"credits\", credits).Info(\"Inserting new credits\")\n\t\t\t\tb.Put([]byte(\"Credits\"), []byte(strconv.Itoa(credits)))\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Meter{ID: id, db: db}, nil\n}",
"func CreateMeter(numChannels uint32, names []string) (Meter, error) {\n\tnumNames := len(names)\n\tnumNames32 := uint32(numNames)\n\n\t/*\n\t * Check if number of channel names matches number of channels.\n\t */\n\tif numChannels != numNames32 {\n\t\treturn nil, fmt.Errorf(\"Failed to create channel meter. Requested channel meter for %d channels, but provided %d channel names.\", numChannels, numNames)\n\t} else {\n\t\tchannelMeters := make([]*channelMeterStruct, numChannels)\n\n\t\t/*\n\t\t * Create the channel meters.\n\t\t */\n\t\tfor i := range channelMeters {\n\t\t\tname := names[i]\n\n\t\t\t/*\n\t\t\t * Create a new channel meter.\n\t\t\t */\n\t\t\tchannelMeter := &channelMeterStruct{\n\t\t\t\tchannelName: name,\n\t\t\t\tenabled: false,\n\t\t\t\tcurrentValue: 0.0,\n\t\t\t\tpeakValue: 0.0,\n\t\t\t\tsampleCounter: 0,\n\t\t\t}\n\n\t\t\tchannelMeters[i] = channelMeter\n\t\t}\n\n\t\t/*\n\t\t * Create a new level meter.\n\t\t */\n\t\tmeter := meterStruct{\n\t\t\tchannelMeters: channelMeters,\n\t\t\tenabled: false,\n\t\t}\n\n\t\treturn &meter, nil\n\t}\n\n}",
"func New(name errors.Op) *Metric {\n\treturn &Metric{\n\t\tName: name,\n\t}\n}",
"func New() *Monitor {\n\tmonitor := &Monitor{}\n\tmonitor.Id = time.Now().Unix()\n\tmonitor.ticker = *time.NewTicker(time.Minute)\n\tlog.Printf(\"Initializing new gomon monitor. Monitor id: %d\", monitor.Id)\n\n\treturn monitor\n}",
"func (m *Manager) Meter(delay time.Duration) *Meter {\n\treturn &Meter{\n\t\tm: m,\n\t\tdelay: delay,\n\t\tnext: time.Now(),\n\t}\n}",
"func New() MME {\n\tvar m MME = new(mme)\n\treturn m\n}",
"func NewMeasurement(name string) Measurement {\n\tattrs := make(map[string]interface{})\n\treturn Measurement{\n\t\tName: name,\n\t\tAttributes: attrs,\n\t}\n}",
"func New(interval time.Duration) *Estimator {\n\treturn &Estimator{\n\t\tinterval: rtptime.FromDuration(interval, rtptime.JiffiesPerSec),\n\t\ttime: rtptime.Now(rtptime.JiffiesPerSec),\n\t}\n}",
"func New(monster int) *Monster {\n\treturn &Monster{\n\t\tid: monster,\n\t\tInfo: monsterData[monster],\n\t\tDisplaced: Empty{},\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NoteToTime converts a notes.Duration to a time.Duration based on the meter | func (m Meter) NoteToTime(noteVal notes.Duration) time.Duration {
return time.Duration((float64(noteVal/m.BeatValue) / m.BeatsPerMinute) * float64(time.Minute))
} | [
"func ToDuration(tm uint64, hz uint32) time.Duration {\n\treturn time.Duration(tm * uint64(time.Second) / uint64(hz))\n}",
"func minutesToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Minute)\n}",
"func (e *Exact) convertToDuration() time.Duration {\n\tif isValidUnitOfTime(e.Unit) {\n\t\treturn convertTimeToDuration(e.Quantity, e.Unit)\n\t}\n\tpanic(\"'unit' is not a valid unit of time\")\n}",
"func (r Rest) Duration(measure time.Duration) time.Duration {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn time.Duration(float64(measure) * fraq)\n}",
"func (m Meter) NoteToFreq(noteVal notes.Duration) float64 {\n\tduration := m.NoteToTime(noteVal)\n\treturn 1 / float64(duration.Seconds())\n}",
"func adjTime(context interface{}, value string) (time.Time, error) {\n\n\t// The default value is in seconds unless overridden.\n\t// #time:0 Current date/time\n\t// #time:-3600 3600 seconds in the past\n\t// #time:3m\t\t3 minutes in the future.\n\n\t// Possible duration types.\n\t// \"ns\": int64(Nanosecond),\n\t// \"us\": int64(Microsecond),\n\t// \"ms\": int64(Millisecond),\n\t// \"s\": int64(Second),\n\t// \"m\": int64(Minute),\n\t// \"h\": int64(Hour),\n\n\t// Do we have a single value?\n\tif len(value) == 1 {\n\t\tval, err := strconv.Atoi(value[0:1])\n\t\tif err != nil {\n\t\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[0:1])\n\t\t}\n\n\t\tif val == 0 {\n\t\t\treturn time.Now().UTC(), nil\n\t\t}\n\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n\n\t// Do we have a duration type and where does the\n\t// actual duration value end\n\tvar typ string\n\tvar end int\n\n\t// The end byte position for the last character in the string.\n\tePos := len(value) - 1\n\n\t// Look at the very last character.\n\tt := value[ePos:]\n\tswitch t {\n\n\t// Is this a minute or hour? [3m]\n\tcase \"m\", \"h\":\n\t\ttyp = t\n\t\tend = ePos // Position of last chr in value.\n\n\t// Is this a second or other duration? [3s or 3us]\n\tcase \"s\":\n\t\ttyp = t // s for 3s\n\t\tend = ePos // 3 for 3s\n\n\t\t// Is this smaller than a second? [ns, us, ms]\n\t\tif len(value) > 2 {\n\t\t\tt := value[ePos-1 : ePos]\n\t\t\tswitch t {\n\t\t\tcase \"n\", \"u\", \"m\":\n\t\t\t\ttyp = value[ePos-1:] // us for 3us\n\t\t\t\tend = ePos - 1 // 3 for 3us\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\ttyp = \"s\" // s for 3600\n\t\tend = ePos + 1 // 0 for 3600\n\t}\n\n\t// Check if we are to negative the value.\n\tvar start int\n\tif value[0] == '-' {\n\t\tstart = 1\n\t}\n\n\t// Check the remaining bytes is an integer value.\n\tval, err := strconv.Atoi(value[start:end])\n\tif err != nil {\n\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[start:end])\n\t}\n\n\t// Do we have to negate the value?\n\tif start == 1 {\n\t\tval *= -1\n\t}\n\n\t// Calcuate the time value.\n\tswitch typ {\n\tcase \"ns\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Nanosecond).UTC(), nil\n\tcase \"us\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Microsecond).UTC(), nil\n\tcase \"ms\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Millisecond).UTC(), nil\n\tcase \"m\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Minute).UTC(), nil\n\tcase \"h\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Hour).UTC(), nil\n\tdefault:\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n}",
"func toDuration(i interface{}) time.Duration {\n\tv, err := toDurationE(i)\n\tpanicerr(err)\n\treturn v\n}",
"func toDurationE(i interface{}) (d time.Duration, err error) {\n\ti = indirect(i)\n\n\tswitch s := i.(type) {\n\tcase time.Duration:\n\t\treturn s, nil\n\tcase int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:\n\t\td = time.Duration(toInt64(s))\n\t\treturn\n\tcase float32, float64:\n\t\td = time.Duration(toFloat64(s))\n\t\treturn\n\tcase string:\n\t\tif strings.ContainsAny(s, \"nsuµmh\") {\n\t\t\td, err = time.ParseDuration(s)\n\t\t} else {\n\t\t\td, err = time.ParseDuration(s + \"ns\")\n\t\t}\n\t\treturn\n\tdefault:\n\t\terr = fmt.Errorf(\"unable to cast %#v of type %T to Duration\", i, i)\n\t\treturn\n\t}\n}",
"func ToMillis(d *duration.Duration) int64 {\n\tdur, err := ptypes.Duration(d)\n\tif err != nil {\n\t\tlog.Panic(\"Error converting durationpb to Duration \", err)\n\t}\n\n\treturn dur.Milliseconds()\n}",
"func convertTime(time uint64, stream_uot, target_uot UnitOfTime) uint64 {\n\tunitmultiplier := map[UnitOfTime]uint64{\n\t\tUOT_NS: 1000000000,\n\t\tUOT_US: 1000000,\n\t\tUOT_MS: 1000,\n\t\tUOT_S: 1}\n\treturn time / unitmultiplier[stream_uot] * unitmultiplier[target_uot]\n}",
"func DurationToMillis(dur *time.Duration) int64 { return int64(float64(dur.Nanoseconds()) / nanosecondsPerMillisecond) }",
"func ToMillis(t string) int64 {\n\tparsed, err := time.ParseDuration(t)\n\tif err == nil {\n\t\treturn int64(parsed) / 1000000\n\t}\n\treturn -1\n}",
"func (e PrecisionTiming) durationToMs(x time.Duration) float64 {\n\treturn float64(x) / float64(time.Millisecond)\n}",
"func (q MetricTicks) Duration(tempoBPM uint32, deltaTicks uint32) time.Duration {\n\tif q == 0 {\n\t\tq = defaultMetric\n\t}\n\t// (60000 / T) * (d / R) = D[ms]\n\t//\tdurQnMilli := 60000 / float64(tempoBPM)\n\t//\t_4thticks := float64(deltaTicks) / float64(uint16(q))\n\tres := 60000000000 * float64(deltaTicks) / (float64(tempoBPM) * float64(uint16(q)))\n\t//fmt.Printf(\"what: %vns\\n\", res)\n\treturn time.Duration(int64(math.Round(res)))\n\t//\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}",
"func msToDuration(ms int64) time.Duration {\n\treturn time.Duration(ms * int64(time.Millisecond))\n}",
"func durToMsec(dur time.Duration) string {\n\treturn fmt.Sprintf(\"%dms\", dur/time.Millisecond)\n}",
"func (d *Delay) TimeDuration() time.Duration {\n\treturn time.Duration(d.Duration*1000) * time.Millisecond\n}",
"func (n *Note) measure() {\n\tvar samples int\n\tlength := wholeNote + (wholeNote / 100 * 4 * (4 - n.tempo)) // 4% per tempo unit\n\tswitch n.duration {\n\tcase 'W':\n\t\tsamples = length\n\tcase 'H':\n\t\tsamples = length / 2\n\tcase 'Q':\n\t\tsamples = length / 4\n\tcase 'E':\n\t\tsamples = length / 8\n\tcase 'S':\n\t\tsamples = length / 16\n\tcase 'T':\n\t\tsamples = length / 32\n\tcase 'I':\n\t\tsamples = length / 64\n\t}\n\n\tif samples > 0 {\n\t\t// Apply dot measure\n\t\tif n.dotted {\n\t\t\tsamples += samples / 2\n\t\t}\n\t}\n\n\tn.samples = samples\n}",
"func TimeDuration(t string) (time.Duration, error) {\n\ttc, err := strconv.Atoi(t)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn time.Duration(tc), nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NoteToFreq converts a notes.Duration into a frequency with period equal to that note length | func (m Meter) NoteToFreq(noteVal notes.Duration) float64 {
duration := m.NoteToTime(noteVal)
return 1 / float64(duration.Seconds())
} | [
"func midi_to_frequency(note uint) float32 {\n\treturn float32(A5_FREQUENCY * math.Pow(2.0, float64(int(note)-MIDI_NOTE_A5)/NOTES_IN_OCTAVE))\n}",
"func getFreq(beatLen int) float64 {\n\treturn float64(fss) / float64(beatLen)\n}",
"func toFreq(s semi, tonic freq) freq {\n\treturn tonic * freq(math.Pow(root12, float64(s)))\n}",
"func (c *Config) FrequencyDur() time.Duration {\n\tconfigLock.RLock()\n\tdefer configLock.RUnlock()\n\n\tif c.Frequency == 0 {\n\t\treturn callhomeCycleDefault\n\t}\n\n\treturn c.Frequency\n}",
"func (m Meter) NoteToTime(noteVal notes.Duration) time.Duration {\n\treturn time.Duration((float64(noteVal/m.BeatValue) / m.BeatsPerMinute) * float64(time.Minute))\n}",
"func (m *TermsExpiration) GetFrequency()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {\n return m.frequency\n}",
"func ChanToFreq(ch string) string {\n\tif ch == \"14\" {\n\t\treturn \"2484\"\n\t}\n\tchInt, err := strconv.Atoi(ch)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tif 1 <= chInt && chInt <= 13 {\n\t\tfreq := ((chInt - 1) * 5) + 2412\n\t\treturn strconv.Itoa(freq)\n\t}\n\tfreqMap := map[int]int{\n\t\t36: 5180, 38: 5190, 40: 5200, 42: 5210, 44: 5220, 46: 5230, 48: 5240, 50: 5250, 52: 5260, 54: 5270, 56: 5280, 58: 5290,\n\t\t60: 5300, 62: 5310, 64: 5320, 100: 5500, 102: 5510, 104: 5520, 106: 5530, 108: 5540, 110: 5550, 112: 5560, 114: 5570,\n\t\t116: 5580, 118: 5590, 120: 5600, 122: 5610, 124: 5620, 126: 5630, 128: 5640, 132: 5660, 134: 5670, 136: 5680, 138: 5690,\n\t\t140: 5700, 142: 5710, 144: 5720, 149: 5745, 151: 5755, 153: 5765, 155: 5775, 157: 5785, 159: 5795, 161: 5805, 165: 5825,\n\t\t169: 5845, 173: 5865, 183: 4915, 184: 4920, 185: 4925, 187: 4935, 188: 4940, 189: 4945, 192: 4960, 196: 4980,\n\t}\n\n\tfreq, ok := freqMap[chInt]\n\tif ok {\n\t\treturn strconv.Itoa(freq)\n\t}\n\n\treturn \"\"\n}",
"func NewNote(vol float64, len time.Duration, freq ...float64) *Note {\n\treturn &Note{\n\t\tVolume: vol,\n\t\tFrequency: freq,\n\t\tOctave: 1.0,\n\t\tLength: len,\n\t}\n}",
"func (c *Config) GetFrequency() time.Duration {\n\tif c.FrequencyInMS == 0 {\n\t\treturn time.Second\n\t}\n\n\treturn time.Duration(c.FrequencyInMS) * time.Millisecond\n}",
"func (n Note) Length() time.Duration { return 0 }",
"func (o SchedulePropertiesOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ScheduleProperties) *string { return v.Frequency }).(pulumi.StringPtrOutput)\n}",
"func wrapper_audioFreq(t *eval.Thread, in []eval.Value, out []eval.Value) {\n\tif uiSettings.Terminated() {\n\t\treturn\n\t}\n\n\tfreq := uint(in[0].(eval.UintValue).Get(t))\n\n\tmutex.Lock()\n\tuiSettings.SetAudioFreq(freq)\n\tmutex.Unlock()\n}",
"func (sr Series) Freq() float64 {\n\tvar freq float64\n\tif sr.Dtype == \"string\" {\n\t\tif values, ok := sr.Values.(helper.NumpythonicStringArray); ok {\n\t\t\t_, val := values.MostCommon(1)\n\t\t\tfreq = float64(val[0])\n\t\t}\n\t}\n\n\tif sr.Dtype == \"float64\" {\n\t\tif values, ok := sr.Values.(helper.NumpythonicFloatArray); ok {\n\t\t\t_, val := values.MostCommon(1)\n\t\t\tfreq = float64(val[0])\n\t\t}\n\t}\n\n\treturn freq\n}",
"func (f Frequency) period() time.Duration {\n\treturn time.Duration(1000000000.0 / float64(f))\n}",
"func (n *Note) measure() {\n\tvar samples int\n\tlength := wholeNote + (wholeNote / 100 * 4 * (4 - n.tempo)) // 4% per tempo unit\n\tswitch n.duration {\n\tcase 'W':\n\t\tsamples = length\n\tcase 'H':\n\t\tsamples = length / 2\n\tcase 'Q':\n\t\tsamples = length / 4\n\tcase 'E':\n\t\tsamples = length / 8\n\tcase 'S':\n\t\tsamples = length / 16\n\tcase 'T':\n\t\tsamples = length / 32\n\tcase 'I':\n\t\tsamples = length / 64\n\t}\n\n\tif samples > 0 {\n\t\t// Apply dot measure\n\t\tif n.dotted {\n\t\t\tsamples += samples / 2\n\t\t}\n\t}\n\n\tn.samples = samples\n}",
"func frequencyFromSemitone(semitone int) float32 {\n\t// A4 is 440 Hz, 12 semitones per octave\n\treturn float32(440 * math.Pow(2, float64(semitone-69)/12))\n}",
"func (o AnomalySubscriptionOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AnomalySubscription) pulumi.StringOutput { return v.Frequency }).(pulumi.StringOutput)\n}",
"func NewFrequency(occurrences uint64, duration time.Duration) (Frequency, error) {\n\tif duration < time.Nanosecond {\n\t\treturn Frequency(math.MaxFloat64), errors.New(\"duration must be greater than 0\")\n\t}\n\treturn Frequency(float64(occurrences) / duration.Seconds()), nil\n}",
"func (o KubernetesClusterMaintenanceWindowAutoUpgradePtrOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *KubernetesClusterMaintenanceWindowAutoUpgrade) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Frequency\n\t}).(pulumi.StringPtrOutput)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GenerateJWTToken generates a JWT token with the username, signed by the given secret key | func GenerateJWTToken(userName, jwtAccSecretKey string) (string, error) {
claims := jwt.MapClaims{
"username": userName,
"ExpiresAt": jwt.TimeFunc().Add(1 * time.Minute).Unix(),
"IssuedAt": jwt.TimeFunc().Unix(),
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
return token.SignedString([]byte(jwtAccSecretKey))
} | [
"func GenerateJWTToken(parent context.Context, secretKey string, userId int64) (string, error) {\n\t_, cancel := context.WithTimeout(parent, time.Duration(1)*time.Second)\n\tdefer cancel()\n\n\t// Timestamp the beginning.\n\tnow := time.Now()\n\n\tuserIdStr := fmt.Sprintf(\"%d\", userId)\n\n\t// Define a signer.\n\ths256 := jwt.NewHS256(secretKey)\n\tjot := &jwt.JWT{\n\t\tIssuer: issuer,\n\t\tSubject: userIdStr,\n\t\tAudience: audience,\n\t\tExpirationTime: now.Add(24 * 30 * 12 * time.Hour).Unix(), // token is valid for 1 year\n\t\tNotBefore: now.Unix(), // token can be used right now once it generated\n\t\tIssuedAt: now.Unix(),\n\t\tID: userIdStr,\n\t}\n\n\tjot.SetAlgorithm(hs256)\n\tpayload, err := jwt.Marshal(jot)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttokenBytes, err := hs256.Sign(payload)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(tokenBytes), nil\n}",
"func GenerateJWTToken(username string, tokenDuration time.Duration) (tokenString string, err error) {\n\t//start generate jwt token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"exp\"] = time.Now().Add(tokenDuration).Unix()\n\tclaims[\"iat\"] = time.Now().Unix()\n\tclaims[\"sub\"] = username\n\t//claims[\"email\"] = form.Email\n\ttoken.Claims = claims\n\ttokenString, err = token.SignedString([]byte(SecretKey))\n\treturn tokenString, err\n}",
"func GenerateJWT(username string, session *r.Session) string {\n\tvar jwt string\n\tdb := os.Getenv(\"DB\")\n\ttokenTable := os.Getenv(\"TOKENTABLE\")\n\tsalt := randStringBytes(32)\n\tu64 := b64.URLEncoding.EncodeToString([]byte(username))\n\ts64 := b64.URLEncoding.EncodeToString([]byte(salt))\n\thash := computeHMAC(u64 + \".\" + s64)\n\th := u64 + \".\" + s64 + \".\" + b64.URLEncoding.EncodeToString([]byte(hash))\n\t// Write to token table\n\tif !CheckUserExists(username, tokenTable, session) {\n\t\tauth := AuthToken{username, h}\n\t\t// fmt.Println(auth)\n\t\tr.DB(db).Table(tokenTable).Insert(auth).Run(session)\n\t\tjwt = h\n\t}\n\n\treturn jwt\n}",
"func GenerateJWT() (string, error) {\n\tlog.Printf(\"Generating new JWT\")\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\n\ttokenString, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}",
"func generateJwtToken(login, fgp string, api *UserAPIHandler) (string, error) {\n\tvar claims models.TokenClaims\n\n\t// set required claims\n\tclaims.ExpiresAt = time.Now().Add(1 * time.Hour).Unix()\n\tclaims.Fingerprint = fgp\n\tif IsUserAdmin(login, api.admins) {\n\t\tclaims.Role = roleAdmin\n\t} else {\n\t\tclaims.Role = roleUser\n\t}\n\n\t// generate and sign the token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(api.jwtSecret)\n}",
"func GenerateToken(secret []byte, aud, sub string) (string, error) {\n\n\ttok := jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.RegisteredClaims{\n\t\tIssuer: TokenIssuer,\n\t\tAudience: []string{aud},\n\t\tSubject: sub,\n\t\tIssuedAt: jwt.NewNumericDate(time.Now()),\n\t\tNotBefore: jwt.NewNumericDate(time.Now().Add(-15 * time.Minute)),\n\t})\n\n\treturn tok.SignedString(secret)\n}",
"func JWTGenerator(userID *domainModel.ID) (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"userID\"] = userID\n\tclaims[\"exp\"] = time.Now().Add(time.Hour * 24).Unix()\n\n\treturn token.SignedString([]byte(constants.JWTSecretKey))\n}",
"func generateJWT(u Model) (string, error) {\n\tvar token string\n\tc := Claim{\n\t\tUsuario: u,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// Tiempo de expiración del token: 1 semana\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 24 * 1).Unix(),\n\t\t\tIssuer: \"Cursos EDteam\",\n\t\t},\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodRS256, c)\n\ttoken, err := t.SignedString(SignKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}",
"func generateToken(user models.User) (string, error) {\n\tnow := time.Now()\n\texpiry := time.Now().Add(constants.AuthenticationTimeout)\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, datatransfers.JWTClaims{\n\t\tID: user.ID,\n\t\tExpiresAt: expiry.Unix(),\n\t\tIssuedAt: now.Unix(),\n\t})\n\treturn token.SignedString([]byte(config.AppConfig.JWTSecret))\n}",
"func createJwtToken(u user.User) (string, error) {\n\t// Set custom claims\n\tclaims := &middleware.LoginCustomClaims{\n\t\tu.Username,\n\t\tfalse,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 72).Unix(),\n\t\t},\n\t}\n\n\t// Create token with claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Generate encoded token and send it as response.\n\tkey := viper.GetString(\"auth.signkey\")\n\tt, err := token.SignedString([]byte(key))\n\treturn t, err\n\n}",
"func GenerateJWT(signingKey []byte, userID string) (string, error) {\n\tclaims := UserJWTClaims{\n\t\tjwt.StandardClaims{\n\t\t\tIssuer: \"discapi\",\n\t\t\tSubject: userID,\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(signingKey)\n}",
"func GenerateJWT(initialToken string, validDuration int) (string, error) {\n\n\tloginKey := []byte(initialToken)\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * time.Duration(validDuration))\n\n\tjwtToken, jwtErr := token.SignedString(loginKey)\n\n\tif jwtErr != nil {\n\t\tlog.Println(\"Error creating jwt Token : \", jwtErr)\n\t\treturn \"\", jwtErr\n\t}\n\n\treturn jwtToken, nil\n}",
"func (s service) generateJWT(identity Identity) (string, error) {\n\treturn jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": identity.ID(),\n\t\t\"isAdmin\": identity.IsAdmin(),\n\t\t\"exp\": time.Now().Add(time.Duration(s.tokenExpiration) * time.Hour).Unix(),\n\t}).SignedString([]byte(s.signingKey))\n}",
"func GenerateJWT(tokenID int64, tokenString string, issuedAt, expiresAt int64, sessionID int64, accountID int64) (string, error) {\n\tstrTokenID := helper.Int64ToString(tokenID)\n\tjwtID := helper.Int64ToString(issuedAt) + \"a\" + strTokenID\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, JWTClaims{\n\t\tTokenString: tokenString,\n\t\tTokenID: tokenID,\n\t\tSessionID: sessionID,\n\t\tAccountID: accountID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tId: jwtID,\n\t\t\tIssuer: \"cstd/customer\",\n\t\t\tIssuedAt: issuedAt,\n\t\t\tExpiresAt: expiresAt,\n\t\t},\n\t})\n\tstr, err := token.SignedString(createJWTSecretKey(jwtID))\n\tif err != nil {\n\t\tlogger.Error(\"accesstoken\", err.Error())\n\t}\n\treturn str, err\n}",
"func GenerateToken(jwtSecret string, claims InvoicesClaims) string {\n\thmacSampleSecret := []byte(jwtSecret)\n\n\ttype Claims struct {\n\t\tInvoicesClaims\n\t\tjwt.StandardClaims\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{\n\t\tInvoicesClaims{\n\t\t\tGetInvoices: true,\n\t\t\tGetInvoice: true,\n\t\t\tCreateInvoice: true,\n\t\t},\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: getExpiry(),\n\t\t},\n\t})\n\n\ttokenString, err := token.SignedString(hmacSampleSecret)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn tokenString\n}",
"func (s service) generateJWT(identity Identity) (string, error) {\n\treturn jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": identity.GetID(),\n\t\t\"name\": identity.GetName(),\n\t\t\"exp\": time.Now().Add(time.Duration(s.tokenExpiration) * time.Hour).Unix(),\n\t}).SignedString([]byte(s.signingKey))\n}",
"func (a *Service) GenerateJweToken(customClaims map[string]interface{}) (string, *time.Time, *error_utils.ApiError) {\n\n\tenc, err := jose.NewEncrypter(\n\t\tjose.ContentEncryption(a.encryptionAlgorithm),\n\t\tjose.Recipient{Algorithm: jose.DIRECT, Key: a.encryptionKey},\n\t\t(&jose.EncrypterOptions{}).WithType(\"JWT\"),\n\t)\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\texpire := a.timeFunc().UTC().Add(a.timeout)\n\n\tclaims := map[string]interface{} { }\n\tclaims[\"exp\"] = expire.Unix()\n\tclaims[\"orig_iat\"] = a.timeFunc().Unix()\n\tclaims[\"iss\"] = a.issuer\n\n\tif customClaims != nil {\n\t\tfor key, value := range customClaims {\n\t\t\tclaims[key] = value\n\t\t}\n\t}\n\n\ttoken, err := jwt.Encrypted(enc).Claims(claims).CompactSerialize()\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\treturn token, &expire, nil\n}",
"func generateUserToken(identity *Identity) *jwt.Token {\n\ttoken := jwt.New(jwt.SigningMethodRS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"jti\"] = uuid.NewV4().String()\n\tiat := time.Now().Unix()\n\tclaims[\"exp\"] = 0\n\tclaims[\"iat\"] = iat\n\tclaims[\"typ\"] = \"Bearer\"\n\tclaims[\"preferred_username\"] = identity.Username\n\tclaims[\"sub\"] = identity.ID.String()\n\tclaims[\"email\"] = identity.Email\n\n\ttoken.Header[\"kid\"] = \"test-key\"\n\n\treturn token\n}",
"func createJwToken(user models.User) (string, error) {\n\n\tjwtExpired, _ := strconv.ParseInt(os.Getenv(\"JWT_EXPIRED_MINUTES\"), 10, 64)\n\n\tclaims := models.JwtClaims{\n\t\tName: user.Name,\n\t\tEmail: user.Email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tId: strconv.Itoa(user.ID),\n\t\t\tExpiresAt: time.Now().Add(time.Duration(jwtExpired) * time.Minute).Unix(),\n\t\t},\n\t}\n\n\trawToken := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)\n\n\ttoken, err := rawToken.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
WriteCloserWithContext converts a WriteContextCloser to an io.WriteCloser; whenever the new Close method is called, the ctx is passed to it | func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser {
return &closerWithContext{
WriteContextCloser: closer,
ctx: ctx,
}
} | [
"func (c *Closer) Ctx() context.Context { return (*closerCtx)(c) }",
"func (fw *FileWriter) CloseWithContext(ctx context.Context, opts ...FlushRowGroupOption) error {\n\tif fw.schemaWriter.rowGroupNumRecords() > 0 {\n\t\tif err := fw.FlushRowGroup(opts...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkv := make([]*parquet.KeyValue, 0, len(fw.kvStore))\n\tfor i := range fw.kvStore {\n\t\tv := fw.kvStore[i]\n\t\taddr := &v\n\t\tif v == \"\" {\n\t\t\taddr = nil\n\t\t}\n\t\tkv = append(kv, &parquet.KeyValue{\n\t\t\tKey: i,\n\t\t\tValue: addr,\n\t\t})\n\t}\n\tmeta := &parquet.FileMetaData{\n\t\tVersion: fw.version,\n\t\tSchema: fw.schemaWriter.getSchemaArray(),\n\t\tNumRows: fw.totalNumRecords,\n\t\tRowGroups: fw.rowGroups,\n\t\tKeyValueMetadata: kv,\n\t\tCreatedBy: &fw.createdBy,\n\t\tColumnOrders: nil,\n\t}\n\n\tpos := fw.w.Pos()\n\tif err := writeThrift(ctx, meta, fw.w); err != nil {\n\t\treturn err\n\t}\n\n\tln := int32(fw.w.Pos() - pos)\n\tif err := binary.Write(fw.w, binary.LittleEndian, &ln); err != nil {\n\t\treturn err\n\t}\n\n\tif err := writeFull(fw.w, magic); err != nil {\n\t\treturn err\n\t}\n\n\treturn fw.bw.Flush()\n}",
"func WithWriterContext(ctx context.Context) FileWriterOption {\n\treturn func(fw *FileWriter) {\n\t\tfw.ctx = ctx\n\t}\n}",
"func WithContext(response http.ResponseWriter, request *http.Request, ctx context.Context) (http.ResponseWriter, *http.Request) {\n\tif ca, ok := response.(ContextAware); ok {\n\t\tca.SetContext(ctx)\n\t\treturn response, request.WithContext(ctx)\n\t}\n\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\treturn &contextAwareResponseWriter{response, ctx}, request.WithContext(ctx)\n}",
"func DelayedCtxCloser(ctx context.Context, delay time.Duration) context.Context {\n\tdelayedCtx, cancel := context.WithCancel(context.Background())\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\ttime.Sleep(delay)\n\t\tcancel()\n\t}()\n\n\treturn delayedCtx\n}",
"func MarshalWithContext(v interface{}, ctx *Context) ([]byte, error) {\n\tvar b bytes.Buffer\n\tenc := NewEncoder(&b)\n\tenc.Context = ctx\n\tif err := enc.Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}",
"func CloseContext(ctx *ContextT) {\n\tC.yices_free_context(yctx(*ctx))\n\tctx.raw = 0\n}",
"func (o *WriteOptions) Context() context.Context {\n\tif o != nil && o.ctx != nil {\n\t\treturn o.ctx\n\t}\n\treturn context.Background()\n}",
"func WriteCloser(ctx context.Context, w io.WriteCloser) io.WriteCloser {\n\twriterDeadline(ctx, w)\n\n\treturn &writeCloser{\n\t\twriter: writer{\n\t\t\tctx: ctx,\n\t\t\tw: w,\n\t\t},\n\t\tcloser: closer{\n\t\t\tctx: ctx,\n\t\t\tc: w,\n\t\t},\n\t}\n}",
"func SafeCloseCtx(ctx context.Context, c io.Closer, msg string, fields ...zapcore.Field) {\n\tif cerr := c.Close(); cerr != nil {\n\t\tfields = append(fields, zap.Error(errors.WithStack(cerr)))\n\t\tDefault().For(ctx).Error(msg, fields...)\n\t}\n}",
"func WriteStructWithContext(ctx context.Context, p thrift.TProtocol, value thrift.TStruct, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.STRUCT, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := value.Write(ctx, p); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}",
"func (out Output) ApplyWithContext(ctx context.Context,\n\tapplier func(ctx context.Context, v interface{}) (interface{}, error)) Output {\n\n\tresult := newOutput(out.s.deps...)\n\tgo func() {\n\t\tv, known, err := out.s.await(ctx)\n\t\tif err != nil || !known {\n\t\t\tresult.s.fulfill(nil, known, err)\n\t\t\treturn\n\t\t}\n\n\t\t// If we have a known value, run the applier to transform it.\n\t\tu, err := applier(ctx, v)\n\t\tif err != nil {\n\t\t\tresult.s.reject(err)\n\t\t\treturn\n\t\t}\n\n\t\t// Fulfill the result.\n\t\tresult.s.fulfill(u, true, nil)\n\t}()\n\treturn result\n}",
"func PipeWithContext(\n\tctx context.Context,\n\tsamplesPerSecond uint,\n\tformat SampleFormat,\n) (PipeReader, PipeWriter) {\n\tctx, cancel := context.WithCancel(ctx)\n\tp := &pipe{\n\t\tcontext: ctx,\n\t\tcancel: cancel,\n\t\tformat: format,\n\t\tsamplesPerSecond: samplesPerSecond,\n\t\tsamplesCh: make(chan Samples),\n\t\treadSamplesCh: make(chan int),\n\t}\n\treturn p, p\n}",
"func (lw *LogWriter) WithContext(ctx context.Context) LogWriter {\n\treturn LogWriter{w: lw.w.WithContext(ctx)}\n}",
"func (ctx *ResourceContext) SafeClose() {\n}",
"func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif c.exportTimeout > 0 {\n\t\tctx, cancel = context.WithTimeout(parent, c.exportTimeout)\n\t} else {\n\t\tctx, cancel = context.WithCancel(parent)\n\t}\n\n\tif c.metadata.Len() > 0 {\n\t\tctx = metadata.NewOutgoingContext(ctx, c.metadata)\n\t}\n\n\treturn ctx, cancel\n}",
"func WrapCancel(cancel context.CancelFunc) io.Closer {\n\treturn Wrap(func() error {\n\t\tcancel()\n\t\treturn nil\n\t})\n}",
"func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif c.exportTimeout > 0 {\n\t\tctx, cancel = context.WithTimeout(parent, c.exportTimeout)\n\t} else {\n\t\tctx, cancel = context.WithCancel(parent)\n\t}\n\n\tif c.metadata.Len() > 0 {\n\t\tctx = metadata.NewOutgoingContext(ctx, c.metadata)\n\t}\n\n\t// Unify the client stopCtx with the parent.\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tcase <-c.stopCtx.Done():\n\t\t\t// Cancel the export as the shutdown has timed out.\n\t\t\tcancel()\n\t\t}\n\t}()\n\n\treturn ctx, cancel\n}",
"func (m *MQTT) WriteWithContext(ctx context.Context, msg *message.Batch) error {\n\treturn m.Write(msg)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NilCloser returns closer if it's not nil; otherwise it returns a nop closer | func NilCloser(r io.Closer) io.Closer {
if r == nil {
return &nilCloser{}
}
return r
} | [
"func (n *nilCloser) Close() error {\n\t// works even if n is nil\n\treturn nil\n}",
"func noopCloser(r io.Reader, err error) io.ReadCloser {\n\treturn &nopCloser{\n\t\tReader: r,\n\t\terr: err,\n\t}\n}",
"func NopCloser() error { return nil }",
"func (noopCloser) Close() error {\n\treturn nil\n}",
"func NoCloser(in io.Reader) io.Reader {\n\tif in == nil {\n\t\treturn in\n\t}\n\t// if in doesn't implement io.Closer, just return it\n\tif _, canClose := in.(io.Closer); !canClose {\n\t\treturn in\n\t}\n\treturn noClose{in: in}\n}",
"func NopCloser() io.Closer {\r\n\treturn &nopCloser{}\r\n}",
"func NopCloser(r io.Writer) io.WriteCloser { return nopCloser{r} }",
"func unwrapNopCloser(r io.Reader) (underlyingReader io.Reader, isNopCloser bool) {\n\tswitch reflect.TypeOf(r) {\n\tcase nopCloserType, nopCloserWriterToType:\n\t\treturn reflect.ValueOf(r).Field(0).Interface().(io.Reader), true\n\tdefault:\n\t\treturn nil, false\n\t}\n}",
"func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {\n\treturn nopCloser{rs}\n}",
"func NopCloser(bio Biome) BiomeCloser {\n\treturn nopCloser{bio}\n}",
"func NopReadSeekerCloser(r io.ReadSeeker) ReadSeekerCloser {\n\treturn readSeekerCloser{r, func() error { return nil }}\n}",
"func Wrap(closeFunc func() error) io.Closer {\n\treturn simpleCloser{\n\t\tcloseFunc: closeFunc,\n\t}\n}",
"func NopCloser(std agent.ExtendedAgent) Agent {\n\treturn nopCloser{std}\n}",
"func NoCloseReader(r io.Reader) io.Reader {\n _, ok := r.(io.Closer)\n if ok {\n return readerWrapper{r}\n }\n return r\n}",
"func NoOpReadCloser(r io.Reader) io.ReadCloser {\n\treturn noOpCloser{r}\n}",
"func NopWriteCloser(r io.Writer) io.WriteCloser {\n\treturn nopWriteCloser{r}\n}",
"func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn &nopWriteCloser{w}\n}",
"func NopCloser(r xml.TokenReader) TokenReadCloser {\n\treturn nopCloser{r}\n}",
"func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {\n\treturn exported.NopCloser(rs)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NopWriteCloser returns a WriteCloser with a noop Close method wrapping the provided Writer w | func NopWriteCloser(r io.Writer) io.WriteCloser {
return nopWriteCloser{r}
} | [
"func NopCloser(r io.Writer) io.WriteCloser { return nopCloser{r} }",
"func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn &nopWriteCloser{w}\n}",
"func NopCloser() error { return nil }",
"func noopCloser(r io.Reader, err error) io.ReadCloser {\n\treturn &nopCloser{\n\t\tReader: r,\n\t\terr: err,\n\t}\n}",
"func NopCloser() io.Closer {\r\n\treturn &nopCloser{}\r\n}",
"func NopFlusher(w Writer) WriteFlusher {\n\treturn nopFlusher{w}\n}",
"func NopCloser(std agent.ExtendedAgent) Agent {\n\treturn nopCloser{std}\n}",
"func (*nopCloseWriter) Close() error {\n\treturn nil\n}",
"func NoOpReadCloser(r io.Reader) io.ReadCloser {\n\treturn noOpCloser{r}\n}",
"func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {\n\treturn nopCloser{rs}\n}",
"func NopCloser(r xml.TokenReader) TokenReadCloser {\n\treturn nopCloser{r}\n}",
"func WrapWriteCloser(r io.Writer) io.WriteCloser {\n\treturn wrapWriteCloser{r}\n}",
"func NewWriteCloser(t mockConstructorTestingTNewWriteCloser) *WriteCloser {\n\tmock := &WriteCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NopCloser(bio Biome) BiomeCloser {\n\treturn nopCloser{bio}\n}",
"func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {\n\treturn exported.NopCloser(rs)\n}",
"func NewMockWriteCloser(t *testing.T) *MockWriteCloser {\n\treturn &MockWriteCloser{\n\t\tb: bytes.Buffer{},\n\t\tclosed: false,\n\t\tt: t,\n\t}\n}",
"func (c *carver) newWriterCloser(fp string) (io.WriteCloser, error) {\n\tif c.dryRun {\n\t\treturn noopCloser{w: io.Discard}, nil\n\t}\n\tif c.w != nil {\n\t\treturn noopCloser{w: c.w}, nil\n\t}\n\treturn os.Create(fp)\n}",
"func NopReadSeekerCloser(r io.ReadSeeker) ReadSeekerCloser {\n\treturn readSeekerCloser{r, func() error { return nil }}\n}",
"func NopReadCloser(r io.Reader) io.ReadCloser {\n\treturn ioutil.NopCloser(r)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewTracer returns a new tracer | func NewTracer(description string) *Tracer {
return &Tracer{Started: time.Now().UTC(), Description: description}
} | [
"func NewTracer(name string) *Tracer {\n\tname = fmt.Sprintf(namePattern, name)\n\treturn &Tracer{\n\t\tname: name,\n\t}\n}",
"func NewTracer(region string) Shooter {\n\treturn &Tracer{\n\t\tName: fmt.Sprintf(\"Request from %s\", region),\n\t\tAttacker: &http.Client{\n\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\treturn http.ErrUseLastResponse\n\t\t\t},\n\t\t},\n\t\tDuration: constants.DefaultWorkerDuration,\n\t\tRegion: region,\n\t\tTarget: constants.EmptyString,\n\t}\n}",
"func NewTracer(msgName string) Tracer {\n\treturn &tracer{\n\t\tmsgName: msgName,\n\t\tnextIndex: 1,\n\t}\n}",
"func NewTracer(parent *Logger, prefix string) *Logger {\n\treturn &Logger{parent: parent, prefix: prefix, tracer: true}\n}",
"func NewTracer(name string, w io.Writer, m Memory) Memory {\n\treturn &tracer{m: m, w: w, s: name}\n}",
"func NewTracer(cli CLI) (*Tracer, error) {\n\ttracer := &Tracer{\n\t\tcli: cli,\n\t\tnumPackets: defaultNumPackets,\n\t}\n\treturn tracer, nil\n}",
"func NewTracer(_ *config.Config) (*Tracer, error) {\n\treturn nil, ebpf.ErrNotImplemented\n}",
"func NewTracer(cfg TracerConfig) opentracing.Tracer {\n\tvar tracer opentracing.Tracer\n\tswitch cfg.Provider {\n\tcase Zipkin:\n\t\tlogrus.Error(\"No implements yet.\")\n\t\t// fmt.Sprintf(\"http://%s:%s/api/v1/spans\",cfg.Host, cfg.Port)\n\t\tbreak\n\tcase Jaeger:\n\t\ttracer = newJaegerTracer(cfg)\n\t\tbreak\n\tdefault:\n\t\tlogrus.Errorf(\"unsported provider %s, use opentracing.GlobalTracer()\", cfg.Provider)\n\t\ttracer = opentracing.GlobalTracer()\n\t}\n\treturn tracer\n}",
"func NewTracer(ctx context.Context, opts ...Option) (opentracing.Tracer, error) {\n\trecorder, err := NewRecorder(ctx, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn basictracer.New(recorder), nil\n}",
"func New(w io.Writer) Tracer{\n\treturn &tracer{out:w}\n}",
"func NewTracer() Tracer {\n\treturn &nullTracer{}\n}",
"func NewTracer(tracer trace.Tracer) octrace.Tracer {\n\treturn internal.NewTracer(tracer)\n}",
"func New(recorder Recorder, source opentracing.TraceContextSource) opentracing.Tracer {\n\treturn &standardTracer{\n\t\tTraceContextSource: source,\n\t\trecorder: recorder,\n\t}\n}",
"func NewTracer(\n\tserviceName string,\n\tsampler Sampler,\n\treporter Reporter,\n\toptions ...TracerOption,\n) (opentracing.Tracer, io.Closer) {\n\tt := &Tracer{\n\t\tserviceName: serviceName,\n\t\tsampler: samplerV1toV2(sampler),\n\t\treporter: reporter,\n\t\tinjectors: make(map[interface{}]Injector),\n\t\textractors: make(map[interface{}]Extractor),\n\t\tmetrics: *NewNullMetrics(),\n\t\tspanAllocator: simpleSpanAllocator{},\n\t}\n\n\tfor _, option := range options {\n\t\toption(t)\n\t}\n\n\t// register default injectors/extractors unless they are already provided via options\n\ttextPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)\n\tt.addCodec(opentracing.TextMap, textPropagator, textPropagator)\n\n\thttpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)\n\tt.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)\n\n\tbinaryPropagator := NewBinaryPropagator(t)\n\tt.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)\n\n\t// TODO remove after TChannel supports OpenTracing\n\tinteropPropagator := &jaegerTraceContextPropagator{tracer: t}\n\tt.addCodec(SpanContextFormat, interopPropagator, interopPropagator)\n\n\tzipkinPropagator := &zipkinPropagator{tracer: t}\n\tt.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)\n\n\tif t.baggageRestrictionManager != nil {\n\t\tt.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)\n\t} else {\n\t\tt.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)\n\t}\n\tif t.debugThrottler == nil {\n\t\tt.debugThrottler = throttler.DefaultThrottler{}\n\t}\n\n\tif t.randomNumber == nil {\n\t\tseedGenerator := utils.NewRand(time.Now().UnixNano())\n\t\tpool := sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn rand.NewSource(seedGenerator.Int63())\n\t\t\t},\n\t\t}\n\n\t\tt.randomNumber = func() uint64 {\n\t\t\tgenerator := pool.Get().(rand.Source)\n\t\t\tnumber := uint64(generator.Int63())\n\t\t\tpool.Put(generator)\n\t\t\treturn number\n\t\t}\n\t}\n\tif t.timeNow == nil {\n\t\tt.timeNow = time.Now\n\t}\n\tif t.logger == nil {\n\t\tt.logger = log.NullLogger\n\t}\n\t// Set tracer-level tags\n\tt.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})\n\tif hostname, err := os.Hostname(); err == nil {\n\t\tt.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})\n\t}\n\tif ipval, ok := t.getTag(TracerIPTagKey); ok {\n\t\tipv4, err := utils.ParseIPToUint32(ipval.(string))\n\t\tif err != nil {\n\t\t\tt.hostIPv4 = 0\n\t\t\tt.logger.Error(\"Unable to convert the externally provided ip to uint32: \" + err.Error())\n\t\t} else {\n\t\t\tt.hostIPv4 = ipv4\n\t\t}\n\t} else if ip, err := utils.HostIP(); err == nil {\n\t\tt.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})\n\t\tt.hostIPv4 = utils.PackIPAsUint32(ip)\n\t} else {\n\t\tt.logger.Error(\"Unable to determine this host's IP address: \" + err.Error())\n\t}\n\n\tif t.options.gen128Bit {\n\t\tif t.options.highTraceIDGenerator == nil {\n\t\t\tt.options.highTraceIDGenerator = t.randomNumber\n\t\t}\n\t} else if t.options.highTraceIDGenerator != nil {\n\t\tt.logger.Error(\"Overriding high trace ID generator but not generating \" +\n\t\t\t\"128 bit trace IDs, consider enabling the \\\"Gen128Bit\\\" option\")\n\t}\n\tif t.options.maxTagValueLength == 0 {\n\t\tt.options.maxTagValueLength = DefaultMaxTagValueLength\n\t}\n\tt.process = Process{\n\t\tService: serviceName,\n\t\tUUID: 
strconv.FormatUint(t.randomNumber(), 16),\n\t\tTags: t.tags,\n\t}\n\tif throttler, ok := t.debugThrottler.(ProcessSetter); ok {\n\t\tthrottler.SetProcess(t.process)\n\t}\n\n\treturn t, t\n}",
"func New(recorders []basictracer.SpanRecorder) opentracing.Tracer {\n\treturn basictracer.New(NewRecorder(recorders))\n}",
"func NewTracer(ctx context.Context, yamlConfig []byte) (opentracing.Tracer, io.Closer, error) {\n\tconfig := Config{}\n\tif err := yaml.Unmarshal(yamlConfig, &config); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\toptions := lightstep.Options{\n\t\tAccessToken: config.AccessToken,\n\t\tCollector: config.Collector,\n\t}\n\tlighstepTracer, err := lightstep.CreateTracer(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tt := &Tracer{\n\t\tlighstepTracer,\n\t\tctx,\n\t}\n\treturn t, t, nil\n}",
"func NewTracer(opts ...Option) Tracer {\n\treturn &noopTracer{opts: NewOptions(opts...)}\n}",
"func New() graphql.Tracer {\n\treturn tracer{Tracer: gqlopencensus.New()}\n}",
"func NewTracer(serverID, addr string) (*Tracer, error) {\n\tc, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to connect to logging server: %v\", err)\n\t}\n\thello := map[string]string{\n\t\t\"action\": \"hello\",\n\t\t\"serverID\": serverID,\n\t}\n\tenc := json.NewEncoder(c)\n\tenc.Encode(hello)\n\tdec := json.NewDecoder(c)\n\tvar resp map[string]string\n\tif err := dec.Decode(&resp); err != nil {\n\t\tc.Close()\n\t\treturn nil, fmt.Errorf(\"failed to get response from server: %v\", err)\n\t}\n\tif status, ok := resp[\"status\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"response from server missing status key: %v\", resp)\n\t} else if status != \"ok\" {\n\t\treturn nil, fmt.Errorf(\"expected response status 'ok' but got %q\", status)\n\t}\n\treturn &Tracer{\n\t\tremote: &c,\n\t\tserverID: serverID,\n\t\tencoder: enc,\n\t\tqueue: make(chan map[string]string),\n\t\tshutdown: make(chan struct{}),\n\t}, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ThisFunction returns calling function name | func ThisFunction() string {
var pc [32]uintptr
runtime.Callers(2, pc[:])
return runtime.FuncForPC(pc[0]).Name()
} | [
"func CurrentFunctionName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfuncname := path.Base(runtime.FuncForPC(pc).Name())\n\treturn funcname\n}",
"func ThisFunc() *runtime.Func {\n\tpc, _, _, _ := runtime.Caller(1)\n\treturn runtime.FuncForPC(pc)\n}",
"func myCaller() string {\n\t// Skip GetCallerFunctionName and the function to get the caller of\n\treturn getFrame(2).Function\n}",
"func getSelf() string {\n\tpc := make([]uintptr, 1)\n\n\t// return the program counter for the calling function\n\truntime.Callers(2, pc)\n\n\tf := runtime.FuncForPC(pc[0])\n\treturn f.Name()\n}",
"func callerName() (caller string) {\n\tpc, _, _, ok := runtime.Caller(2) // 0: function-self, 1: parent function caller\n\tif !ok {\n\t\tcaller = \"#\"\n\t} else {\n\t\tpath := runtime.FuncForPC(pc).Name()\n\t\titems := strings.Split(path, \".\")\n\t\tcaller = items[len(items)-1]\n\t\tif len(caller) == 0 {\n\t\t\tcaller = path\n\t\t}\n\t}\n\treturn caller\n}",
"func (c *caller) getFunction() string {\n\treturn c.function\n}",
"func getCallerName() string {\n\tpc := make([]uintptr, 10)\n\n\t// This method is supposed to be used only from other log package\n\t// so it skips three calls.\n\truntime.Callers(3, pc)\n\tif len(pc) > 0 {\n\t\tf := runtime.FuncForPC(pc[0])\n\t\treturn f.Name()\n\t}\n\treturn \"\"\n}",
"func (f *Func) Name() string",
"func (m Function) Name() string {\n\treturn m.name\n}",
"func GetMyCaller() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn filename\n}",
"func GetCaller() string {\n\tvar pc [1]uintptr\n\truntime.Callers(2, pc[:])\n\tf := runtime.FuncForPC(pc[0])\n\tif f == nil {\n\t\treturn \"Unable to find caller\"\n\t}\n\treturn f.Name()\n}",
"func GetCallingFunction() string {\n\tfpcs := make([]uintptr, 1)\n\n\tn := runtime.Callers(3, fpcs)\n\tif n == 0 {\n\t\treturn \"n/a\"\n\t}\n\n\tfun := runtime.FuncForPC(fpcs[0] - 1)\n\tif fun == nil {\n\t\treturn \"n/a\"\n\t}\n\n\tnameParts := strings.Split(fun.Name(), \".\")\n\n\treturn nameParts[len(nameParts)-1]\n}",
"func GetFuncName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\treturn runtime.FuncForPC(pc).Name()\n}",
"func GetFunctionName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfullName := runtime.FuncForPC(pc).Name()\n\tparts := strings.Split(fullName, \".\")\n\treturn parts[len(parts)-1]\n}",
"func callerSource() string {\n\tpc, file, line, success := runtime.Caller(2)\n\tif !success {\n\t\tfile = \"<unknown>\"\n\t\tline = 0\n\t}\n\tfile = path.Base(file)\n\tname := runtime.FuncForPC(pc).Name()\n\tname = strings.TrimPrefix(name, \"github.com/minio/minio/cmd.\")\n\treturn fmt.Sprintf(\"[%s:%d:%s()]\", file, line, name)\n}",
"func TraceFunc() (funcName string) {\n\tfun, _, _ := TraceSkip(1)\n\treturn filepath.Base(fun)\n}",
"func getFunctionName(i interface{}) string {\n\tpcIdentifier := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n\tbaseName := filepath.Base(pcIdentifier)\n\tparts := strings.SplitN(baseName, \".\", 2)\n\tif len(parts) <= 1 {\n\t\treturn parts[0]\n\t}\n\n\treturn parts[1]\n}",
"func (o FunctionEventInvokeConfigOutput) FunctionName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *FunctionEventInvokeConfig) pulumi.StringOutput { return v.FunctionName }).(pulumi.StringOutput)\n}",
"func caller() string {\n\tcalldepth := 2\n\t// get the stack trace\n\tpc := make([]uintptr, calldepth) // at least 1 entry needed\n\truntime.Callers(calldepth, pc)\n\tf := runtime.FuncForPC(pc[1]) // 0 wil be the logging function (e.g. Debugf, Info, etc.)\n\tfile, line := f.FileLine(pc[1]) // see previous comment\n\tshortfile := file[strings.LastIndex(file, \"/\")+1:]\n\tmethod := f.Name()[strings.LastIndex(f.Name(), \"/\")+1:]\n\t// remove (*) when there is pointers\n\tmethod = strings.Replace(method, \"(\", \"\", -1)\n\tmethod = strings.Replace(method, \"*\", \"\", -1)\n\tmethod = strings.Replace(method, \")\", \"\", -1)\n\treturn fmt.Sprintf(\"%s:%d %s\", shortfile, line, method)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Value returns value of the string | func (s *SyncString) Value() string {
s.Lock()
defer s.Unlock()
return s.string
} | [
"func (s *String) GetValue() string {\n\treturn s.value\n}",
"func (sval *ScalarValue) Value() string {\n\tswitch {\n\tcase strings.HasPrefix(sval.Raw, `\"\"\"`):\n\t\treturn parseBlockString(sval.Raw)\n\tcase strings.HasPrefix(sval.Raw, `\"`):\n\t\treturn parseString(sval.Raw)\n\tdefault:\n\t\treturn sval.Raw\n\t}\n}",
"func (t Type) Value() string {\n\tstr := string(t)\n\tv, ok := builtin[str]\n\tif !ok {\n\t\treturn gocase.To(strcase.ToCamel(str))\n\t}\n\n\treturn v\n}",
"func (s DnaString) GetValue() string {\n\treturn s.Value\n}",
"func (s *StringChecksum) Value() string {\n\treturn s.value\n}",
"func (p stringProperty) Value() (string, error) {\n\treturn p.value, nil\n}",
"func (s Stash) Value() string {\n\tvals := utils.MapKeys(s.payload)\n\tif len(vals) < 1 {\n\t\treturn \"\"\n\t}\n\n\treturn expand(fmt.Sprintf(\"%v\", vals[0]))\n}",
"func (t *Token) Value() string {\n\treturn t.strBuilder.String()\n}",
"func (i VkIdentifier) Value() string {\n\treturn string(i)\n}",
"func (b *baseSemanticUTF8String) Value() interface{} {\n\treturn b.V\n}",
"func (f Formal) Value() string {\n\treturn string(f)\n}",
"func (r *ReflowletVersion) Value() string {\n\treturn string(*r)\n}",
"func (d *Description) Value() string {\n\tif d == nil {\n\t\treturn \"\"\n\t}\n\tif strings.HasPrefix(d.Raw, `\"\"\"`) {\n\t\treturn parseBlockString(d.Raw)\n\t}\n\treturn parseString(d.Raw)\n}",
"func (p *Property) ValueString() string {\n\treturn p.vstr\n}",
"func (f EncodedField) Value() string {\n\treturn f[1]\n}",
"func (o IoTHubEnrichmentOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IoTHubEnrichment) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (cell *StringCell) Value() interface{} {\n\treturn cell.value\n}",
"func (f *Title) Value() string {\n\ts := decode.UTF16(f.data)\n\treturn trim.Nil(s)\n}",
"func (n *ruleNode) GetValue() string {\n\tv := n.Value\n\tif n.Type == Substring {\n\t\tv = make([]byte, 1+len(n.Value))\n\t\tv[0] = '*'\n\t\tcopy(v[1:], n.Value)\n\t}\n\treturn string(v)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ClickableURL fixes address in url to make sure it's clickable, e.g. it replaces "undefined" address like 0.0.0.0 used in network listeners format with loopback 127.0.0.1 | func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
} | [
"func makeHTMLhref(url string) string {\n\treturn \"<a target=_blank href=\" + url + \">URL</a>\"\n}",
"func URLButton(text, url string) Button {\n\treturn button{\n\t\tType: \"web_url\",\n\t\tTitle: text,\n\t\tURL: url,\n\t\tShareButton: \"hide\",\n\t\tExtensions: true,\n\t}\n}",
"func SanitizeURL(in string) string {\n\treturn sanitizeURLWithFlags(in, purell.FlagsSafe|purell.FlagRemoveTrailingSlash|purell.FlagRemoveDotSegments|purell.FlagRemoveDuplicateSlashes|purell.FlagRemoveUnnecessaryHostDots|purell.FlagRemoveEmptyPortSeparator)\n}",
"func fixImgurLink(link string) string {\n\toriginalurl, err := url.Parse(link)\n\n\tif err != nil || originalurl.Host != \"imgur.com\" {\n\t\treturn link\n\t}\n\n\treturn fmt.Sprintf(\"http://i.imgur.com%s.gif\", originalurl.Path)\n}",
"func URL(unsanitized *url.URL) string {\n\tescaped := strings.ReplaceAll(unsanitized.String(), \"\\n\", \"\")\n\treturn strings.ReplaceAll(escaped, \"\\r\", \"\")\n}",
"func Link(url string, text string) string {\n\treturn fmt.Sprintf(\"\\u001B]8;;%s\\u0007%s\\u001B]8;;\\u0007\", url, text)\n}",
"func URL(link *Link) (string, error) {\n\tb64, err := Encode(link)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Inject some random data into the url so that facebook can never cache it\n\trand, err := gonanoid.Nanoid(4)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn os.Getenv(\"SELF_HOST\") + \"/linkr/\" + rand + \"/link?d=\" + url.QueryEscape(b64), nil\n}",
"func (bot *Bot) handleURLsListener(message events.EventMessage) {\n\n\t// Find all URLs in the message.\n\tlinks := xurls.Strict().FindAllString(message.Message, -1)\n\t// Remove multiple same links from one message.\n\tlinks = utils.RemoveDuplicates(links)\n\tfor i := range links {\n\t\t// Validate the url.\n\t\tbot.Log.Infof(\"Got link %s\", links[i])\n\t\tlink := utils.StandardizeURL(links[i])\n\t\tbot.Log.Debugf(\"Standardized to: %s\", link)\n\n\t\t// Try to get the body of the page.\n\t\terr, finalLink, body := bot.GetPageBody(link, map[string]string{})\n\t\tif err != nil {\n\t\t\tbot.Log.Warningf(\"Could't fetch the body: %s\", err)\n\t\t}\n\n\t\t// Update link if needed.\n\t\tif finalLink != \"\" {\n\t\t\tlink = finalLink\n\t\t}\n\n\t\t// Iterate over meta tags to get the description\n\t\tdescription := \"\"\n\t\tmetas := metaRe.FindAllStringSubmatch(string(body), -1)\n\t\tfor i := range metas {\n\t\t\tif len(metas[i]) > 1 {\n\t\t\t\tisDesc := descRe.FindString(metas[i][0])\n\t\t\t\tif isDesc != \"\" && (len(metas[i][1]) > len(description)) {\n\t\t\t\t\tdescription = utils.CleanString(metas[i][1], true)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Get the title\n\t\ttitle := \"\"\n\t\tmatch := titleRe.FindStringSubmatch(string(body))\n\t\tif len(match) > 1 {\n\t\t\ttitle = utils.CleanString(match[1], true)\n\t\t}\n\n\t\t// Insert URL into the db.\n\t\tbot.Log.Debugf(\"Storing URL info for: %s\", link)\n\t\tif _, err := bot.Db.Exec(`INSERT INTO urls(transport, channel, nick, link, quote, title) VALUES(?, ?, ?, ?, ?, ?)`,\n\t\t\tmessage.TransportName, message.Channel, message.Nick, link, message.Message, title); err != nil {\n\t\t\tbot.Log.Warningf(\"Can't add url to database: %s\", err)\n\t\t}\n\n\t\t// Trigger url found message.\n\t\tbot.EventDispatcher.Trigger(events.EventMessage{\n\t\t\tmessage.TransportName,\n\t\t\tmessage.TransportFormatting,\n\t\t\tevents.EventURLFound,\n\t\t\tmessage.Nick,\n\t\t\tmessage.UserId,\n\t\t\tmessage.Channel,\n\t\t\tlink,\n\t\t\tmessage.Context,\n\t\t\tmessage.AtBot,\n\t\t})\n\n\t\tlinkKey := link + message.Channel\n\t\t// If we can't announce yet, skip this link.\n\t\tif time.Since(bot.lastURLAnnouncedTime[linkKey]) < bot.Config.UrlAnnounceIntervalMinutes*time.Minute {\n\t\t\tcontinue\n\t\t}\n\t\tif lines, exists := bot.lastURLAnnouncedLinesPassed[linkKey]; exists && lines < bot.Config.UrlAnnounceIntervalLines {\n\t\t\tcontinue\n\t\t}\n\n\t\t// On mattermost we can skip all link info display.\n\t\tif message.TransportName == \"mattermost\" {\n\t\t\treturn\n\t\t}\n\n\t\t// Announce the title, save the description.\n\t\tif title != \"\" {\n\t\t\tif description != \"\" {\n\t\t\t\tbot.SendNotice(&message, title+\" …\")\n\t\t\t} else {\n\t\t\t\tbot.SendNotice(&message, title)\n\t\t\t}\n\t\t\tbot.lastURLAnnouncedTime[linkKey] = time.Now()\n\t\t\tbot.lastURLAnnouncedLinesPassed[linkKey] = 0\n\t\t\t// Keep the long info for later.\n\t\t\tbot.AddMoreInfo(message.TransportName, message.Channel, description)\n\t\t}\n\t}\n}",
"func IsSimpleUrl() func(string) error {\r\n\treturn MatchesRegex(`^https?:\\/\\/(?:www\\.)?(?:[\\w_-]+\\.)+[\\w_-]+.*$`)\r\n}",
"func LinkButton(text, url string) Button {\n\treturn button{\n\t\tType: \"web_url\",\n\t\tTitle: text,\n\t\tURL: url,\n\t\tShareButton: \"hide\",\n\t\tExtensions: false,\n\t}\n}",
"func sanitizeBindAddr(bindAddr string, aurl *url.URL) (string, error) {\n\t// If it is a valid host:port simply return with no further checks.\n\tbhost, bport, err := net.SplitHostPort(bindAddr)\n\tif err == nil && bhost != \"\" {\n\t\treturn bindAddr, nil\n\t}\n\n\t// SplitHostPort makes the host optional, but we don't want that.\n\tif bhost == \"\" && bport != \"\" {\n\t\treturn \"\", fmt.Errorf(\"IP required can't use a port only\")\n\t}\n\n\t// bindAddr doesn't have a port if we reach here so take the port from the\n\t// advertised URL.\n\t_, aport, err := net.SplitHostPort(aurl.Host)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn net.JoinHostPort(bindAddr, aport), nil\n}",
"func LegalURL(url string) bool {\n\treturn linksRegexp.MatchString(url)\n}",
"func MakeURLForTest(s string) safehtml.URL {\n\treturn url(s)\n}",
"func ToURL(s string) string {\n\ts = strings.Trim(s, \" \")\n\ts = strings.ReplaceAll(s, \" \", \"%20\")\n\treturn s\n}",
"func fixURL(href, base string) string {\n\turi, err := url.Parse(href)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tbaseURL, err := url.Parse(base)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\turi = baseURL.ResolveReference(uri)\n\treturn uri.String()\n}",
"func Link(url, text string) string {\n\treturn Osc + \"8;;\" + url + Bel + text + Osc + \"8;;\" + Bel\n}",
"func makeAbsoluteHref(baseURL string, href string) string {\n\tif strings.HasPrefix(href, \"http\") {\n\t\treturn href\n\t} else {\n\t\treturn baseURL + href\n\t}\n}",
"func URL(e *Context) error {\n\ttarget := extractBaseTarget(e.DOM.HeadNode)\n\n\tfor n := e.DOM.RootNode; n != nil; n = htmlnode.Next(n) {\n\t\t// Skip text nodes and anything inside mustache templates\n\t\tif n.Type == html.TextNode || htmlnode.IsDescendantOf(n, atom.Template) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(b/112417267): Handle amp-img rewriting.\n\t\tif strings.EqualFold(n.Data, \"amp-img\") {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Make attributes with URLs portable on any tag\n\t\trewritePortableURLs(n, e.BaseURL, anyTagAttrs)\n\n\t\tswitch n.DataAtom {\n\t\tcase atom.Form:\n\t\t\t// Make attributes with URLs absolute on <form> tag.\n\t\t\trewriteAbsoluteURLs(n, e.BaseURL, formTagAttrs)\n\t\tcase atom.Img:\n\t\t\t// Make attributes with URLs portable on <img> tag.\n\t\t\trewritePortableURLs(n, e.BaseURL, imgTagAttrs)\n\t\tdefault:\n\t\t\tswitch n.Data {\n\t\t\tcase \"amp-install-serviceworker\":\n\t\t\t\t// Make attributes with URLs portable on <amp-install-serviceworker> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampInstallServiceWorkerTagAttrs)\n\t\t\tcase amphtml.AMPStory:\n\t\t\t\t// Make attributes with URLs portable on <amp-story> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampStoryTagAttrs)\n\t\t\tcase \"amp-story-page\":\n\t\t\t\t// Make attributes with URLs portable on <amp-story-page> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampStoryPageTagAttrs)\n\t\t\t}\n\t\t}\n\n\t\t// Tags with href attribute.\n\t\tif href, ok := htmlnode.FindAttribute(n, \"\", \"href\"); ok {\n\t\t\t// Remove the base tag href with the following rationale:\n\t\t\t//\n\t\t\t// 1) The <base href> can be harmful. When handling things like image\n\t\t\t// source sets which are re-hosted and served from\n\t\t\t// https://cdn.ampproject.org, paths starting with \"/\" are rewritten\n\t\t\t// into the stored html document with the intent that \"/\" is relative\n\t\t\t// to the root of cdn.ampproject.org. If a base href were present, it\n\t\t\t// would change the meaning of the relative links.\n\t\t\t//\n\t\t\t// 2) Other hrefs are absolutified in the document relative to the base\n\t\t\t// href. Thus, it is not necessary to maintain the base href for\n\t\t\t// browser URL resolution.\n\t\t\tswitch n.DataAtom {\n\t\t\tcase atom.Base:\n\t\t\t\thtmlnode.RemoveAttribute(n, href)\n\t\t\t\tif len(n.Attr) == 0 {\n\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t}\n\t\t\tcase atom.Link:\n\t\t\t\tif v, ok := htmlnode.GetAttributeVal(n, \"rel\"); ok && v == \"canonical\" {\n\t\t\t\t\t// If the origin doc is self-canonical, it should be an absolute URL\n\t\t\t\t\t// and not portable (which would result in canonical = \"#\").\n\t\t\t\t\t// Maintain the original canonical, and absolutify it. See b/36102624\n\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewriteAbsoluteURL(e.BaseURL, href.Val))\n\t\t\t\t} else {\n\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewritePortableURL(e.BaseURL, href.Val))\n\t\t\t\t}\n\t\t\tcase atom.A:\n\t\t\t\tportableHref := amphtml.RewritePortableURL(e.BaseURL, href.Val)\n\t\t\t\t// Set a default target\n\t\t\t\t// 1. If the href is not a fragment AND\n\t\t\t\t// 2. 
If there is no target OR If there is a target and it is not an allowed target\n\t\t\t\tif !strings.HasPrefix(portableHref, \"#\") {\n\t\t\t\t\tif v, ok := htmlnode.GetAttributeVal(n, \"target\"); !ok || (ok && !isAllowedTarget(v)) {\n\t\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"target\", target)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", portableHref)\n\t\t\tdefault:\n\t\t\t\t// Make a PortableUrl for any remaining tags with href.\n\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewritePortableURL(e.BaseURL, href.Val))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *htmlState) checkURL(raw string) {\n\tif s.ignore&issueURL != 0 {\n\t\treturn\n\t}\n\n\tif strings.HasPrefix(raw, \"mailto:\") {\n\t\tif strings.Index(raw, \"@\") == -1 {\n\t\t\ts.err(fmt.Errorf(\"not an email address\"))\n\t\t}\n\t\treturn\n\t}\n\n\tu, err := url.Parse(raw)\n\tif err != nil {\n\t\ts.err(fmt.Errorf(\"bad URL '%s': %s\", raw, err.Error()))\n\t\treturn\n\t}\n\tif u.Opaque != \"\" {\n\t\ts.err(fmt.Errorf(\"bad URL part '%s'\", u.Opaque))\n\t\treturn\n\t}\n\n\tif strings.Index(raw, \" \") != -1 {\n\t\ts.err(fmt.Errorf(\"unencoded space in URL\"))\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AsBool converts string to bool, in case of the value is empty or unknown, defaults to false | func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
} | [
"func (s *Value) asBool() (bool, error) {\n\t// A missing value is considered false\n\tif s == nil {\n\t\treturn false, nil\n\t}\n\tswitch s.Name {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"invalid boolean: %s\", s.Name)\n\t}\n}",
"func strAsBool(value string) (result bool, err error) {\n\tlvalue := strings.ToLower(value)\n\tswitch lvalue {\n\tcase \"0\", \"false\", \"f\", \"no\", \"n\":\n\t\tresult = false\n\tcase \"1\", \"true\", \"t\", \"yes\", \"y\":\n\t\tresult = true\n\tdefault:\n\t\tresult = false\n\t\tmsg := \"Unknown conversion from string to bool for value '%s'\"\n\t\terr = fmt.Errorf(msg, value)\n\t}\n\treturn\n}",
"func (v *Value) AsBool(dv bool) bool {\n\tif v.IsUndefined() {\n\t\treturn dv\n\t}\n\tswitch tv := v.raw.(type) {\n\tcase string:\n\t\tb, err := strconv.ParseBool(tv)\n\t\tif err != nil {\n\t\t\treturn dv\n\t\t}\n\t\treturn b\n\tcase int:\n\t\treturn tv == 1\n\tcase float64:\n\t\treturn tv == 1.0\n\tcase bool:\n\t\treturn tv\n\tcase time.Time:\n\t\treturn tv.UnixNano() > 0\n\tcase time.Duration:\n\t\treturn tv.Nanoseconds() > 0\n\t}\n\treturn dv\n}",
"func Bool(s string) (interface{}, error) {\n\treturn strconv.ParseBool(s)\n}",
"func (s *String) AsBoolean() *Boolean {\n\topChain := s.chain.enter(\"AsBoolean()\")\n\tdefer opChain.leave()\n\n\tif opChain.failed() {\n\t\treturn newBoolean(opChain, false)\n\t}\n\n\tswitch s.value {\n\tcase \"true\", \"True\":\n\t\treturn newBoolean(opChain, true)\n\n\tcase \"false\", \"False\":\n\t\treturn newBoolean(opChain, false)\n\t}\n\n\topChain.fail(AssertionFailure{\n\t\tType: AssertValid,\n\t\tActual: &AssertionValue{s.value},\n\t\tErrors: []error{\n\t\t\terrors.New(\"expected: string can be parsed to boolean\"),\n\t\t},\n\t})\n\n\treturn newBoolean(opChain, false)\n}",
"func (val stringValue) toBool() boolValue {\n\tif val.null {\n\t\treturn boolValue{false, true}\n\t}\n\treturn boolValue{true, false}\n}",
"func StringToBool(v string) bool {\n\tv = strings.TrimSpace(strings.ToLower(v))\n\tif v == \"\" {\n\t\treturn false\n\t}\n\tb, err := strconv.ParseBool(v)\n\tif err == nil {\n\t\treturn b\n\t}\n\tf, err := strconv.ParseFloat(v, 64)\n\tif err == nil {\n\t\treturn f != 0.0\n\t}\n\ti, err := strconv.ParseInt(v, 10, 64)\n\tif err == nil {\n\t\treturn i != 0\n\t}\n\tif v == \"no\" || v == \"n\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (s *Str) Bool() bool {\n\tval, err := strconv.ParseBool(s.val)\n\tif err != nil {\n\t\ts.err = err\n\t}\n\treturn val\n}",
"func ParseBool(str string) (bool, error) {}",
"func (argument *Argument) AsBool() (bool, error) {\n\treturn strconv.ParseBool(argument.Raw)\n}",
"func (c *Coercable) AsBool() bool {\n\tvalue, ok := maybeBoolToBool(c.rawValue, c.fieldPresent)\n\n\tif !ok {\n\t\tc.storeError(\"bool\", \"boolean\")\n\t\tvalue, _ = c.defaultValue.(bool)\n\t}\n\n\treturn value\n}",
"func StringToBool(s string, def bool) bool {\n\tv, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to parse bool value: %s\", s)\n\t\treturn def\n\t}\n\treturn v\n}",
"func ToBool(v interface{}) (bool, error) {\n\tswitch v.(type) {\n\tcase string:\n\t\t_v := v.(string)\n\t\tswitch _v {\n\t\tcase \"t\", \"T\", \"1\", \"on\", \"On\", \"ON\", \"true\", \"True\", \"TRUE\":\n\t\t\treturn true, nil\n\t\tcase \"f\", \"F\", \"0\", \"off\", \"Off\", \"OFF\", \"false\", \"False\", \"FALSE\", \"\":\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn false, fmt.Errorf(\"unrecognized bool string: %s\", _v)\n\t\t}\n\t}\n\treturn !IsZero(v), nil\n}",
"func Bool(val string) error {\n\tif strings.EqualFold(val, \"true\") || strings.EqualFold(val, \"false\") {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"invalid bool value '%s', can be only 'true' or 'false'\", val)\n}",
"func StringToBoolean(str string) bool {\r\n\r\n\tb1, _ := strconv.ParseBool(str)\r\n\treturn b1\r\n}",
"func ToBoolean(str string) (bool, error) {\n\tres, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\tres = false\n\t}\n\treturn res, err\n}",
"func parseBool(v interface{}, def bool) bool {\n\tswitch b := v.(type) {\n\tcase string:\n\t\tswitch strings.ToLower(b) {\n\t\tcase \"t\", \"y\", \"true\", \"yes\":\n\t\t\treturn true\n\t\tcase \"f\", \"n\", \"false\", \"no\":\n\t\t\treturn false\n\t\tdefault:\n\t\t\treturn def\n\t\t}\n\tcase bool:\n\t\treturn b\n\tdefault:\n\t\treturn def\n\t}\n}",
"func StrToBool(s string) (bool, error) {\n\tclean := strings.TrimSpace(s)\n\n\tif regexp.MustCompile(`(?i)^(1|yes|true|y|t)$`).MatchString(clean) {\n\t\treturn true, nil\n\t}\n\n\tif regexp.MustCompile(`(?i)^(0|no|false|n|f)$`).MatchString(clean) {\n\t\treturn false, nil\n\t}\n\n\treturn false, fmt.Errorf(\"cannot convert string value '%s' into a boolean\", clean)\n}",
"func ParseBool(operand string) (value bool, err error) { return strconv.ParseBool(operand) }"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ParseAdvertiseAddr validates advertise address, makes sure it's not an unreachable or multicast address returns address split into host and port, port could be empty if not specified | func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
} | [
"func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\treturn advAddr, port\n\n\t// bug: if use domain, always return empty host\n\t/*m, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\treturn advAddr, port\n\t}\n\n\tm, e1 := regexp.Match(ipv4WithPortPattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e1 != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\t// 1 5\n\t\tregxp := regexp.MustCompile(ipv4WithPortPattern)\n\t\tadAddr := regxp.ReplaceAllString(advAddr, \"${1}\")\n\t\tadPort, _ := strconv.Atoi(regxp.ReplaceAllString(advAddr, \"${5}\"))\n\t\treturn adAddr, adPort\n\t}\n\treturn \"\", port*/\n}",
"func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\tm, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\treturn advAddr, port\n\t}\n\n\tm, e1 := regexp.Match(ipv4WithPortPattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e1 != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\t// 1 5\n\t\tregxp := regexp.MustCompile(ipv4WithPortPattern)\n\t\tadAddr := regxp.ReplaceAllString(advAddr, \"${1}\")\n\t\tadPort, _ := strconv.Atoi(regxp.ReplaceAllString(advAddr, \"${5}\"))\n\t\treturn adAddr, adPort\n\t}\n\treturn \"\", port\n}",
"func calculateAdvertiseAddress(bindAddr, advertiseAddr string) (net.IP, error) {\n\tif advertiseAddr != \"\" {\n\t\tip := net.ParseIP(advertiseAddr)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse advertise addr '%s'\", advertiseAddr)\n\t\t}\n\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\tip = ip4\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\tif isAny(bindAddr) {\n\t\tprivateIP, err := getPrivateAddress()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get private IP\")\n\t\t}\n\t\tif privateIP == \"\" {\n\t\t\treturn nil, errors.New(\"no private IP found, explicit advertise addr not provided\")\n\t\t}\n\t\tip := net.ParseIP(privateIP)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse private IP '%s'\", privateIP)\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\tip := net.ParseIP(bindAddr)\n\tif ip == nil {\n\t\treturn nil, errors.Errorf(\"failed to parse bind addr '%s'\", bindAddr)\n\t}\n\treturn ip, nil\n}",
"func normalizeAdvertise(addr string, bind string, defport int, dev bool) (string, error) {\n\taddr, err := parseSingleIPTemplate(addr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error parsing advertise address template: %v\", err)\n\t}\n\n\tif addr != \"\" {\n\t\t// Default to using manually configured address\n\t\t_, _, err = net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\tif !isMissingPort(err) && !isTooManyColons(err) {\n\t\t\t\treturn \"\", fmt.Errorf(\"Error parsing advertise address %q: %v\", addr, err)\n\t\t\t}\n\n\t\t\t// missing port, append the default\n\t\t\treturn net.JoinHostPort(addr, strconv.Itoa(defport)), nil\n\t\t}\n\n\t\treturn addr, nil\n\t}\n\n\t// Fallback to bind address first, and then try resolving the local hostname\n\tips, err := net.LookupIP(bind)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error resolving bind address %q: %v\", bind, err)\n\t}\n\n\t// Return the first non-localhost unicast address\n\tfor _, ip := range ips {\n\t\tif ip.IsLinkLocalUnicast() || ip.IsGlobalUnicast() {\n\t\t\treturn net.JoinHostPort(ip.String(), strconv.Itoa(defport)), nil\n\t\t}\n\t\tif ip.IsLoopback() {\n\t\t\tif dev {\n\t\t\t\t// loopback is fine for dev mode\n\t\t\t\treturn net.JoinHostPort(ip.String(), strconv.Itoa(defport)), nil\n\t\t\t}\n\t\t\treturn \"\", fmt.Errorf(\"Defaulting advertise to localhost is unsafe, please set advertise manually\")\n\t\t}\n\t}\n\n\t// Bind is not localhost but not a valid advertise IP, use first private IP\n\taddr, err = parseSingleIPTemplate(\"{{ GetPrivateIP }}\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Unable to parse default advertise address: %v\", err)\n\t}\n\treturn net.JoinHostPort(addr, strconv.Itoa(defport)), nil\n}",
"func parseHost(addr string) string {\n\tvar (\n\t\thost, port string\n\t\tdefaultAssigned bool\n\t)\n\n\tv := strings.Split(addr, \":\")\n\n\tswitch len(v) {\n\tcase 2:\n\t\thost = v[0]\n\t\tport = v[1]\n\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif port == \"\" {\n\t\t\tport = _DEFAULT_PORT\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif defaultAssigned == false {\n\t\t\treturn addr // addr is already in required format\n\t\t}\n\t\tbreak\n\n\tcase 1:\n\t\thost = v[0]\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t}\n\t\tport = _DEFAULT_PORT\n\tcase 0:\n\t\tfallthrough\n\tdefault:\n\t\thost = _DEFAULT_HOST\n\t\tport = _DEFAULT_PORT\n\t\tbreak\n\t}\n\treturn strings.Join([]string{host, port}, \":\")\n}",
"func CalculateAdvertiseIP(bindHost, advertiseHost string, resolver Resolver, logger log.Logger) (net.IP, error) {\n\t// Prefer advertise host, if it's given.\n\tif advertiseHost != \"\" {\n\t\t// Best case: parse a plain IP.\n\t\tif ip := net.ParseIP(advertiseHost); ip != nil {\n\t\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\t\tip = ip4\n\t\t\t}\n\t\t\treturn ip, nil\n\t\t}\n\n\t\t// Otherwise, try to resolve it as if it's a hostname.\n\t\tips, err := resolver.LookupIPAddr(context.Background(), advertiseHost)\n\t\tif err == nil && len(ips) == 1 {\n\t\t\tif ip4 := ips[0].IP.To4(); ip4 != nil {\n\t\t\t\tips[0].IP = ip4\n\t\t\t}\n\t\t\treturn ips[0].IP, nil\n\t\t}\n\n\t\t// Didn't work, fall back to the bind host.\n\t\tif err == nil && len(ips) != 1 {\n\t\t\terr = fmt.Errorf(\"advertise host '%s' resolved to %d IPs\", advertiseHost, len(ips))\n\t\t}\n\t\tlevel.Warn(logger).Log(\"err\", err, \"msg\", \"falling back to bind host\")\n\t}\n\n\t// If bind host is all-zeroes, try to get a private IP.\n\tif bindHost == \"0.0.0.0\" {\n\t\tprivateIP, err := sockaddr.GetPrivateIP()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to deduce private IP from all-zeroes bind address\")\n\t\t}\n\t\tif privateIP == \"\" {\n\t\t\treturn nil, errors.Wrap(err, \"no private IP found, and explicit advertise address not provided\")\n\t\t}\n\t\tip := net.ParseIP(privateIP)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse private IP '%s'\", privateIP)\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\t// Otherwise, try to parse the bind host as an IP.\n\tif ip := net.ParseIP(bindHost); ip != nil {\n\t\treturn ip, nil\n\t}\n\n\t// And finally, try to resolve the bind host.\n\tips, err := resolver.LookupIPAddr(context.Background(), bindHost)\n\tif err == nil && len(ips) == 1 {\n\t\tif ip4 := ips[0].IP.To4(); ip4 != nil {\n\t\t\tips[0].IP = ip4\n\t\t}\n\t\treturn ips[0].IP, nil\n\t}\n\n\t// Didn't work. This time it's fatal.\n\tif err == nil && len(ips) != 1 {\n\t\terr = fmt.Errorf(\"bind host '%s' resolved to %d IPs\", bindHost, len(ips))\n\t}\n\treturn nil, errors.Wrap(err, \"bind host failed to resolve\")\n}",
"func ParseAddress(addrString string) *Address {\n\tsplited := strings.Split(addrString, \":\")\n\tif 2 == len(splited) && splited[0] != \"\" && splited[1] != \"\" {\n\t\treturn &Address{\n\t\t\tName: splited[0],\n\t\t\tPort: splited[1],\n\t\t}\n\t}\n\tlog.Fatalf(\"The address string is invalid.\")\n\treturn nil\n}",
"func parseAddr(addrType scion.AddrType, addrLen scion.AddrLen, raw []byte) (net.Addr, error) {\n\tswitch addrLen {\n\tcase scion.AddrLen4:\n\t\tswitch addrType {\n\t\tcase scion.T4Ip:\n\t\t\treturn &net.IPAddr{IP: net.IP(raw)}, nil\n\t\tcase scion.T4Svc:\n\t\t\treturn addr.HostSVC(binary.BigEndian.Uint16(raw[:addr.HostLenSVC])), nil\n\t\t}\n\tcase scion.AddrLen16:\n\t\tswitch addrType {\n\t\tcase scion.T16Ip:\n\t\t\treturn &net.IPAddr{IP: net.IP(raw)}, nil\n\t\t}\n\t}\n\treturn nil, serrors.New(\"unsupported address type/length combination\",\n\t\t\"type\", addrType, \"len\", addrLen)\n}",
"func ParseAddress(address string) (string, string) {\n\tsplit := strings.Split(address, \":\")\n\tip := split[0]\n\tport := split[1]\n\n\treturn ip, port\n}",
"func parseListeningAddress(ctx *context.T, laddress string) (network string, address string, p flow.Protocol, err error) {\n\tparts := strings.SplitN(laddress, \"/\", 2)\n\tif len(parts) != 2 {\n\t\treturn \"\", \"\", nil, ErrorfInvalidAddress(ctx, \"invalid vine address %v, address must be of the form 'network/address/tag'\", laddress)\n\t}\n\tp, _ = flow.RegisteredProtocol(parts[0])\n\tif p == nil {\n\t\treturn \"\", \"\", nil, ErrorfNoRegisteredProtocol(ctx, \"no registered protocol: %v\", parts[0])\n\t}\n\treturn parts[0], parts[1], p, nil\n}",
"func ParseAddr(addr string) (network, address string) {\n\tswitch {\n\tcase IsNamespaceAddr(addr):\n\t\treturn GetNamespace(addr[1:])\n\n\tcase strings.HasPrefix(addr, \"./\"), strings.HasPrefix(addr, \"/\"):\n\t\treturn \"unix\", addr\n\t}\n\n\tparts := strings.SplitN(addr, \":\", 2)\n\tif len(parts) == 2 {\n\t\tif (parts[1] == \"9p\") || (parts[1] == \"9fs\") {\n\t\t\tparts[1] = standardPort\n\t\t}\n\n\t\treturn \"tcp\", strings.Join(parts, \":\")\n\t}\n\n\tparts = strings.SplitN(addr, \"!\", 3)\n\tswitch len(parts) {\n\tcase 2:\n\t\tif parts[0] == \"tcp\" {\n\t\t\tparts[1] += \":\" + standardPort\n\t\t}\n\t\treturn parts[0], parts[1]\n\n\tcase 3:\n\t\tif (parts[2] == \"9p\") || (parts[2] == \"9fs\") {\n\t\t\tparts[2] = standardPort\n\t\t}\n\t\treturn parts[0], strings.Join(parts[1:], \":\")\n\t}\n\n\treturn \"tcp\", addr + \":\" + standardPort\n}",
"func selectAdvertiseAddr() (string, error) {\n\t// otherwise, try to pick an address among machine's interfaces\n\taddr, err := utils.PickAdvertiseIP()\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err, \"could not pick advertise address among \"+\n\t\t\t\"the host's network interfaces, please set the advertise address \"+\n\t\t\t\"via --advertise-addr flag\")\n\t}\n\treturn addr, nil\n}",
"func ParseAddress(addr string) Address {\n\t// Handle IPv6 address in form as \"[2001:4860:0:2001::68]\"\n\tlenAddr := len(addr)\n\tif lenAddr > 0 && addr[0] == '[' && addr[lenAddr-1] == ']' {\n\t\taddr = addr[1 : lenAddr-1]\n\t}\n\taddr = strings.TrimSpace(addr)\n\n\tip := net.ParseIP(addr)\n\tif ip != nil {\n\t\treturn IPAddress(ip)\n\t}\n\treturn DomainAddress(addr)\n}",
"func ParseAddr(s string) (Addr, error) {\n\tcomma := strings.IndexByte(s, ',')\n\tif comma < 0 {\n\t\treturn Addr{}, serrors.New(\"invalid address: expected comma\", \"value\", s)\n\t}\n\tia, err := ParseIA(s[0:comma])\n\tif err != nil {\n\t\treturn Addr{}, err\n\t}\n\th, err := ParseHost(s[comma+1:])\n\tif err != nil {\n\t\treturn Addr{}, err\n\t}\n\treturn Addr{IA: ia, Host: h}, nil\n}",
"func getAddress(opts string) string {\n\tfor _, opt := range strings.Split(opts, \",\") {\n\t\tif strings.HasPrefix(opt, \"addr=\") {\n\t\t\treturn strings.TrimPrefix(opt, \"addr=\")\n\t\t}\n\t}\n\treturn \"\"\n}",
"func parseAddr(text string) (*net.TCPAddr, error) {\n\tif text[0] == ':' {\n\t\ttext = \"0.0.0.0\" + text\n\t}\n\n\taddr := strings.Replace(text, \"public\", address.External().String(), 1)\n\treturn net.ResolveTCPAddr(\"tcp\", addr)\n}",
"func AdvertiseHost(listen string) string {\n\tif listen == \"0.0.0.0\" {\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil || len(addrs) == 0 {\n\t\t\treturn \"localhost\"\n\t\t}\n\n\t\tfor _, addr := range addrs {\n\t\t\tif ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && ip.IP.To4() != nil {\n\t\t\t\treturn ip.IP.To4().String()\n\t\t\t}\n\t\t}\n\t\treturn \"localhost\"\n\t}\n\n\treturn listen\n}",
"func Parse(addr string) (host string, port uint16, isLocal, isIPv4, isIPv6 bool, err error) {\n\thost, port, err = SplitHostPort(addr)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to parse addr %s\\terror: %v\", addr, err)\n\t\thost = addr\n\t}\n\n\tip, nerr := netip.ParseAddr(host)\n\tif nerr != nil {\n\t\tlog.Debugf(\"host: %s,\\tport: %d,\\tip: %#v,\\terror: %v\", host, port, ip, nerr)\n\t}\n\n\t// return host and port and flags\n\treturn host, port,\n\t\t// check is local ip or not\n\t\tIsLocal(host) || nerr == nil && ip.IsLoopback(),\n\t\t// check is IPv4 or not\n\t\t// ic < 2,\n\t\tnerr == nil && ip.Is4(),\n\t\t// check is IPv6 or not\n\t\t// ic >= 2,\n\t\tnerr == nil && (ip.Is6() || ip.Is4In6()),\n\t\t// Split error\n\t\terr\n}",
"func SplitAddr(b []byte) Addr {\n\taddrLen := 1\n\tif len(b) < addrLen {\n\t\treturn nil\n\t}\n\n\tswitch b[0] {\n\tcase AtypDomainName:\n\t\tif len(b) < 2 {\n\t\t\treturn nil\n\t\t}\n\t\taddrLen = 1 + 1 + int(b[1]) + 2\n\tcase AtypIPv4:\n\t\taddrLen = 1 + net.IPv4len + 2\n\tcase AtypIPv6:\n\t\taddrLen = 1 + net.IPv6len + 2\n\tdefault:\n\t\treturn nil\n\n\t}\n\n\tif len(b) < addrLen {\n\t\treturn nil\n\t}\n\n\treturn b[:addrLen]\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
StringsSliceFromSet returns a sorted strings slice from set | func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
} | [
"func (set StringSet) ToSlice() []string {\n\tkeys := make([]string, 0, len(set))\n\tfor k := range set {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}",
"func stringSliceFromGolangSet(sset map[string]struct{}) []string {\n\treturn tpgresource.StringSliceFromGolangSet(sset)\n}",
"func StrSliceSet(slice []string) []string {\n\tset := make([]string, 0)\n\ttempMap := make(map[string]bool, len(slice))\n\tfor _, v := range slice {\n\t\tif !tempMap[v] {\n\t\t\tset = append(set, v)\n\t\t\ttempMap[v] = true\n\t\t}\n\t}\n\n\treturn set\n}",
"func (s *Set) StringSlice() []string {\n\tslice := make([]string, 0, s.Size())\n\n\ts.mutex.Lock()\n\tfor k := range s.m {\n\t\tslice = append(slice, k.(string))\n\t}\n\ts.mutex.Unlock()\n\n\treturn slice\n}",
"func (s StringSet) ToSlice() []string {\n\tret := make([]string, len(s))\n\tidx := 0\n\tfor v := range s {\n\t\tret[idx] = v\n\t\tidx++\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}",
"func StringSetFromSlice(ss []string) StringSet {\n\to := StringSet{}\n\tfor _, s := range ss {\n\t\to[s] = struct{}{}\n\t}\n\treturn o\n}",
"func SetToSlice(set map[string]struct{}) []string {\n\tdata := make([]string, 0, len(set))\n\tfor key := range set {\n\t\tdata = append(data, key)\n\t}\n\treturn data\n}",
"func ToStringSlice(set mapset.Set) []string {\n\tif set == nil {\n\t\treturn nil\n\t}\n\tslice := set.ToSlice()\n\tresult := make([]string, len(slice))\n\tfor i, item := range slice {\n\t\tresult[i] = item.(string)\n\t}\n\treturn result\n}",
"func (s StringSet) ToSlice() []string {\n\tresult := make([]string, 0, s.Size())\n\tfor e := range s {\n\t\tresult = append(result, e)\n\t}\n\treturn result\n}",
"func KeysOfStringSet(set map[string]bool) []string {\n\tret := make([]string, 0, len(set))\n\tfor v := range set {\n\t\tret = append(ret, v)\n\t}\n\n\treturn ret\n}",
"func NewStringSetFromSlice(start []string) StringSet {\n\tret := make(StringSet)\n\tfor _, s := range start {\n\t\tret.Add(s)\n\t}\n\treturn ret\n}",
"func (o StringSet) Slice() []string {\n\tss := make([]string, len(o))\n\ti := 0\n\tfor s := range o {\n\t\tss[i] = s\n\t\ti++\n\t}\n\treturn ss\n}",
"func StringSliceToSet(slice []string) String {\n\tset := make(String, len(slice))\n\tfor _, s := range slice {\n\t\tset.Add(s)\n\t}\n\treturn set\n}",
"func (queryParametersBag) uniqueStringsSlice(in []string) []string {\n\tkeys := make(map[string]bool)\n\tout := make([]string, 0)\n\n\tfor _, entry := range in {\n\t\tif _, ok := keys[entry]; !ok {\n\t\t\tkeys[entry] = true\n\t\t\tout = append(out, entry)\n\t\t}\n\t}\n\n\treturn out\n}",
"func StringSet(s sets.String) zapcore.ObjectMarshalerFunc {\n\treturn func(enc zapcore.ObjectEncoder) error {\n\t\tenc.AddString(\"keys\", strings.Join(s.UnsortedList(), \",\"))\n\t\treturn nil\n\t}\n}",
"func copyAndSortStringSlice(s []string) []string {\n\tsc := make([]string, 0, len(s))\n\tsc = append(sc, s...)\n\n\tsort.Strings(sc)\n\treturn sc\n}",
"func (ss Set) Slice() []string {\n\tslc := make([]string, 0, len(ss))\n\tfor k := range ss {\n\t\tslc = append(slc, k)\n\t}\n\n\treturn slc\n}",
"func StrsplitSet(string_ string, delimiters string, maxTokens int32) []string {\n\tc_string := C.CString(string_)\n\tdefer C.free(unsafe.Pointer(c_string))\n\n\tc_delimiters := C.CString(delimiters)\n\tdefer C.free(unsafe.Pointer(c_delimiters))\n\n\tc_max_tokens := (C.gint)(maxTokens)\n\n\tretC := C.g_strsplit_set(c_string, c_delimiters, c_max_tokens)\n\tretGo := []string{}\n\tfor p := retC; *p != nil; p = (**C.char)(C.gpointer((uintptr(C.gpointer(p)) + uintptr(C.sizeof_gpointer)))) {\n\t\ts := C.GoString(*p)\n\t\tretGo = append(retGo, s)\n\t}\n\n\treturn retGo\n}",
"func StringSet() *StringSetFilter {\r\n\tf := new(StringSetFilter)\r\n\tf.strcase = STRING_RAWCASE\r\n\tf.delimiter = \",\"\r\n\tf.minCount = 0\r\n\tf.maxCount = types.MaxInt\r\n\treturn f\r\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ParseOnOff parses whether value is "on" or "off", parameterName is passed for error reporting purposes, defaultValue is returned when no value is set | func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
} | [
"func parseBool(v interface{}, def bool) bool {\n\tswitch b := v.(type) {\n\tcase string:\n\t\tswitch strings.ToLower(b) {\n\t\tcase \"t\", \"y\", \"true\", \"yes\":\n\t\t\treturn true\n\t\tcase \"f\", \"n\", \"false\", \"no\":\n\t\t\treturn false\n\t\tdefault:\n\t\t\treturn def\n\t\t}\n\tcase bool:\n\t\treturn b\n\tdefault:\n\t\treturn def\n\t}\n}",
"func BoolOnOff(b bool) string {\n\tif b {\n\t\treturn \"on\"\n\t}\n\treturn \"off\"\n}",
"func (dps *domainParser) di2OnOff() {\n\tdps.defaultValue = dps.onOffDefaultValue\n\tdps.customParseID = dps.onOffCustomParseID\n\tdps.checkEndedCorrect = dps.onOffCheckEndedCorrect\n\tdps.appendQP = dps.onOffAppendQp\n}",
"func ParseBool(str string) (bool, error) {\n\tif str == \"on\" {\n\t\treturn true, nil\n\t}\n\tif str == \"off\" {\n\t\treturn false, nil\n\t}\n\treturn strconv.ParseBool(str)\n}",
"func (f flagBool) Parse(value string) interface{} {\n\tswitch value {\n\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"y\", \"Y\", \"yes\", \"YES\", \"Yes\":\n\t\treturn true\n\t}\n\treturn false\n}",
"func iniToBool(val string) (bool, bool) {\n\tswitch val {\n\tcase \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"y\", \"ON\", \"on\", \"On\":\n\t\treturn true, true\n\tcase \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"n\", \"OFF\", \"off\", \"Off\":\n\t\treturn false, true\n\t}\n\treturn false, false\n}",
"func ParseBool(param string) *bool {\n\n\tparam = strings.ToLower(param)\n\tif param == \"true\" || param == \"1\" {\n\t\tb := true\n\t\treturn &b\n\t}\n\tif param == \"false\" || param == \"0\" {\n\t\tb := false\n\t\treturn &b\n\t}\n\n\treturn nil\n}",
"func parseBool(param string) (bool, error) {\n\tif param == \"\" {\n\t\treturn false, nil\n\t}\n\n\treturn strconv.ParseBool(param)\n}",
"func parseBoolParam(paramName string, gen config.Generator) (bool, error) {\n\tfor _, param := range gen.Params {\n\t\tif param.Key == paramName {\n\t\t\tresult, err := strconv.ParseBool(param.Value)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\treturn result, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func (f flagString) Parse(value string) interface{} {\n\treturn value\n}",
"func ParseBoolP(cmd *cobra.Command, name string) (*bool, error) {\n\tflagRaw, err := cmd.Flags().GetString(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar flagVal *bool\n\tss, err := strconv.ParseBool(flagRaw)\n\tif err != nil && flagRaw != \"\" {\n\t\treturn nil, err\n\t}\n\n\tif flagRaw != \"\" && err == nil {\n\t\treturn &ss, nil\n\t}\n\n\treturn flagVal, nil\n}",
"func (f *Form) Bool(param string, defaultValue bool) bool {\n\tvals, ok := f.values[param]\n\tif !ok {\n\t\treturn defaultValue\n\t}\n\tparamVal, err := strconv.ParseBool(vals[0])\n\tif err != nil {\n\t\tf.err = err\n\t\treturn defaultValue\n\t}\n\treturn paramVal\n}",
"func NamedBoolDefault(name string, def bool) func(http.ResponseWriter, url.Values, martini.Context) {\n\treturn func(w http.ResponseWriter, query url.Values, m martini.Context) {\n\t\tvalue_string := query.Get(name)\n\t\tvalue, err := strconv.ParseBool(value_string)\n\n\t\tif \"\" == value_string {\n\t\t\tm.Map(NamedBoolParameter(def))\n\t\t\treturn\n\t\t}\n\n\t\tif nil != err {\n\t\t\thttp.Error(w, fmt.Sprintf(\"\\\"%s\\\" is not a boolean\"), 422)\n\t\t}\n\n\t\tm.Map(NamedBoolParameter(value))\n\t}\n}",
"func ParseBool(s string) (value bool, err error) {\n\tif s == \"\" {\n\t\treturn\n\t}\n\ts = strings.ToLower(s)\n\tswitch s {\n\tcase \"y\", \"yes\", \"on\":\n\t\treturn true, nil\n\tcase \"n\", \"no\", \"off\":\n\t\treturn false, nil\n\t}\n\treturn strconv.ParseBool(s)\n}",
"func (p *Parser) Bool(flag string, def bool, help ...string) (val bool, err error) {\n\tok, err := p.Scan(flag, &val)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !ok {\n\t\tval = def\n\t}\n\tp.register(flag, help, def)\n\treturn\n}",
"func (m *Server) LEDOn(w http.ResponseWriter, r *http.Request) {\n\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ta := strings.ToLower(r.Form.Get(\"cmd\")) // valid values = {on,off}.\n\n\tswitch a {\n\tcase \"on\":\n\t\tif err := m.sonny.LEDOn(true); err != nil {\n\t\t\twriteResponse(w, &response{\n\t\t\t\tErr: fmt.Sprintf(\"Error: LED failed %v\", err),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\tcase \"off\":\n\t\tif err := m.sonny.LEDOn(false); err != nil {\n\t\t\twriteResponse(w, &response{\n\t\t\t\tErr: fmt.Sprintf(\"Error: LED failed %v\", err),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\twriteResponse(w, &response{\n\t\t\tErr: \"Error: unknown command\",\n\t\t})\n\t\treturn\n\t}\n\n\twriteResponse(w, &response{\n\t\tData: \"OK\",\n\t})\n}",
"func ParseBool(operand string) (value bool, err error) { return strconv.ParseBool(operand) }",
"func (dps *domainParser) onOffDefaultValue() (tmpIDs []string, queryPieceIDs map[string]bool) {\n\ttmpIDs = []string{onoff_default_id}\n\tqueryPieceIDs = map[string]bool{onoff_default_id: true}\n\treturn\n}",
"func (s Service) Parse(setting string) {\n\tif kv := strings.Split(setting, \"=\"); len(kv) == 1 {\n\t\ts[kv[0]] = \"true\"\n\t} else if len(kv) == 2 {\n\t\ts[kv[0]] = strings.Trim(kv[1], ` \t\"`)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsGroupMember returns whether currently logged user is a member of a group | func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
} | [
"func (userser *UserService) IsGroupMember(userid , groupid string ) bool {\n\ter := userser.UserRepo.IsGroupMember(userid , groupid )\n\tif er != nil {\n\t\treturn false \n\t}\n\treturn true \n}",
"func IsMemberOfGroup(group, userName string) (bool, error) {\n\treturn isMemberOfGroup(group, userName)\n}",
"func (s *GroupService) isGroupMember(groupId, userId string) (bool, error) {\n\tvar condition = map[string]interface{}{\n\t\t\"groupId\": groupId,\n\t\t\"userId\": userId,\n\t}\n\tmemberProfile, err := groupRepo.FindOneMember(condition)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn memberProfile == nil, nil\n}",
"func IsMember(claims jwtgo.Claims, groups []string, scopes []string) bool {\n\tmapClaims, err := MapClaims(claims)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// O(n^2) loop\n\tfor _, userGroup := range GetGroups(mapClaims, scopes) {\n\t\tfor _, group := range groups {\n\t\t\tif userGroup == group {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (mv *MembershipValidator) IsInGroup(\n\tpublicKey *operator.PublicKey,\n) bool {\n\taddress, err := mv.signing.PublicKeyToAddress(publicKey)\n\tif err != nil {\n\t\tmv.logger.Errorf(\"cannot convert public key to chain address: [%v]\", err)\n\t\treturn false\n\t}\n\n\t_, isInGroup := mv.members[address.String()]\n\treturn isInGroup\n}",
"func (c *client) IsMember(org, user string) (bool, error) {\n\tc.log(\"IsMember\", org, user)\n\tif org == user {\n\t\t// Make it possible to run a couple of plugins on personal repos.\n\t\treturn true, nil\n\t}\n\tcode, err := c.request(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/orgs/%s/members/%s\", org, user),\n\t\torg: org,\n\t\texitCodes: []int{204, 404, 302},\n\t}, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif code == 204 {\n\t\treturn true, nil\n\t} else if code == 404 {\n\t\treturn false, nil\n\t} else if code == 302 {\n\t\treturn false, fmt.Errorf(\"requester is not %s org member\", org)\n\t}\n\t// Should be unreachable.\n\treturn false, fmt.Errorf(\"unexpected status: %d\", code)\n}",
"func (fc *fakeClient) IsMember(org, user string) (bool, error) {\n\tfor _, m := range fc.orgMembers[org] {\n\t\tif m == user {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func (c *Client) IsMember(org, user string) (bool, error) {\n\tmember, resp, err := c.cl.Organizations.IsMember(org, user)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tlogRateLimit(\"IsMember\", resp)\n\treturn member, nil\n}",
"func (g *Github) isMember(user string) (bool, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), timeoutShortRequest)\n\tdefer cancel()\n\n\tret, _, err := g.client.Organizations.IsMember(ctx, g.owner, user)\n\n\treturn ret, err\n}",
"func (c *Client) IsMember(org, user string) (bool, error) {\n\tresp, err := c.request(http.MethodGet, fmt.Sprintf(\"%s/orgs/%s/members/%s\", c.base, org, user), nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == 204 {\n\t\treturn true, nil\n\t} else if resp.StatusCode == 404 {\n\t\treturn false, nil\n\t} else if resp.StatusCode == 302 {\n\t\treturn false, fmt.Errorf(\"requester is not %s org member\", org)\n\t}\n\treturn false, fmt.Errorf(\"unexpected status: %s\", resp.Status)\n}",
"func (m *Member) IsMember() bool { return m.Role == MemberRoleMember }",
"func (c *Settings) IsMember(teams []*Team) bool {\n\tfor _, team := range teams {\n\t\tif c.Orgs[team.Login] {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (t *Token) GrantsGroupMembership(group string) bool {\n\tfor _, r := range t.Roles {\n\t\tif r == group {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (g *Group) IsMyGroup(u *User) bool {\n\n\tif g.IsAdmin(u) {\n\t\treturn true\n\t}\n\n\tfor _, user := range g.Users {\n\t\tif user == u.Username {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (i *Installation) IsInGroup() bool {\n\treturn i.GroupID != nil\n}",
"func (s *SyncStorage) IsMember(ns string, group string, member interface{}) (bool, error) {\n\tretVal, err := s.getDbBackend(ns).SIsMember(getNsPrefix(ns)+group, member)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn retVal, err\n}",
"func (ctx *TestContext) UserIsAMemberOfTheGroup(user, group string) error {\n\terr := ctx.ThereIsAUserWith(getParameterString(map[string]string{\n\t\t\"group_id\": user,\n\t\t\"user\": user,\n\t}))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.GroupIsAChildOfTheGroup(user, group)\n}",
"func (htGroup *HTGroup) IsUserInGroup(user string, group string) bool {\n\tgroups := htGroup.GetUserGroups(user)\n\treturn containsGroup(groups, group)\n}",
"func UserInGroup(u *user.User, g *Group) (bool, error) {\n\treturn userInGroup(u, g)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DNSName extracts DNS name from host:port string. | func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
} | [
"func GetHostName(hostAddr string) string {\n\treturn strings.Split(hostAddr, base.UrlPortNumberDelimiter)[0]\n}",
"func GetHostname(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}",
"func hostname(hostport string) (string, error) {\n\thost, _, err := net.SplitHostPort(hostport)\n\treturn host, err\n}",
"func ResolveHostname(addr string) string {\n\tif idx := strings.IndexByte(addr, ':'); idx == 0 {\n\t\t// only port, then return the localhost hostname\n\t\treturn \"localhost\"\n\t} else if idx > 0 {\n\t\treturn addr[0:idx]\n\t}\n\t// it's already hostname\n\treturn addr\n}",
"func hostname(hostport string) string {\n\tcolon := strings.IndexByte(hostport, ':')\n\tif colon == -1 {\n\t\treturn hostport\n\t}\n\tif i := strings.IndexByte(hostport, ']'); i != -1 {\n\t\treturn strings.TrimPrefix(hostport[:i], \"[\")\n\t}\n\treturn hostport[:colon]\n}",
"func DnsDecoder(urlStr string) (*string, *string, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\thostTmp := u.Host\n\tIP := Dns(u.Host)\n\tif IP != nil {\n\t\tu.Host = IP.String()\n\t\turlStr = u.String()\n\t\treturn &urlStr, &hostTmp, nil\n\t}\n\treturn nil, nil, fmt.Errorf(\"dnsDecoder fail\")\n}",
"func (name Name) DNSName() string {\n\treturn strings.ReplaceAll(name.String(), \"_\", \"-\")\n}",
"func (p project) DNSName() string {\n\treturn strings.Replace(strcase.ToSnake(p.Name), \"_\", \"-\", -1)\n}",
"func elbNameFromElbDNS(elbDNS string) string {\n\tre, err := regexp.Compile(\"(.*)(?:-[0-9]{6})\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\telbName := re.FindStringSubmatch(elbDNS)[1]\n\treturn strings.TrimPrefix(elbName, \"internal-\")\n}",
"func parseHost(addr string) string {\n\tvar (\n\t\thost, port string\n\t\tdefaultAssigned bool\n\t)\n\n\tv := strings.Split(addr, \":\")\n\n\tswitch len(v) {\n\tcase 2:\n\t\thost = v[0]\n\t\tport = v[1]\n\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif port == \"\" {\n\t\t\tport = _DEFAULT_PORT\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif defaultAssigned == false {\n\t\t\treturn addr // addr is already in required format\n\t\t}\n\t\tbreak\n\n\tcase 1:\n\t\thost = v[0]\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t}\n\t\tport = _DEFAULT_PORT\n\tcase 0:\n\t\tfallthrough\n\tdefault:\n\t\thost = _DEFAULT_HOST\n\t\tport = _DEFAULT_PORT\n\t\tbreak\n\t}\n\treturn strings.Join([]string{host, port}, \":\")\n}",
"func urlToHostName(u string) (string, error) {\n\turl, err := url.Parse(u)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse url %q: %w\", u, err)\n\t}\n\n\tport := url.Port()\n\tvar suffix string\n\tif port != \"\" {\n\t\tsuffix = \"_\" + port\n\t}\n\treturn strings.ReplaceAll(url.Hostname()+suffix, \".\", \"_\"), nil\n}",
"func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {\n\ttokens := []string{}\n\n\t// Set _IPv6 based on input address\n\tipv6, err = IsIPv6(node)\n\n\tif err != nil {\n\t\treturn \"\", \"\", false, err\n\t}\n\n\terr = nil\n\t// For IPv6\n\tif ipv6 {\n\t\t// Then the url should be of the form [::1]:8091\n\t\ttokens = strings.Split(node, \"]:\")\n\t\thost = strings.Replace(tokens[0], \"[\", \"\", 1)\n\n\t} else {\n\t\t// For IPv4\n\t\ttokens = strings.Split(node, \":\")\n\t\thost = tokens[0]\n\t}\n\n\tif len(tokens) == 2 {\n\t\tport = tokens[1]\n\t} else {\n\t\tport = \"\"\n\t}\n\n\treturn\n}",
"func (o ChallengeSpecOutput) DnsName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ChallengeSpec) string { return v.DnsName }).(pulumi.StringOutput)\n}",
"func parseHostPort(str string) (string, string) {\n\tvar (\n\t\thost string\n\t\tport string\n\n\t\ti = strings.Index(str, \":\")\n\t)\n\tif i == -1 {\n\t\treturn str, \"\"\n\t}\n\n\thost = str[:i]\n\tport = str[i+1:]\n\n\treturn host, port\n}",
"func Hostname() (string, error)",
"func hostnameInSNI(name string) string {\n\thost := name\n\tif len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {\n\t\thost = host[1 : len(host)-1]\n\t}\n\tif i := strings.LastIndex(host, \"%\"); i > 0 {\n\t\thost = host[:i]\n\t}\n\tif net.ParseIP(host) != nil {\n\t\treturn \"\"\n\t}\n\tfor len(name) > 0 && name[len(name)-1] == '.' {\n\t\tname = name[:len(name)-1]\n\t}\n\treturn name\n}",
"func hostport(hostport string) string {\n\tcolon := strings.IndexByte(hostport, ':')\n\tif colon == -1 {\n\t\treturn \"\"\n\t}\n\tif i := strings.Index(hostport, \"]:\"); i != -1 {\n\t\treturn hostport[i+len(\"]:\"):]\n\t}\n\tif strings.Contains(hostport, \"]\") {\n\t\treturn \"\"\n\t}\n\treturn hostport[colon+len(\":\"):]\n}",
"func hostname(ip net.IP) string {\n\tnames, err := net.LookupAddr(ip.String())\n\tif err != nil || len(names) == 0 {\n\t\treturn ip.String()\n\t}\n\n\treturn fmt.Sprintf(\"%s (%s)\", names[0], ip.String())\n}",
"func getHostNameAndPort(hostInfo string) (string, int, error) {\n\thost := strings.SplitN(hostInfo, \":\", -1)\n\tif len(host) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"expected hostname:port, got %s\", host)\n\t}\n\n\tport, err := strconv.Atoi(host[1])\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid port number, got %s\", host[1])\n\t}\n\n\treturn host[0], port, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
MultiCloser implements io.Closer; it sequentially calls Close() on each object | func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
} | [
"func (mc *MultiCloser) Close() error {\n\tmc.mu.Lock()\n\tdefer mc.mu.Unlock()\n\tresult := &multierror.Error{ErrorFormat: utils.SingleLineErrorFormatter}\n\n\tfor _, closer := range mc.closers {\n\t\tif err := closer.Close(); err != nil {\n\t\t\tresult = multierror.Append(result, err)\n\t\t}\n\t}\n\n\tmc.closers = []io.Closer{}\n\treturn result.ErrorOrNil()\n}",
"func (m *IOClosers) Close() (err error) {\n\tfor _, c := range m.closers {\n\t\tif err = c.Close(); err != nil {\n\t\t\tlogger.Errorf(\"Error closing write strream: %s\", err.Error())\n\t\t}\n\t}\n\treturn\n}",
"func (mw *multiWriter) Close() error {\n\tmw.Lock()\n\tfor _, w := range mw.writers {\n\t\tw.Close()\n\t}\n\tmw.writers = nil\n\tmw.Unlock()\n\treturn nil\n}",
"func closeAll(closers ...xclose.Closer) error {\n\tmultiErr := xerrors.NewMultiError()\n\tfor _, closer := range closers {\n\t\tif err := closer.Close(); err != nil {\n\t\t\tmultiErr = multiErr.Add(err)\n\t\t}\n\t}\n\treturn multiErr.FinalError()\n}",
"func closeMultipleSrvs(srvs []*httptest.Server) {\n\tfor _, srv := range srvs {\n\t\tsrv.Close()\n\t}\n}",
"func (list each) Close() {\n\tfor _, br := range list {\n\t\tbr.Close()\n\t}\n}",
"func (bc *BatchCloser) Close() error {\n\tvar errs errorsbp.Batch\n\tfor _, closer := range bc.closers {\n\t\terrs.AddPrefix(fmt.Sprintf(\"%#v\", closer), closer.Close())\n\t}\n\treturn errs.Compile()\n}",
"func (d *Death) closeObjects(closer closer, done chan<- closer) {\n\terr := closer.C.Close()\n\tif err != nil {\n\t\td.log.Error(err)\n\t\tcloser.Err = err\n\t}\n\tdone <- closer\n}",
"func (c *Closer) CloseAll() {\n\tfor i := len(c.closers) - 1; i >= 0; i-- {\n\t\tClose(c.closers[i])\n\t}\n}",
"func (m *MultiConnPool) Close() {\n\tfor _, p := range m.Pools {\n\t\tp.Close()\n\t}\n}",
"func (a Iterators) Close() error {\n\tfor _, itr := range a {\n\t\titr.Close()\n\t}\n\treturn nil\n}",
"func (b *DefaultDocumentIOMultiWriter) Close() (err error) {\n\tfor i := 0; i < len(b.writers); i++ {\n\t\terr = b.writers[i].Close()\n\t}\n\n\tb.wg.Wait()\n\treturn\n}",
"func (mitr *MultiIterator) Close() {\n\tC.multiiterator_close(mitr.c)\n}",
"func (i *Iterator) Close() {}",
"func Close() {\n\tpipeLock.Lock()\n\tfor mid, pipeline := range pipes {\n\t\tif pipeline != nil {\n\t\t\tpipeline.Close()\n\t\t\tdelete(pipes, mid)\n\t\t}\n\t}\n\tpipeLock.Unlock()\n}",
"func (e *BaseExecutor) Close() error {\n\tvar firstErr error\n\tfor _, src := range e.children {\n\t\tif err := src.Close(); err != nil && firstErr == nil {\n\t\t\tfirstErr = err\n\t\t}\n\t}\n\treturn firstErr\n}",
"func (mlog *MultiLogger) Close() {\n\tmlog.Lock()\n\tmlog.isClosed = true\n\tclose(mlog.qerr)\n\tclose(mlog.qout)\n\t<-mlog.flushq\n\t<-mlog.flushq\n\tmlog.Unlock()\n}",
"func (this *ResourceManagerImpl) Close() {\n\tfor _, resource := range this.registry {\n\t\tresource.Close()\n\t}\n}",
"func (r Tiered) Close() error {\n\tvar me multierror.Error\n\tfor _, router := range r.Routers {\n\t\tif closer, ok := router.(io.Closer); ok {\n\t\t\tif err := closer.Close(); err != nil {\n\t\t\t\tme.Errors = append(me.Errors, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn me.ErrorOrNil()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
OpaqueAccessDenied returns a generic NotFound instead of AccessDenied so as to avoid leaking the existence of secret resources. | func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
} | [
"func (aee *ActiveEndpointsError) Forbidden() {}",
"func AccessDeny(msg string) Access {\n\treturn Access{Allow: false, Message: msg, StatusCode: http.StatusBadRequest}\n}",
"func Forbidden(err error) Response {\n\tmessage := \"not authorized\"\n\tif err != nil {\n\t\tmessage = err.Error()\n\t}\n\treturn &errorResponse{\n\t\tcode: http.StatusForbidden,\n\t\tmsg: message,\n\t}\n}",
"func AccessDeniedWithErr(w http.ResponseWriter, r *http.Request, err error) {\n\tdata := []byte(`{ \"error\": \"ERR_FORBIDDEN\" }`)\n\n\tsendError(w, r, err, http.StatusForbidden, data)\n}",
"func NotFoundErr(ctx context.Context) error {\n\treturn appstatus.Errorf(codes.NotFound, \"requested resource not found or %q does not have permission to view it\", auth.CurrentIdentity(ctx))\n}",
"func TestAccessDenied(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\ts.Handle(\"model\", res.Access(func(r res.AccessRequest) {\n\t\t\tr.AccessDenied()\n\t\t}))\n\t}, func(s *Session) {\n\t\tinb := s.Request(\"access.test.model\", nil)\n\t\ts.GetMsg(t).\n\t\t\tAssertSubject(t, inb).\n\t\t\tAssertError(t, res.ErrAccessDenied)\n\t})\n}",
"func assertSecretNotFound(\n\tctx context.Context,\n\tf *framework.Framework,\n\tnamespacedName types.NamespacedName,\n) error {\n\tsecret := &corev1.Secret{}\n\terr := f.Client.Get(ctx, namespacedName, secret)\n\tif err == nil {\n\t\treturn fmt.Errorf(\"secret '%s' still found\", namespacedName)\n\t}\n\tif errors.IsNotFound(err) {\n\t\treturn nil\n\t}\n\treturn err\n}",
"func AccessDenied() ErrorBuilder {\n\treturn &defaultErrorBuilder{\n\t\terr: \"access_denied\",\n\t\terrorDescription: \"The authorization request was denied.\",\n\t}\n}",
"func (r Response) Forbidden(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.Forbidden, payload, header...)\n}",
"func Forbidden(format string, args ...interface{}) error {\n\treturn New(http.StatusForbidden, format, args...)\n}",
"func TestAccessDeniedHandler(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\ts.Handle(\"model\", res.Access(res.AccessDenied))\n\t}, func(s *Session) {\n\t\tinb := s.Request(\"access.test.model\", nil)\n\t\ts.GetMsg(t).\n\t\t\tAssertSubject(t, inb).\n\t\t\tAssertError(t, res.ErrAccessDenied)\n\t})\n}",
"func (r *Router) Forbidden(ctx *Context) {\n\tctx.Forbidden()\n}",
"func RealNotFound(w http.ResponseWriter, r *http.Request, routes martini.Routes) {\n\t// We throw in 405 handling for free (or for the cost of a MethodsFor call)\n\tmethods := routes.MethodsFor(r.URL.Path)\n\n\t// If no methods on this path it is a a 404 and return\n\tif len(methods) == 0 {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// Otherwise a 405 with Allow header\n\tw.Header().Set(\"Allow\", stringMethods(methods))\n\tw.WriteHeader(http.StatusMethodNotAllowed)\n}",
"func Forbidden(w http.ResponseWriter, err error) {\n\t(Response{Error: err.Error()}).json(w, http.StatusForbidden)\n}",
"func Forbidden(format string, a ...interface{}) *Error {\n\treturn newError(ErrForbidden, format, a)\n}",
"func (r Response) NotFound(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.NotFound, payload, header...)\n}",
"func NotFoundRoute(res http.ResponseWriter, req *http.Request) {\n res.Write([]byte(\"Oopsie woopsie this doesn't exist.\"))\n}",
"func NotFoundHandler() Handler { return HandlerFunc(NotFound) }",
"func (cors *Cors) NotFound(w http.ResponseWriter, r *http.Request, routes martini.Routes) {\n\n\t// Leave if not a preflight request\n\tif r.Method != \"OPTIONS\" || len(r.Header.Get(\"Origin\")) == 0 {\n\t\treturn\n\t}\n\n\t// MethodsFor could be expensive with lots of routes.\n\t// It might help to increase Access-Control-Max-Age\n\tmethods := routes.MethodsFor(r.URL.Path)\n\n\t// If this Url has no methods leave it to the next handler\n\tif len(methods) == 0 {\n\t\treturn\n\t}\n\n\th := w.Header()\n\t// Set all the CORS headers other than Access-Control-Allow-{Origin,Methods}\n\tcors.setHeaders(h)\n\th.Set(\"Access-Control-Allow-Methods\", stringMethods(methods))\n\tw.WriteHeader(http.StatusOK)\n\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
PopInt returns a value from the list; it panics if not enough values were allocated | func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
} | [
"func (s *SliceOfInt) Pop() int {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *SliceOfInt32) Pop() int32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *SliceOfInt64) Pop() int64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *SliceOfInt8) Pop() int8 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *SliceOfUint) Pop() uint {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (i Integer64) Pop(index int) (int64, Integer64, error) {\n\tvar item int64\n\tres := make([]int64, 0)\n\n\tif len(i) == 0 {\n\t\terr := \"Pop on empty slice failed\"\n\t\treturn item, res, errors.New(err)\n\t}\n\n\tif index < 0 || index > (len(i)-1) {\n\t\terr := fmt.Sprintf(\"Pop on index %d not available on slice of length %d\", index, len(i))\n\t\treturn item, res, errors.New(err)\n\t}\n\n\titem = i[index]\n\tres = append(i[:index], i[index+1:]...)\n\n\treturn item, res, nil\n}",
"func (s *SliceOfInt16) Pop() int16 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *SliceOfUint32) Pop() uint32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *IntSet) Pop() int {\n\tfor element := range *s {\n\t\ts.Delete(element)\n\t\treturn element\n\t}\n\treturn -1\n}",
"func (il *List[T]) Pop() {\n\tvar zero T\n\til.List[len(il.List)-1] = zero // for gc\n\til.List = il.List[:len(il.List)-1]\n}",
"func (l *List) Pop() (i int, err error) {\n\tif l.IsEmpty() {\n\t\terr = fmt.Errorf(\"Pop from empty list\")\n\t\treturn\n\t}\n\n\ti = l.last.val\n\tl.last = l.last.prev\n\tif l.last != nil {\n\t\tl.last.next = nil\n\t}\n\n\tl.size--\n\treturn\n}",
"func (list *List) Pop(idx ...int) (interface{}, error) {\n\tindex := list.getLastIndex()\n\tll := len(idx)\n\n\tif ll > 1 {\n\t\treturn nil, fmt.Errorf(\"only 1 or 0 arguments are allowed\")\n\t}\n\n\t// in case of `list.Pop()`\n\telement := list.getByIndex(index)\n\tif ll == 0 {\n\t\treturn element, list.removeByIndex(index)\n\t}\n\n\tif idx[0] > index {\n\t\treturn nil, fmt.Errorf(\"index out of range\")\n\t}\n\n\tindex = idx[0]\n\treturn element, list.removeByIndex(index)\n}",
"func (s *SliceOfUint64) Pop() uint64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (t *topK) Pop() interface{} {\n\tn := len(t.values)\n\tx := t.values[n-1]\n\tt.values = t.values[:n-1]\n\treturn x\n}",
"func lvalPop(v *LVal, i int) *LVal {\n\tx := v.Cell[i]\n\n\tv.Cell = append(v.Cell[:i], v.Cell[i+1:]...)\n\treturn x\n}",
"func (t *SpaceTracker) Pop() int {\n\n\t//return -1 if empty\n\tif t.Empty() {\n\t\treturn -1\n\t}\n\t//type assertion\n\tnextIndex, _ := t.FreeFrames.Front().Value.(int)\n\n\t//remove index from data structure (hash and list)\n\tdelete(t.Location, nextIndex)\n\tt.FreeFrames.Remove(t.FreeFrames.Front())\n\treturn nextIndex\n}",
"func (s *SliceOfFloat32) Pop() float32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (h *Heap) Pop() interface{} {\n\tif h.size == 0 {\n\t\treturn nil\n\t}\n\tres := h.values[1]\n\th.values[1] = h.values[h.size]\n\th.values = h.values[:h.size]\n\th.size--\n\n\th.bubbleDown()\n\n\treturn res\n}",
"func popInt(cloneMap map[string]string, key string) (int, error) {\n\tval, err := pop(cloneMap, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn strconv.Atoi(val)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetFreeTCPPorts returns n ports starting from port 20000. | func GetFreeTCPPorts(n int, offset ...int) (PortList, error) {
list := make([]string, 0, n)
start := PortStartingNumber
if len(offset) != 0 {
start = offset[0]
}
for i := start; i < start+n; i++ {
list = append(list, strconv.Itoa(i))
}
return PortList{ports: list}, nil
} | [
"func getFreePorts(t *testing.T, n int) ports {\n\tports := make(ports, n)\n\tfor i := 0; i < n; i++ {\n\t\tlis, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer func() {\n\t\t\terr := lis.Close()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}()\n\t\tports[i] = lis.Addr().(*net.TCPAddr).Port\n\t}\n\treturn ports\n}",
"func GetFreePorts(count int) ([]int, error) {\n\tvar ports []int\n\tfor i := 0; i < count; i++ {\n\t\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tl, err := net.ListenTCP(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer l.Close()\n\t\tports = append(ports, l.Addr().(*net.TCPAddr).Port)\n\t}\n\treturn ports, nil\n}",
"func Take(n int) (ports []int, err error) {\n\tif n <= 0 {\n\t\treturn nil, fmt.Errorf(\"freeport: cannot take %d ports\", n)\n\t}\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\t// Reserve a port block\n\tonce.Do(initialize)\n\n\tif n > total {\n\t\treturn nil, fmt.Errorf(\"freeport: block size too small\")\n\t}\n\n\tfor len(ports) < n {\n\t\tfor freePorts.Len() == 0 {\n\t\t\tif total == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"freeport: impossible to satisfy request; there are no actual free ports in the block anymore\")\n\t\t\t}\n\t\t\tcondNotEmpty.Wait()\n\t\t}\n\n\t\telem := freePorts.Front()\n\t\tfreePorts.Remove(elem)\n\t\tport := elem.Value.(int)\n\n\t\tif used := isPortInUse(port); used {\n\t\t\t// Something outside of the test suite has stolen this port, possibly\n\t\t\t// due to assignment to an ephemeral port, remove it completely.\n\t\t\tlogf(\"WARN\", \"leaked port %d due to theft; removing from circulation\", port)\n\t\t\ttotal--\n\t\t\tcontinue\n\t\t}\n\n\t\tports = append(ports, port)\n\t}\n\n\t// logf(\"DEBUG\", \"free ports: %v\", ports)\n\treturn ports, nil\n}",
"func GetFreeTCPPort() (port int, err error) {\n\tln, err := net.Listen(\"tcp\", \"[::]:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tport = ln.Addr().(*net.TCPAddr).Port\n\terr = ln.Close()\n\treturn\n}",
"func freePortAddrs(ip string, n int) []string {\n\tmin, max := 49152, 65535\n\tfreePortsMu.Lock()\n\tdefer freePortsMu.Unlock()\n\tports := make(map[int]net.Listener, n)\n\taddrs := make([]string, n)\n\tif lastPort < min || lastPort > max {\n\t\tlastPort = min\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tp, addr, listener, err := oneFreePort(ip, lastPort, min, max)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tlastPort = p\n\t\taddrs[i] = addr\n\t\tports[p] = listener\n\t\tusedPorts[p] = struct{}{}\n\t}\n\t// Now release them all. It's now a race to get our desired things\n\t// listening on these addresses.\n\tfor _, l := range ports {\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\treturn addrs\n}",
"func FindFreePort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port\n}",
"func GetFreePort(t *testing.T) string {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\trequire.NoError(t, err)\n\tlistener, err := net.ListenTCP(\"tcp\", addr)\n\trequire.NoError(t, err)\n\tdefer listener.Close()\n\n\taddress := listener.Addr().String()\n\tcolon := strings.Index(address, \":\")\n\tport := address[colon+1:]\n\treturn port\n}",
"func freePort() (uint16, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn uint16(l.Addr().(*net.TCPAddr).Port), nil\n}",
"func freeport(t *testing.T) (port int, addr string) {\n\tl, err := net.ListenTCP(\"tcp\", &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\")})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\ta := l.Addr().(*net.TCPAddr)\n\tport = a.Port\n\treturn port, a.String()\n}",
"func findFreePort() int {\n\tln, _ := net.Listen(\"tcp\", \":0\")\n\tln.Close()\n\n\taddr := ln.Addr().(*net.TCPAddr)\n\treturn addr.Port\n}",
"func getOpenPorts(n int) []string {\n\tports := []string{}\n\tfor i := 0; i < n; i++ {\n\t\tts := httptest.NewServer(http.NewServeMux())\n\t\tdefer ts.Close()\n\t\tu, err := url.Parse(ts.URL)\n\t\trtx.Must(err, \"Could not parse url to local server:\", ts.URL)\n\t\tports = append(ports, \":\"+u.Port())\n\t}\n\treturn ports\n}",
"func getFreePort(t *testing.T) string {\n\tl, err := net.Listen(\"tcp\", \":\")\n\tif err != nil {\n\t\tt.Fatalf(\"getFreePort: could not get free port: %v\", err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().String()[strings.LastIndex(l.Addr().String(), \":\"):]\n}",
"func GetFreePort(t *testing.T) string {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_ = listener.Close()\n\n\treturn strings.Split(listener.Addr().String(), \":\")[1]\n}",
"func availablePorts(cnt int) ([]string, error) {\n\trtn := []string{}\n\n\tfor i := 0; i < cnt; i++ {\n\t\tport, err := getPort()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trtn = append(rtn, strconv.Itoa(port))\n\t}\n\treturn rtn, nil\n}",
"func GetPort() (int, error) {\n\tfor i := previousPort; i < maxPort; i++ {\n\t\tif IsPortAvailable(i) {\n\t\t\t// Next previousPort is 1124 if i == 1024 now.\n\t\t\tpreviousPort = i + 100\n\t\t\treturn i, nil\n\t\t}\n\t}\n\treturn -1, errors.New(\"Not found free TCP Port\")\n}",
"func FreePort() (int, error) {\n\t// Opens a TCP connection to a free port on the host\n\t// and closes the connection but getting the port from it\n\t// so the can be setted to a free\n\t// random port each time if no one is specified\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tl.Close()\n\tsl := strings.Split(l.Addr().String(), \":\")\n\tp, err := strconv.Atoi(sl[len(sl)-1])\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn p, nil\n}",
"func FreePort() (int, error) {\n\tlistener, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tport := listener.Addr().(*net.TCPAddr).Port\n\tif err := listener.Close(); err != nil {\n\t\treturn 0, err\n\t}\n\treturn port, nil\n}",
"func GetFreePort() string {\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().String()\n}",
"func GetFreePort(host string, preferredPort uint32) (int, error) {\n\taddress := host + \":\" + fmt.Sprint(preferredPort)\n\taddr, err := net.ResolveTCPAddr(\"tcp\", address)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |