Dataset Viewer

| query (string, 8–6.75k chars) | document (string, 9–1.89M chars) | negatives (sequence, 19 items) | metadata (dict) |
|---|---|---|---|
Returns the value of the 'go_package' option of the first .proto file found in the same directory as projectFile | func detectGoPackageForProject(projectFile string) (string, error) {
    var goPkg string
    projectDir := filepath.Dir(projectFile)
    if err := filepath.Walk(projectDir, func(protoFile string, info os.FileInfo, err error) error {
        // already set
        if goPkg != "" {
            return nil
        }
        if !strings.HasSuffix(protoFile, ".proto") {
            return nil
        }
        // search for go_package on protos in the same dir as the project.json
        if projectDir != filepath.Dir(protoFile) {
            return nil
        }
        content, err := ioutil.ReadFile(protoFile)
        if err != nil {
            return err
        }
        lines := strings.Split(string(content), "\n")
        for _, line := range lines {
            goPackage := goPackageStatementRegex.FindStringSubmatch(line)
            if len(goPackage) == 0 {
                continue
            }
            if len(goPackage) != 2 {
                return errors.Errorf("parsing go_package error: from %v found %v", line, goPackage)
            }
            goPkg = goPackage[1]
            break
        }
        return nil
    }); err != nil {
        return "", err
    }
    if goPkg == "" {
        return "", errors.Errorf("no go_package statement found in root dir of project %v", projectFile)
    }
    return goPkg, nil
} | [
"func (c *common) GetPackage() string { return c.file.GetPackage() }",
"func (pkg *goPackage) firstGoFile() string {\n\tgoSrcs := []platformStringsBuilder{\n\t\tpkg.library.sources,\n\t\tpkg.binary.sources,\n\t\tpkg.test.sources,\n\t}\n\tfor _, sb := range goSrcs {\n\t\tif sb.strs != nil {\n\t\t\tfor s := range sb.strs {\n\t\t\t\tif strings.HasSuffix(s, \".go\") {\n\t\t\t\t\treturn s\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (fd *File) GoPackagePath() string {\n\treturn fd.builder.GoPackagePath\n}",
"func GoPackage(packageName string) string {\n\tif packageName == \"\" {\n\t\treturn \"\"\n\t}\n\tsplit := strings.Split(packageName, \".\")\n\treturn split[len(split)-1] + \"pb\"\n}",
"func goPackageName(pkg *protoPackage) string {\n\tif opt, ok := pkg.options[\"go_package\"]; ok {\n\t\tif i := strings.IndexByte(opt, ';'); i >= 0 {\n\t\t\treturn opt[i+1:]\n\t\t} else if i := strings.LastIndexByte(opt, '/'); i >= 0 {\n\t\t\treturn opt[i+1:]\n\t\t} else {\n\t\t\treturn opt\n\t\t}\n\t}\n\tif pkg.name != \"\" {\n\t\treturn strings.Replace(pkg.name, \".\", \"_\", -1)\n\t}\n\tif len(pkg.files) == 1 {\n\t\tfor s := range pkg.files {\n\t\t\treturn strings.TrimSuffix(s, \".proto\")\n\t\t}\n\t}\n\treturn \"\"\n}",
"func GoPackageOption(options []proto.Option) (string, string, bool) {\n\tfor _, opt := range options {\n\t\tif opt.Name != \"go_package\" {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.SplitN(opt.Constant.Source, \";\", 2)\n\t\tswitch len(parts) {\n\t\tcase 0:\n\t\t\treturn \"\", \"\", true\n\t\tcase 1:\n\t\t\treturn parts[0], \"\", true\n\t\tcase 2:\n\t\t\treturn parts[0], parts[1], true\n\t\tdefault:\n\t\t\treturn parts[0], strings.Join(parts[1:], \";\"), true\n\t\t}\n\t}\n\n\treturn \"\", \"\", false\n}",
"func goPackageName(d *descriptor.FileDescriptorProto) (name string, explicit bool) {\n\t// Does the file have a \"go_package\" option?\n\tif _, pkg, ok := goPackageOption(d); ok {\n\t\treturn pkg, true\n\t}\n\n\t// Does the file have a package clause?\n\tif pkg := d.GetPackage(); pkg != \"\" {\n\t\treturn pkg, false\n\t}\n\t// Use the file base name.\n\treturn baseName(d.GetName()), false\n}",
"func (d *FileDescriptor) goPackageName() (name string, explicit bool) {\n\t// Does the file have a \"go_package\" option?\n\tif _, pkg, ok := d.goPackageOption(); ok {\n\t\treturn pkg, true\n\t}\n\n\t// Does the file have a package clause?\n\tif pkg := d.GetPackage(); pkg != \"\" {\n\t\treturn pkg, false\n\t}\n\t// Use the file base name.\n\treturn baseName(d.GetName()), false\n}",
"func (c *common) PackageName() string { return uniquePackageOf(c.file) }",
"func (d *FileDescriptor) goFileName(pathType pathType) string {\n\tname := *d.Name\n\tif ext := path.Ext(name); ext == \".proto\" || ext == \".protodevel\" {\n\t\tname = name[:len(name)-len(ext)]\n\t}\n\tname += \".cobra.pb.go\"\n\n\tif pathType == pathTypeSourceRelative {\n\t\treturn name\n\t}\n\n\t// Does the file have a \"go_package\" option?\n\t// If it does, it may override the filename.\n\tif impPath, _, ok := d.goPackageOption(); ok && impPath != \"\" {\n\t\t// Replace the existing dirname with the declared import path.\n\t\t_, name = path.Split(name)\n\t\tname = path.Join(impPath, name)\n\t\treturn name\n\t}\n\n\treturn name\n}",
"func (pp *protoPackage) pkgPath() string {\n\treturn strings.Replace(pp.Pkg, \".\", \"/\", -1)\n}",
"func goPkg(fileName string) (string, error) {\n\tcontent, err := os.ReadFile(fileName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar pkgName string\n\tif match := goPkgOptRe.FindSubmatch(content); len(match) > 0 {\n\t\tpn, err := strconv.Unquote(string(match[1]))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tpkgName = pn\n\t}\n\tif p := strings.IndexRune(pkgName, ';'); p > 0 {\n\t\tpkgName = pkgName[:p]\n\t}\n\treturn pkgName, nil\n}",
"func (project Project) Package() (string, error) {\n\n\tif project.packageName != \"\" {\n\t\treturn project.packageName, nil\n\t}\n\n\tgoModPath := project.RelPath(GoModFileName)\n\tif !project.FileExists(goModPath) {\n\t\treturn \"\", errors.New(\"Failed to determine the package name for this project\")\n\t}\n\n\tb, err := ioutil.ReadFile(goModPath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to read the go.mod file\")\n\t}\n\n\tmod, err := gomod.Parse(goModPath, b)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to parse the go.mod file\")\n\t}\n\n\tproject.packageName = strings.TrimSuffix(mod.Name, \"/\")\n\n\treturn project.packageName, nil\n\n}",
"func (f *FileStruct) GetPersistPackageOption() string {\n\tif f.Desc == nil || f.Desc.GetOptions() == nil {\n\t\treturn \"\"\n\t}\n\tif proto.HasExtension(f.Desc.GetOptions(), persist.E_Package) {\n\t\tpkg, err := proto.GetExtension(f.Desc.GetOptions(), persist.E_Package)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Debug(\"Error\")\n\t\t\treturn \"\"\n\t\t}\n\t\t//logrus.WithField(\"pkg\", *pkg.(*string)).Info(\"Package\")\n\t\treturn *pkg.(*string)\n\t}\n\tlogrus.WithField(\"File Options\", f.Desc.GetOptions()).Debug(\"file options\")\n\treturn \"\"\n}",
"func Which(s protoreflect.FullName) ProtoFile {\r\n\treturn wellKnownTypes[s]\r\n}",
"func GetPackageName(source string) string {\n\tfileNode, err := parser.ParseFile(\"\", source, nil, parser.ImportsOnly)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn fileNode.Name.Name()\n}",
"func goPkg(fname string) (string, error) {\n\tcontent, err := ioutil.ReadFile(fname)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar pkgName string\n\tif match := goPkgOptRe.FindSubmatch(content); len(match) > 0 {\n\t\tpn, err := strconv.Unquote(string(match[1]))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tpkgName = pn\n\t}\n\tif p := strings.IndexRune(pkgName, ';'); p > 0 {\n\t\tpkgName = pkgName[:p]\n\t}\n\treturn pkgName, nil\n}",
"func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {\n\tif imp, ok := d.(protoreflect.FileImport); ok {\n\t\td = imp.FileDescriptor\n\t}\n\ttype canProto interface {\n\t\tFileDescriptorProto() *descriptorpb.FileDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.FileDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok {\n\t\t\treturn fd\n\t\t}\n\t}\n\treturn protodesc.ToFileDescriptorProto(d)\n}",
"func goPackageOption(d *descriptor.FileDescriptorProto) (impPath, pkg string, ok bool) {\n\tpkg = d.GetOptions().GetGoPackage()\n\tif pkg == \"\" {\n\t\treturn\n\t}\n\tok = true\n\t// The presence of a slash implies there's an import path.\n\tslash := strings.LastIndex(pkg, \"/\")\n\tif slash < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = pkg, pkg[slash+1:]\n\t// A semicolon-delimited suffix overrides the package name.\n\tsc := strings.IndexByte(impPath, ';')\n\tif sc < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = impPath[:sc], impPath[sc+1:]\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
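The `detectGoPackageForProject` document above references a `goPackageStatementRegex` that is defined elsewhere in its source file. A minimal, runnable sketch of what such a pattern could look like, assuming the conventional `option go_package = "...";` proto syntax (the exact pattern in the original project may differ):

```go
package main

import (
	"fmt"
	"regexp"
)

// Hypothetical pattern: capture the quoted value of a go_package option line.
var goPackageStatementRegex = regexp.MustCompile(`^\s*option\s+go_package\s*=\s*"([^"]+)"\s*;`)

func main() {
	line := `option go_package = "github.com/org/repo/gen;genpb";`
	if m := goPackageStatementRegex.FindStringSubmatch(line); len(m) == 2 {
		fmt.Println(m[1]) // github.com/org/repo/gen;genpb
	}
}
```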
NewQueueManager instantiates a new QueueManager object. This constructor assigns default values to properties that have them defined and ensures that properties required by the API are set; note that the set of arguments will change whenever the set of required properties changes | func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue) *QueueManager {
    this := QueueManager{}
    this.Name = name
    this.Clusters = clusters
    this.AliasQueues = aliasQueues
    this.RemoteQueues = remoteQueues
    this.ClusterQueues = clusterQueues
    return &this
} | [
"func New() *QueueManager {\n\treturn &QueueManager{\n\t\thandlers: make(map[string]Handler),\n\t}\n}",
"func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name())\n\t}\n\tt := &QueueManager{\n\t\tlogger: logger,\n\t\tflushDeadline: flushDeadline,\n\t\tcfg: cfg,\n\t\texternalLabels: externalLabels,\n\t\trelabelConfigs: relabelConfigs,\n\t\tclient: client,\n\t\tqueueName: client.Name(),\n\n\t\tlogLimiter: rate.NewLimiter(logRateLimit, logBurst),\n\t\tnumShards: cfg.MinShards,\n\t\treshardChan: make(chan int),\n\t\tquit: make(chan struct{}),\n\n\t\tsamplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOut: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t}\n\tt.shards = t.newShards(t.numShards)\n\tnumShards.WithLabelValues(t.queueName).Set(float64(t.numShards))\n\tshardCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))\n\n\t// Initialize counter labels to zero.\n\tsentBatchDuration.WithLabelValues(t.queueName)\n\tsucceededSamplesTotal.WithLabelValues(t.queueName)\n\tfailedSamplesTotal.WithLabelValues(t.queueName)\n\tdroppedSamplesTotal.WithLabelValues(t.queueName)\n\n\treturn t\n}",
"func NewQueueManager(channelLength int) *QueueManager {\n\tmanager := &QueueManager{\n\t\tmapping: make(map[string]*Queue),\n\t\tchannelLength: channelLength,\n\t\tevents: make(chan *QueueEvent, 1),\n\t\tlistenerChan: make(chan queueEventListener, 1),\n\t\tlisteners: make([]chan<- *QueueEvent, 0),\n\t}\n\tmanager.Add(DefaultQueueName)\n\tgo manager.run()\n\treturn manager\n}",
"func NewQueueManager(q amboy.Queue) Manager {\n\treturn &queueManager{\n\t\tqueue: q,\n\t}\n}",
"func NewQueue() *Queue {\n return &Queue{member: make([]interface{}, 0)}\n}",
"func (e *Engine) newQueue(\n\tcfg configkit.RichApplication,\n\tds persistence.DataStore,\n) *queue.Queue {\n\treturn &queue.Queue{\n\t\tRepository: ds,\n\t\tMarshaler: e.opts.Marshaler,\n\t\t// TODO: https://github.com/dogmatiq/verity/issues/102\n\t\t// Make buffer size configurable.\n\t\tBufferSize: 0,\n\t}\n}",
"func NewQueueManagerWithDefaults() *QueueManager {\n\tthis := QueueManager{}\n\treturn &this\n}",
"func SetupQueue(handler interface{}, fallbackHandler interface{}) {\n\tq = memqueue.NewQueue(&msgqueue.Options{\n\t\tHandler: handler,\n\t\tFallbackHandler: fallbackHandler,\n\t\tMaxWorkers: config.Config.MaxWorkers,\n\t\tMaxFetchers: config.Config.MaxFetchers,\n\t\tRetryLimit: 1,\n\t})\n}",
"func NewQueue() Queue {\n\treturn Queue{}\n}",
"func NewQueue(cli *clientv3.Client) (Queue, error) {\n\t// issue linearized read to ensure leader election\n\tglog.Infof(\"GET request to endpoint %v\", cli.Endpoints())\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t_, err := cli.Get(ctx, \"foo\")\n\tcancel()\n\tglog.Infof(\"GET request succeeded on endpoint %v\", cli.Endpoints())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx, cancel = context.WithCancel(context.Background())\n\treturn &queue{\n\t\tcli: cli,\n\t\trootCtx: ctx,\n\t\trootCancel: cancel,\n\t}, nil\n}",
"func newQueueMeta(conf *Conf) *queueMeta {\n\treturn &queueMeta{conf: conf}\n}",
"func NewQueue(maximumCapacity int, initialCapacity int, factory TokenFactory) *Queue {\n\tq := &Queue{\n\t\tmaxCapacity: maximumCapacity,\n\t\tavailableTokens: make(chan (Token), maximumCapacity),\n\t\tcommittedTokens: make(chan (Token), maximumCapacity),\n\t\tdiscardTokens: make(chan (Token), maximumCapacity),\n\t\tcloseTokens: make(chan (Token)),\n\t}\n\n\tfor i := 0; i < maximumCapacity; i++ {\n\t\ttoken := factory()\n\t\tif token == nil {\n\t\t\treturn nil\n\t\t}\n\t\tq.discardTokens <- token\n\t\tq.validTokens = append(q.validTokens, token)\n\t}\n\n\tq.EnableDisableTokens(initialCapacity)\n\n\treturn q\n}",
"func NewQueue(l int) *Queue {\n\tif l == -1 {\n\t\treturn &Queue{\n\t\t\tQueue: make([]types.Event, 0),\n\t\t\tL: int(^uint(0) >> 1), // max integer value, architecture independent\n\t\t}\n\t}\n\tq := &Queue{\n\t\tQueue: make([]types.Event, 0, l),\n\t\tL: l,\n\t}\n\tlog.WithFields(log.Fields{\"Capacity\": q.L}).Debugf(\"Creating queue\")\n\treturn q\n}",
"func setupManager(username string, password string, brokerIp string, brokerPort int, manager *Manager, exchange string, queueName string) error {\n\tamqpURI := getAmqpUri(username, password, brokerIp, brokerPort)\n\tmanager.logger.Debugf(\"dialing %s\", amqpURI)\n\tvar err error\n\tmanager.Connection, err = amqp.Dial(amqpURI)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Connection, getting Channel\")\n\tmanager.Channel, err = manager.Connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Channel, declaring Exchange (%q)\", exchange)\n\n\tmanager.logger.Debugf(\"declared Exchange, declaring Queue %q\", queueName)\n\tqueue, err := manager.Channel.QueueDeclare(\n\t\tqueueName,\n\t\ttrue,\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"declared Queue (%q, %d messages, %d consumers), binding to Exchange\",\n\t\tqueue.Name, queue.Messages, queue.Consumers)\n\n\tif err = manager.Channel.QueueBind(\n\t\tqueue.Name, // name of the queue\n\t\tqueue.Name, // bindingKey\n\t\texchange, // sourceExchange\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debug(\"Queue bound to Exchange, starting Consume\")\n\treturn nil\n}",
"func New(ttl time.Duration) *Queue {\n\n\treturn &Queue{\n\t\ttaskList: []*apiq.Task{},\n\t\tTTL: ttl,\n\t}\n}",
"func NewQueue() *queue {\n\treturn &queue{nil, nil, 0}\n}",
"func NewQueue(action func(interface{}) error) *QueueWorker {\n\treturn &QueueWorker{\n\t\taction: action,\n\t\tlatch: &Latch{},\n\t\tmaxWork: DefaultQueueWorkerMaxWork,\n\t}\n}",
"func NewQueue(maxWorkers int, maxQueue int) *Queue {\n\tq := make(chan Job, maxQueue)\n\treturn &Queue{\n\t\tq,\n\t\ttrue,\n\t\t&Dispatcher{\n\t\t\tjobQueue: q,\n\t\t\tworkerPool: make(chan chan Job, maxWorkers),\n\t\t\tMaxWorkers: maxWorkers,\n\t\t},\n\t}\n}",
"func (t *OpenconfigQos_Qos_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewQueueManagerWithDefaults instantiates a new QueueManager object. This constructor only assigns default values to properties that have them defined; it does not guarantee that properties required by the API are set | func NewQueueManagerWithDefaults() *QueueManager {
    this := QueueManager{}
    return &this
} | [
"func New() *QueueManager {\n\treturn &QueueManager{\n\t\thandlers: make(map[string]Handler),\n\t}\n}",
"func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name())\n\t}\n\tt := &QueueManager{\n\t\tlogger: logger,\n\t\tflushDeadline: flushDeadline,\n\t\tcfg: cfg,\n\t\texternalLabels: externalLabels,\n\t\trelabelConfigs: relabelConfigs,\n\t\tclient: client,\n\t\tqueueName: client.Name(),\n\n\t\tlogLimiter: rate.NewLimiter(logRateLimit, logBurst),\n\t\tnumShards: cfg.MinShards,\n\t\treshardChan: make(chan int),\n\t\tquit: make(chan struct{}),\n\n\t\tsamplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOut: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t}\n\tt.shards = t.newShards(t.numShards)\n\tnumShards.WithLabelValues(t.queueName).Set(float64(t.numShards))\n\tshardCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))\n\n\t// Initialize counter labels to zero.\n\tsentBatchDuration.WithLabelValues(t.queueName)\n\tsucceededSamplesTotal.WithLabelValues(t.queueName)\n\tfailedSamplesTotal.WithLabelValues(t.queueName)\n\tdroppedSamplesTotal.WithLabelValues(t.queueName)\n\n\treturn t\n}",
"func NewQueueManager(channelLength int) *QueueManager {\n\tmanager := &QueueManager{\n\t\tmapping: make(map[string]*Queue),\n\t\tchannelLength: channelLength,\n\t\tevents: make(chan *QueueEvent, 1),\n\t\tlistenerChan: make(chan queueEventListener, 1),\n\t\tlisteners: make([]chan<- *QueueEvent, 0),\n\t}\n\tmanager.Add(DefaultQueueName)\n\tgo manager.run()\n\treturn manager\n}",
"func DefaultQueue(queue string) func(*Locker) error {\n\treturn func(l *Locker) error {\n\t\tl.DefaultQueue = queue\n\t\treturn nil\n\t}\n}",
"func NewDefaultClient() QueueClient {\n\treturn &inMemoryQueue{queues: make(map[string][]string)}\n}",
"func Default() *JobManager {\n\tif _default == nil {\n\t\t_defaultLock.Lock()\n\t\tdefer _defaultLock.Unlock()\n\n\t\tif _default == nil {\n\t\t\t_default = New()\n\t\t}\n\t}\n\treturn _default\n}",
"func NewDefault(db *bolt.DB) (q queue.WaitQueue, err error) {\n\treturn New(db, DefaultBucket, DefaultMemQueueSize, DefaultBufSize)\n}",
"func DefaultQueue(queue string) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tc.DefaultQueue = queue\n\t\treturn nil\n\t}\n}",
"func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue, ) *QueueManager {\n\tthis := QueueManager{}\n\tthis.Name = name\n\tthis.Clusters = clusters\n\tthis.AliasQueues = aliasQueues\n\tthis.RemoteQueues = remoteQueues\n\tthis.ClusterQueues = clusterQueues\n\treturn &this\n}",
"func NewRemoteQueueWithDefaults() *RemoteQueue {\n\tthis := RemoteQueue{}\n\treturn &this\n}",
"func DefaultQueueSettings() QueueSettings {\n\treturn QueueSettings{\n\t\tEnabled: true,\n\t\tNumConsumers: 10,\n\t\t// For 5000 queue elements at 100 requests/sec gives about 50 sec of survival of destination outage.\n\t\t// This is a pretty decent value for production.\n\t\t// User should calculate this from the perspective of how many seconds to buffer in case of a backend outage,\n\t\t// multiply that by the number of requests per seconds.\n\t\tQueueSize: 5000,\n\t\tPersistentStorageEnabled: false,\n\t}\n}",
"func SetupQueue(handler interface{}, fallbackHandler interface{}) {\n\tq = memqueue.NewQueue(&msgqueue.Options{\n\t\tHandler: handler,\n\t\tFallbackHandler: fallbackHandler,\n\t\tMaxWorkers: config.Config.MaxWorkers,\n\t\tMaxFetchers: config.Config.MaxFetchers,\n\t\tRetryLimit: 1,\n\t})\n}",
"func NewQueueManager(q amboy.Queue) Manager {\n\treturn &queueManager{\n\t\tqueue: q,\n\t}\n}",
"func NewDefaultManager() Manager {\n\ts := &defaultManager{}\n\ts.completed.Store(false)\n\ts.managedResourceInfos = make(map[*ResourceInfo]struct{})\n\ts.managedStatus = make(map[schema.GroupResource]*updateStatus)\n\treturn s\n}",
"func GetDefaultQueueSetting(queueName string) TransactionTransportConnectionQueueSettings {\n\treturn TransactionTransportConnectionQueueSettings{\n\t\tQueueName: queueName,\n\t\tDurable: true,\n\t\tAutoDelete: false,\n\t\tExclusive: false,\n\t\tNoWait: false,\n\t\tArgs: nil,\n\t}\n}",
"func NewDefault(m map[string]interface{}) (share.Manager, error) {\n\tc := &config{}\n\tif err := mapstructure.Decode(m, c); err != nil {\n\t\terr = errors.Wrap(err, \"error creating a new manager\")\n\t\treturn nil, err\n\t}\n\n\ts, err := metadata.NewCS3Storage(c.GatewayAddr, c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexer := indexer.CreateIndexer(s)\n\n\tclient, err := pool.GetGatewayServiceClient(c.GatewayAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(client, s, indexer)\n}",
"func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {\n\tif maxFlowSize > maxQueueSize {\n\t\tpanic(\"MaxFlowSize > MaxQueueSize\")\n\t}\n\n\tif helper == nil {\n\t\tpanic(\"helper is nil\")\n\t}\n\n\tq := new(Queue)\n\tq.cond.L = &q.lock\n\tq.maxQueueSize = maxQueueSize\n\tq.maxFlowSize = maxFlowSize\n\tq.helper = helper\n\tq.flows = make(map[uint64]*flowInfo)\n\n\treturn q\n}",
"func New(mqURL string) (models.MessageQueue, error) {\n\tmq, err := newmq(mqURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &metricMQ{mq}, nil\n}",
"func NewDefaultMQService() *mqServiceImpl {\n\treturn &mqServiceImpl{}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
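A brief usage sketch contrasting the two constructors above; the import path `example.com/gen/mq` and the empty queue slices are hypothetical placeholders, and `SetClusters`/`GetClusters` are the accessors shown in the rows below:

```go
package main

import (
	"fmt"

	mq "example.com/gen/mq" // hypothetical import path for the generated client
)

func main() {
	// Required-fields constructor: every API-required property is supplied up front.
	full := mq.NewQueueManager(
		"QM1",
		[]string{"CLUSTER1"},
		[]mq.AliasQueue{},
		[]mq.RemoteQueue{},
		[]mq.ClusterQueue{},
	)

	// Defaults-only constructor: required properties must be set individually afterwards.
	partial := mq.NewQueueManagerWithDefaults()
	partial.SetClusters([]string{"CLUSTER1"})

	fmt.Println(full.GetClusters(), partial.GetClusters())
}
```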
GetClusters returns the Clusters field value | func (o *QueueManager) GetClusters() []string {
    if o == nil {
        var ret []string
        return ret
    }
    return o.Clusters
} | [
"func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tstatus = Failure\n\tmonitor := metrics.StartExternalCall(externalSvcName, \"Fetch Clusters\")\n\tdefer func() { monitor.RecordWithStatus(status) }()\n\n\tresp, err := mlopsHttp.ExecuteHTTPRequest(\n\t\tctx,\n\t\tc.client,\n\t\thcpHostURL+clusterPathV2,\n\t\thttp.MethodGet,\n\t\tmap[string]string{sessionHeader: session},\n\t\tbytes.NewReader(nil),\n\t)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, errors.Wrapf(err, \"while fetching clusters in MLOps controller platform.\")\n\t}\n\tresp.Body.Close()\n\n\tstatus = Success\n\n\terr = c.deleteSession(ctx, hcpHostURL, session)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tclustersResp := models.ClusterResp{}\n\tjson.NewDecoder(resp.Body).Decode(&clustersResp)\n\n\treturn clustersResp, nil\n}",
"func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}",
"func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/get-clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetClustersOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetClusters: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func Clusters() (clusters map[string][]string) {\n\tclusters = make(map[string][]string)\n\tif addr := AccessConsulAddr(); addr != \"\" && Region() != \"\" {\n\t\treturn getClustersFromConsul(addr, Region())\n\t}\n\tcs := Get(\"Key-ClusterMgrCluster\").(map[string]string)\n\tfor key, value := range cs {\n\t\tclusters[key] = strings.Split(value, \" \")\n\t}\n\treturn\n}",
"func (p *Client) Clusters(namespace string) *Cluster {\n\treturn &Cluster{p.ProvisioningV1Interface.Clusters(namespace), p.ts}\n}",
"func (a ClustersAPI) Get(clusterID string) (httpmodels.GetResp, error) {\n\tvar clusterInfo httpmodels.GetResp\n\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/get\", data, nil)\n\tif err != nil {\n\t\treturn clusterInfo, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterInfo)\n\treturn clusterInfo, err\n}",
"func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func clusters(ctx context.Context, projID string) ([]string, error) {\n\tLogf(ctx, \"finding your GKE clusters...\")\n\treturn gcloud(\n\t\tctx,\n\t\t\"--project\", projID,\n\t\t\"container\",\n\t\t\"clusters\",\n\t\t\"list\",\n\t\t\"--format\", \"value(name)\",\n\t)\n}",
"func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (e *ECSClient) GetClusters() ([]*types.ECSCluster, error) {\n\tcArns := []*string{}\n\tparams := &ecs.ListClustersInput{\n\t\tMaxResults: aws.Int64(e.apiMaxResults),\n\t}\n\n\t// Get cluster IDs\n\tlog.Debugf(\"Getting cluster list for region\")\n\tfor {\n\t\tresp, err := e.client.ListClusters(params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, c := range resp.ClusterArns {\n\t\t\tcArns = append(cArns, c)\n\t\t}\n\t\tif resp.NextToken == nil || aws.StringValue(resp.NextToken) == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tparams.NextToken = resp.NextToken\n\t}\n\n\t// Get service descriptions\n\t// TODO: this has a 100 cluster limit, split calls in 100 by 100\n\tparams2 := &ecs.DescribeClustersInput{\n\t\tClusters: cArns,\n\t}\n\tresp2, err := e.client.DescribeClusters(params2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcs := []*types.ECSCluster{}\n\tlog.Debugf(\"Getting cluster descriptions\")\n\tfor _, c := range resp2.Clusters {\n\t\tec := &types.ECSCluster{\n\t\t\tID: aws.StringValue(c.ClusterArn),\n\t\t\tName: aws.StringValue(c.ClusterName),\n\t\t}\n\t\tcs = append(cs, ec)\n\t}\n\n\tlog.Debugf(\"Got %d clusters\", len(cs))\n\treturn cs, nil\n}",
"func (cp *CloudProvider) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (c *Client) GetClusters(ctx context.Context) <-chan GetClusterResult {\n\t// TODO Make the concurrency configurable\n\tconcurrency := int(math.Min(5, float64(runtime.NumCPU())))\n\tresults := make(chan GetClusterResult, concurrency)\n\n\tclusterNames, err := c.GetClusterNames(ctx)\n\tif err != nil {\n\t\tclose(results)\n\t\treturn results\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tgo func() {\n\t\tdefer close(results)\n\t\tfor _, clusterName := range clusterNames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcluster, err := c.GetCluster(ctx, name)\n\t\t\t\tresult := GetClusterResult{Cluster: cluster, Error: err}\n\t\t\t\tresults <- result\n\t\t\t}(clusterName)\n\t\t}\n\t\twg.Wait()\n\t}()\n\n\treturn results\n}",
"func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}",
"func (c *RetentionScriptResolver) Clusters() []graphql.ID {\n\tids := make([]graphql.ID, len(c.clusterIDs))\n\tfor i, c := range c.clusterIDs {\n\t\tids[i] = graphql.ID(c.String())\n\t}\n\treturn ids\n}",
"func ECSGetClusters() ([]string, error) {\n\n\t// get the aws sdk client config\n\tcfg, err := config.LoadDefaultConfig(context.TODO())\n\tif err != nil {\n\t\tpanic(\"configuration error, \" + err.Error())\n\t}\n\n\tclient := ecs.NewFromConfig(cfg)\n\n\tinput := &ecs.ListClustersInput{}\n\n\tresult, err := client.ListClusters(context.TODO(), input)\n\tif result == nil {\n\n\t\treturn []string{}, err\n\t}\n\treturn result.ClusterArns, err\n\n}",
"func (client ContainerEngineClient) ListClusters(ctx context.Context, request ListClustersRequest) (response ListClustersResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.DefaultRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listClusters, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListClustersResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListClustersResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListClustersResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListClustersResponse\")\n\t}\n\treturn\n}",
"func (client OpenShiftManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result v20180930preview.OpenShiftManagedCluster, err error) {\n\treq, err := client.GetPreparer(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
"func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetClustersOk returns a tuple with the Clusters field value and a boolean to check if the value has been set. | func (o *QueueManager) GetClustersOk() (*[]string, bool) {
    if o == nil {
        return nil, false
    }
    return &o.Clusters, true
} | [
"func NewGetClustersOK() *GetClustersOK {\n\treturn &GetClustersOK{}\n}",
"func (o *VirtualizationVmwareVcenterAllOf) GetClusterCountOk() (*int64, bool) {\n\tif o == nil || o.ClusterCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterCount, true\n}",
"func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.K8sClustersProvisioned, true\n}",
"func (o *ComputeBaseCluster) GetStorageClustersOk() ([]StorageBaseClusterRelationship, bool) {\n\tif o == nil || o.StorageClusters == nil {\n\t\treturn nil, false\n\t}\n\treturn o.StorageClusters, true\n}",
"func NewDescribeClustersOK() *DescribeClustersOK {\n\n\treturn &DescribeClustersOK{}\n}",
"func NewDescribeClustersOK() *DescribeClustersOK {\n\treturn &DescribeClustersOK{}\n}",
"func (o *ClusterNodesConfigDto) GetClusterNodesOk() (*[]NodeConfigDto, bool) {\n\tif o == nil || o.ClusterNodes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterNodes, true\n}",
"func (o *ListClustersOnEndpointUsingGETOK) IsSuccess() bool {\n\treturn true\n}",
"func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/get-clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetClustersOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetClusters: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (cp *CloudProvider) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}",
"func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (o *RemoteQueue) GetClusterVisibilityOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterVisibility, true\n}",
"func (o *ProjectDeploymentRuleResponse) GetClusterOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Cluster, true\n}",
"func (o *NiatelemetryNexusDashboardsAllOf) GetClusterNameOk() (*string, bool) {\n\tif o == nil || o.ClusterName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterName, true\n}",
"func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}",
"func (o *VirtualizationIweClusterAllOf) GetClusterNameOk() (*string, bool) {\n\tif o == nil || o.ClusterName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterName, true\n}",
"func (o *ClusterSummaryDTO) GetClusteredOk() (*bool, bool) {\n\tif o == nil || o.Clustered == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Clustered, true\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetClusters sets field value | func (o *QueueManager) SetClusters(v []string) {
    o.Clusters = v
} | [
"func (s *RaftDatabase) SetClusters(clusters int) {\n\ts.clusters = clusters\n}",
"func setSomeClusterValues(ch chan error, manager ConfigManager) error {\n\t// prepare expected cluster config\n\tconf := new(ClusterConfig)\n\tconf.ClusterId = \"myClusterID\"\n\tconf.Description = \"myDescription\"\n\n\tif err := manager.SetClusterConf(conf); err != nil {\n\t\treturn err\n\t}\n\n\treturn <-ch\n}",
"func (_m *Clusterer) SetKubeConfig(c string) {\n\t_m.Called(c)\n}",
"func (store *CenterStore) SetCenters(clust core.Clust) {\n\tstore.centers[len(clust)] = clust\n}",
"func (tr *Cluster) SetParameters(params map[string]interface{}) error {\n\tp, err := json.TFParser.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)\n}",
"func (c *Cluster) SetServerCoordinates(url string, serverCA []byte, user, password string, clientCert, clientKey []byte) error {\n\tc.Server = url\n\n\t// Create kube config\n\tu := &api.AuthInfo{}\n\tif password != \"\" {\n\t\tu.Username = user\n\t\tu.Password = password\n\t} else {\n\t\tu.ClientCertificateData = clientCert\n\t\tu.ClientKeyData = clientKey\n\t}\n\n\tkc := api.Config{\n\t\tKind: \"Config\",\n\t\tAPIVersion: \"v1\",\n\t\tPreferences: api.Preferences{},\n\t\tClusters: map[string]*api.Cluster{\n\t\t\tc.Name: {\n\t\t\t\tServer: c.Server,\n\t\t\t\tCertificateAuthorityData: serverCA,\n\t\t\t},\n\t\t},\n\t\tAuthInfos: map[string]*api.AuthInfo{\n\t\t\tuser: u,\n\t\t},\n\t\tContexts: map[string]*api.Context{\n\t\t\t\"default\": &api.Context{\n\t\t\t\tCluster: c.Name,\n\t\t\t\tAuthInfo: user,\n\t\t\t},\n\t\t},\n\t\tCurrentContext: \"default\",\n\t}\n\n\td, err := clientcmd.Write(kc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := filepath.Join(c.Path, \".kube\", \"config\")\n\terr = ioutil.WriteFile(p, d, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.log.V(2).Info(\"Write file\", \"path\", p)\n\n\t// Create clientset from kube/config\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.log.V(2).Info(\"Read config\", \"path\", p)\n\t// create the clientset\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.log.V(3).Info(\"Created client\")\n\n\tc.client = client\n\n\treturn nil\n}",
"func (_m *Clusterer) SetKubeContext(c string) {\n\t_m.Called(c)\n}",
"func setClusterRoles() cmds.StartupHook {\n\treturn func(ctx context.Context, wg *sync.WaitGroup, args cmds.StartupHookArgs) error {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-args.APIServerReady\n\t\t\tlogrus.Info(\"Applying Cluster Role Bindings\")\n\n\t\t\tcs, err := newClient(args.KubeConfigAdmin, nil)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatalf(\"clusterrole: new k8s client: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setKubeletAPIServerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set kubeletAPIServerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setKubeProxyServerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set kubeProxyServerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setTunnelControllerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"psp: set tunnelControllerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tif err := setCloudControllerManagerRoleBinding(ctx, cs); err != nil {\n\t\t\t\tlogrus.Fatalf(\"ccm: set cloudControllerManagerRoleBinding: %s\", err.Error())\n\t\t\t}\n\n\t\t\tlogrus.Info(\"Cluster Role Bindings applied successfully\")\n\t\t}()\n\t\treturn nil\n\t}\n}",
"func (s *ListClustersOutput) SetClusters(v []*ClusterSummary) *ListClustersOutput {\n\ts.Clusters = v\n\treturn s\n}",
"func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {\n\tdaemon.clusterProvider = clusterProvider\n\tdaemon.netController.SetClusterProvider(clusterProvider)\n\tdaemon.attachableNetworkLock = locker.New()\n}",
"func (cg *CGroup) SetCPUShare(limit int64) error {\n\tversion := cgControllers[\"cpu\"]\n\tswitch version {\n\tcase Unavailable:\n\t\treturn ErrControllerMissing\n\tcase V1:\n\t\treturn cg.rw.Set(version, \"cpu\", \"cpu.shares\", fmt.Sprintf(\"%d\", limit))\n\tcase V2:\n\t\treturn cg.rw.Set(version, \"cpu\", \"cpu.weight\", fmt.Sprintf(\"%d\", limit))\n\t}\n\n\treturn ErrUnknownVersion\n}",
"func (o *ServerProperties) SetCores(v int32) {\n\n\to.Cores = &v\n\n}",
"func (c *Client) SetSlaves(v []interface{}) {\n\tc.slaves = make([]string,0,len(v))\n\tfor _, vv := range v {\n\t\tc.slaves = append(c.slaves, vv.(string))\n\t}\n}",
"func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}",
"func (m *MockBuilder) Clusters() []string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Clusters\")\n\tret0, _ := ret[0].([]string)\n\treturn ret0\n}",
"func (o *V0037Node) SetCores(v int32) {\n\to.Cores = &v\n}",
"func (d *DefaultDriver) SetClusterOpts(n node.Node, rtOpts map[string]string) error {\n\treturn &errors.ErrNotSupported{\n\t\tType: \"Function\",\n\t\tOperation: \"SetClusterOpts()\",\n\t}\n}",
"func TestModifyClusterWithProxyOverride(t *testing.T) {\n\tconf := clientcmdapi.Config{\n\t\tClusters: map[string]*clientcmdapi.Cluster{\n\t\t\t\"my-cluster\": {\n\t\t\t\tServer: \"https://192.168.0.1\",\n\t\t\t\tTLSServerName: \"to-be-cleared\",\n\t\t\t\tProxyURL: \"https://192.168.0.2\",\n\t\t\t},\n\t\t},\n\t}\n\ttest := setClusterTest{\n\t\tdescription: \"Testing 'kubectl config set-cluster' with an existing cluster\",\n\t\tconfig: conf,\n\t\targs: []string{\"my-cluster\"},\n\t\tflags: []string{\n\t\t\t\"--server=https://192.168.0.99\",\n\t\t\t\"--proxy-url=https://192.168.0.100\",\n\t\t},\n\t\texpected: `Cluster \"my-cluster\" set.` + \"\\n\",\n\t\texpectedConfig: clientcmdapi.Config{\n\t\t\tClusters: map[string]*clientcmdapi.Cluster{\n\t\t\t\t\"my-cluster\": {Server: \"https://192.168.0.99\", ProxyURL: \"https://192.168.0.100\"},\n\t\t\t},\n\t\t},\n\t}\n\ttest.run(t)\n}",
"func (_m *Resource) SetClusterName(clusterName string) {\n\t_m.Called(clusterName)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
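The Clusters accessors above follow the generated-client triad of a value getter, an `...Ok` getter that also reports presence, and a setter. A fragment showing the typical use of the `Ok` variant, assuming `mgr` is a `*QueueManager` obtained from one of the constructors:

```go
// mgr is assumed to be a *QueueManager; a nil receiver yields (nil, false).
if clusters, ok := mgr.GetClustersOk(); ok {
	for _, c := range *clusters { // GetClustersOk returns a *[]string
		fmt.Println("cluster:", c)
	}
} else {
	fmt.Println("clusters not set")
}
```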
GetAliasQueues returns the AliasQueues field value | func (o *QueueManager) GetAliasQueues() []AliasQueue {
    if o == nil {
        var ret []AliasQueue
        return ret
    }
    return o.AliasQueues
} | [
"func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}",
"func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}",
"func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}",
"func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}",
"func GetAvailableQueues(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tvar queueNames []string\n\tfor k := range queue.ListQueues() {\n\t\tqueueNames = append(queueNames, k)\n\t}\n\n\tresponseBody := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{queueNames}\n\n\tresponse, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n\n\t_, err = w.Write(response)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n}",
"func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}",
"func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}",
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}",
"func (s *SessionManager) GetAliases() []string {\r\n\taliases := []string{}\r\n\tfor k := range s.configMap {\r\n\t\taliases = append(aliases, k)\r\n\t}\r\n\treturn aliases\r\n}",
"func (s *QSvc) Queues(ctx context.Context, req *pb.QueuesRequest) (*pb.QueuesResponse, error) {\n\tqueueMap, err := s.impl.Queues(ctx,\n\t\tentroq.MatchPrefix(req.MatchPrefix...),\n\t\tentroq.MatchExact(req.MatchExact...),\n\t\tentroq.LimitQueues(int(req.Limit)))\n\tif err != nil {\n\t\treturn nil, autoCodeErrorf(\"failed to get queues: %w\", err)\n\t}\n\tresp := new(pb.QueuesResponse)\n\tfor name, count := range queueMap {\n\t\tresp.Queues = append(resp.Queues, &pb.QueueStats{\n\t\t\tName: name,\n\t\t\tNumTasks: int32(count),\n\t\t})\n\t}\n\treturn resp, nil\n}",
"func (connection *redisConnection) getConsumingQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(connection.queuesKey)\n}",
"func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}",
"func (a *adapter) queueLookup(queueName string) (*sqs.GetQueueUrlOutput, error) {\n\treturn a.sqsClient.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: &queueName,\n\t})\n}",
"func (a *AfcNumQueues) Get(client sophos.ClientInterface, options ...sophos.Option) (err error) {\n\treturn get(client, \"/api/nodes/afc.num_queues\", &a.Value, options...)\n}",
"func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}",
"func getQueueName(arn string) string {\n\tlastIndexOfColon := strings.LastIndex(arn, \":\")\n\treturn arn[lastIndexOfColon+1:]\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAliasQueuesOk returns a tuple with the AliasQueues field value and a boolean to check if the value has been set. | func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {
    if o == nil {
        return nil, false
    }
    return &o.AliasQueues, true
} | [
"func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}",
"func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}",
"func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}",
"func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) IsYANGGoStruct() {}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}",
"func (*OpenconfigQos_Qos_Queues) IsYANGGoStruct() {}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func (c *Context) HasQueuesMap(key string) bool {\n\treturn c.makross.HasQueuesMap(key)\n}",
"func (o *LinkLinkinfoInfoSlaveData) GetQueueIdOk() (*int32, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}",
"func (o *VnicEthAdapterPolicyAllOf) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) IsYANGGoStruct() {}",
"func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (o *TimeseriesWidgetExpressionAlias) GetAliasNameOk() (*string, bool) {\n\tif o == nil || o.AliasName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.AliasName, true\n}",
"func (o *VnicEthAdapterPolicyInventory) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) IsYANGGoStruct() {}",
"func (o *VnicEthAdapterPolicyAllOf) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}",
"func isValidQueue(q string) bool {\n\tchunks := strings.Split(q, \"/\")\n\treturn len(chunks) == 6 &&\n\t\tchunks[0] == \"projects\" &&\n\t\tchunks[1] != \"\" &&\n\t\tchunks[2] == \"locations\" &&\n\t\tchunks[3] != \"\" &&\n\t\tchunks[4] == \"queues\" &&\n\t\tchunks[5] != \"\"\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetAliasQueues sets field value | func (o *QueueManager) SetAliasQueues(v []AliasQueue) {
    o.AliasQueues = v
} | [
"func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tresponce.Value = nil\n\tif !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}",
"func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}",
"func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}",
"func UpdateQueues(db *storm.DB, torrentQueues TorrentQueues) {\n\ttorrentQueues.ID = 5\n\terr := db.Save(&torrentQueues)\n\tif err != nil {\n\t\tLogger.WithFields(logrus.Fields{\"database\": db, \"error\": err}).Error(\"Unable to write Queues to database!\")\n\t}\n}",
"func (tcdb *Teocdb) SetQueue(key string, value []byte) (err error) {\n\treturn tcdb.session.Query(`UPDATE queue SET lock = '', data = ? WHERE key = ? AND time = toTimestamp(now()) AND random = UUID()`,\n\t\tvalue, key).Exec()\n}",
"func (c *Consumer) SetQueueBind(bind *QueueBind) *Consumer {\n\tif bind != nil {\n\t\tc.mutex.Lock()\n\t\tc.bind = bind\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}",
"func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}",
"func (s *Service) SetQueue(q amboy.Queue) error {\n\tif s.closer != nil {\n\t\treturn errors.New(\"cannot set a new queue, Service is already open\")\n\t}\n\n\ts.queue = q\n\treturn nil\n}",
"func (c *Consumer) SetQueueName(withPrefix bool, name string) *Consumer {\n\tif name == \"\" {\n\t\tname = c.getExchangeTopic()\n\t}\n\tnewQueueName := GenerateQueueName(withPrefix, name)\n\tc.mutex.Lock()\n\tc.declare.SetName(newQueueName)\n\tc.bind.SetName(newQueueName)\n\tc.mutex.Unlock()\n\treturn c\n}",
"func (q *Queue) Set(ctx context.Context, ds *linux.MsqidDS) error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tcreds := auth.CredentialsFromContext(ctx)\n\tif ds.MsgQbytes > maxQueueBytes && !creds.HasCapabilityIn(linux.CAP_SYS_RESOURCE, q.obj.UserNS) {\n\t\t// \"An attempt (IPC_SET) was made to increase msg_qbytes beyond the\n\t\t// system parameter MSGMNB, but the caller is not privileged (Linux:\n\t\t// does not have the CAP_SYS_RESOURCE capability).\"\n\t\treturn linuxerr.EPERM\n\t}\n\n\tif err := q.obj.Set(ctx, &ds.MsgPerm); err != nil {\n\t\treturn err\n\t}\n\n\tq.maxBytes = ds.MsgQbytes\n\tq.changeTime = ktime.NowFromContext(ctx)\n\treturn nil\n}",
"func (c *Client) QueueBind(\n\texchange, queue, key string,\n\topts *QueueBindOpts,\n\tconnOpts *ConnectOpts) error {\n\n\tdefaultOpts := DefaultQueueBindOpts()\n\n\tif opts != nil {\n\t\tdefaultOpts = opts\n\t}\n\n\tdefaultConnOpts := DefaultConnectOpts()\n\tif connOpts != nil {\n\t\tdefaultConnOpts = connOpts\n\t}\n\n\tconn, err := c.connect(defaultConnOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ch.Close()\n\n\terr = ch.QueueBind(\n\t\tqueue,\n\t\tkey,\n\t\texchange,\n\t\tdefaultOpts.NoWait,\n\t\tdefaultOpts.Args,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}",
"func (k *Keeper) setAlias(ctx sdkTypes.Context, alias string, aliasData *Alias, aliasOwnerData *AliasOwner) {\n\townerStore := ctx.KVStore(k.ownersStoreKey)\n\taliasKey := getAliasKey(alias)\n\taliasInfo := k.cdc.MustMarshalBinaryLengthPrefixed(aliasData)\n\n\townerKey := aliasData.Owner.String()\n\taliasOwnerInfo := k.cdc.MustMarshalBinaryLengthPrefixed(aliasOwnerData)\n\n\townerStore.Set([]byte(ownerKey), aliasOwnerInfo)\n\townerStore.Set([]byte(aliasKey), aliasInfo)\n\n\t// Remove from namestore after approved and set into ownerstore\n\tnameStore := ctx.KVStore(k.namesStoreKey)\n\tnameStore.Delete([]byte(ownerKey))\n\tnameStore.Delete([]byte(aliasKey))\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func Queue(name string) SubscribeOption {\n\treturn func(o *SubscribeOptions) {\n\t\to.Queue = name\n\t}\n}",
"func (router *EventRouter) BindQueue(queue string, exchange string) {\n\tif router.lastError == nil {\n\t\trouter.DeclareExchange(exchange)\n\t}\n\tif router.lastError == nil {\n\t\trouter.DeclareQueue(queue)\n\t}\n\tif router.lastError == nil {\n\t\trouter.lastError = router.channel.QueueBind(queue, \"\", exchange, false, nil)\n\t}\n}",
"func (e *LifecycleEvent) SetQueueURL(url string) { e.queueURL = url }",
"func (rm *RouterMux) SetAlias(route string, aliases ...string) {\n\tfor _, alias := range aliases {\n\t\trm.aliases[alias] = route\n\t}\n}",
"func (c *Consumer) SetQueueDeclare(declare *QueueDeclare) *Consumer {\n\tif declare != nil {\n\t\tc.mutex.Lock()\n\t\tc.declare = declare\n\t\tc.mutex.Unlock()\n\t}\n\treturn c\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetRemoteQueues returns the RemoteQueues field value | func (o *QueueManager) GetRemoteQueues() []RemoteQueue {
	if o == nil {
		var ret []RemoteQueue
		return ret
	}

	return o.RemoteQueues
} | [
"func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}",
"func (o *RemoteQueue) GetRemoteQueue() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueue\n}",
"func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}",
"func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}",
"func (o *RemoteQueue) GetRemoteQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueue, true\n}",
"func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}",
"func GetRemoteHosts() []string {\r\n\tret := make([]string, 0)\r\n\r\n\tmutex.RLock()\r\n\tdefer mutex.RUnlock()\r\n\r\n\tnodeKey := hex.EncodeToString(GetNodePubKey())\r\n\tfor pubKey, item := range nodes {\r\n\t\tif pubKey != nodeKey && !item.Stopped {\r\n\t\t\tret = append(ret, item.TCPAddress)\r\n\t\t}\r\n\t}\r\n\treturn ret\r\n}",
"func (o *RemoteQueue) GetRemoteQueueManager() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueueManager\n}",
"func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}",
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}",
"func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}",
"func (a *Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}",
"func GetRemoteServers() ([]*remoteServer, error) {\n\ts, err := getStorage()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif s.RemoteServers == nil {\n\t\treturn make([]*remoteServer, 0), nil\n\t}\n\n\treturn s.RemoteServers, nil\n}",
"func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}",
"func GetAvailableQueues(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tvar queueNames []string\n\tfor k := range queue.ListQueues() {\n\t\tqueueNames = append(queueNames, k)\n\t}\n\n\tresponseBody := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{queueNames}\n\n\tresponse, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n\n\t_, err = w.Write(response)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n}",
"func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}",
"func (connection *redisConnection) GetOpenQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(queuesKey)\n}",
"func (connection *redisConnection) getConsumingQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(connection.queuesKey)\n}",
"func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetRemoteQueuesOk returns a tuple with the RemoteQueues field value and a boolean to check if the value has been set. | func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {
if o == nil {
return nil, false
}
return &o.RemoteQueues, true
} | [
"func (o *RemoteQueue) GetRemoteQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueue, true\n}",
"func (o *RemoteQueue) GetRemoteQueueManagerOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueueManager, true\n}",
"func (o *RemoteQueue) GetLocalQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.LocalQueue, true\n}",
"func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}",
"func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}",
"func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}",
"func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}",
"func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}",
"func (o *VnicEthAdapterPolicyAllOf) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}",
"func (o *RemoteQueue) GetRemoteQueue() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueue\n}",
"func (o *SnippetDTO) GetRemoteProcessGroupsOk() (*map[string]RevisionDTO, bool) {\n\tif o == nil || o.RemoteProcessGroups == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteProcessGroups, true\n}",
"func (o *NSQProducer) GetRemoteAddressOk() (*string, bool) {\n\tif o == nil || o.RemoteAddress == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteAddress, true\n}",
"func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}",
"func (o *VnicEthAdapterPolicyInventory) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}",
"func (o *LinkLinkinfoInfoSlaveData) GetQueueIdOk() (*int32, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}",
"func (o *NotificationConfig) GetReceiversOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Receivers, true\n}",
"func (o *VulnUpdateNotification) GetQueueIdOk() (*string, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}",
"func (o *SmsBinaryMessage) GetDestinationsOk() (*[]SmsDestination, bool) {\n\tif o == nil || o.Destinations == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Destinations, true\n}",
"func (o *VnicEthAdapterPolicyAllOf) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetRemoteQueues sets field value | func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {
o.RemoteQueues = v
} | [
"func (o *RemoteQueue) SetRemoteQueue(v string) {\n\to.RemoteQueue = v\n}",
"func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}",
"func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tresponce.Value = nil\n\tif !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}",
"func (q *Queue) Set(ctx context.Context, ds *linux.MsqidDS) error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tcreds := auth.CredentialsFromContext(ctx)\n\tif ds.MsgQbytes > maxQueueBytes && !creds.HasCapabilityIn(linux.CAP_SYS_RESOURCE, q.obj.UserNS) {\n\t\t// \"An attempt (IPC_SET) was made to increase msg_qbytes beyond the\n\t\t// system parameter MSGMNB, but the caller is not privileged (Linux:\n\t\t// does not have the CAP_SYS_RESOURCE capability).\"\n\t\treturn linuxerr.EPERM\n\t}\n\n\tif err := q.obj.Set(ctx, &ds.MsgPerm); err != nil {\n\t\treturn err\n\t}\n\n\tq.maxBytes = ds.MsgQbytes\n\tq.changeTime = ktime.NowFromContext(ctx)\n\treturn nil\n}",
"func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}",
"func (m *TeleconferenceDeviceMediaQuality) SetRemoteIPAddress(value *string)() {\n err := m.GetBackingStore().Set(\"remoteIPAddress\", value)\n if err != nil {\n panic(err)\n }\n}",
"func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}",
"func (r *RPC) SetQueueClient(c queue.Client) {\n\tgapi := NewGRpcServer(c, r.api)\n\tjapi := NewJSONRPCServer(c, r.api)\n\tr.gapi = gapi\n\tr.japi = japi\n\tr.c = c\n\t//注册系统rpc\n\tpluginmgr.AddRPC(r)\n\tr.Listen()\n}",
"func SetMaxQueues(maxQueues int) Option {\n\treturn func(o *options) {\n\t\to.maxQueues = maxQueues\n\t}\n}",
"func (r *RPC) SetQueueClient(c queue.Client) {\r\n\tgapi := NewGRpcServer(c, r.api)\r\n\tjapi := NewJSONRPCServer(c, r.api)\r\n\tr.gapi = gapi\r\n\tr.japi = japi\r\n\tr.c = c\r\n\t//注册系统rpc\r\n\tpluginmgr.AddRPC(r)\r\n\tr.Listen()\r\n}",
"func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}",
"func (m *TeleconferenceDeviceMediaQuality) SetRemotePort(value *int32)() {\n err := m.GetBackingStore().Set(\"remotePort\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (tcdb *Teocdb) SetQueue(key string, value []byte) (err error) {\n\treturn tcdb.session.Query(`UPDATE queue SET lock = '', data = ? WHERE key = ? AND time = toTimestamp(now()) AND random = UUID()`,\n\t\tvalue, key).Exec()\n}",
"func (m *AudioRoutingGroup) SetReceivers(value []string)() {\n err := m.GetBackingStore().Set(\"receivers\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (t *UnpolledCaches) SetRemotePolled(results map[tc.CacheName]tc.IsAvailable) {\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tnumUnpolledCaches := len(t.unpolledCaches)\n\tif numUnpolledCaches == 0 {\n\t\treturn\n\t}\n\tfor cache := range t.unpolledCaches {\n\tinnerLoop:\n\t\tfor cacheName := range results {\n\t\t\tif cacheName != cache {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdelete(t.unpolledCaches, cache)\n\t\t\tdelete(t.seenCaches, cache)\n\t\t\tbreak innerLoop\n\t\t}\n\t}\n}",
"func (m *VpnConfiguration) SetServers(value []VpnServerable)() {\n err := m.GetBackingStore().Set(\"servers\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (network *P2p) SetQueueClient(client queue.Client) {\n\tnetwork.client = client\n\tnetwork.node.SetQueueClient(client)\n\tgo func() {\n\t\tlog.Info(\"p2p\", \"setqueuecliet\", \"ok\")\n\t\tnetwork.node.Start()\n\t\tnetwork.subP2pMsg()\n\t\terr := network.loadP2PPrivKeyToWallet()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}()\n}",
"func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"[email protected]\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"[email protected]\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"[email protected]\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}",
"func SetQueueReclaimable(ctx *TestContext, queues []string, reclaimable bool) {\n\tBy(\"Setting Queue reclaimable\")\n\n\tfor _, q := range queues {\n\t\tqueue, err := ctx.Vcclient.SchedulingV1beta1().Queues().Get(context.TODO(), q, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to get queue %s\", q)\n\n\t\tqueue.Spec.Reclaimable = &reclaimable\n\t\t_, err = ctx.Vcclient.SchedulingV1beta1().Queues().Update(context.TODO(), queue, metav1.UpdateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred(), \"failed to update queue %s\", q)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetClusterQueues returns the ClusterQueues field value | func (o *QueueManager) GetClusterQueues() []ClusterQueue {
	if o == nil {
		var ret []ClusterQueue
		return ret
	}

	return o.ClusterQueues
} | [
"func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}",
"func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}",
"func (client *Client) GetClusterQueueInfo(request *GetClusterQueueInfoRequest) (response *GetClusterQueueInfoResponse, err error) {\n\tresponse = CreateGetClusterQueueInfoResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (connection *redisConnection) getConsumingQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(connection.queuesKey)\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}",
"func (a *AfcNumQueues) Get(client sophos.ClientInterface, options ...sophos.Option) (err error) {\n\treturn get(client, \"/api/nodes/afc.num_queues\", &a.Value, options...)\n}",
"func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}",
"func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}",
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}",
"func (o *RemoteQueue) GetClusterVisibility() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.ClusterVisibility\n}",
"func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}",
"func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}",
"func (q *DistroQueueInfo) GetQueueCollection() string {\n\tif q.SecondaryQueue {\n\t\treturn TaskSecondaryQueuesCollection\n\t}\n\n\treturn TaskQueuesCollection\n}",
"func (s *QSvc) Queues(ctx context.Context, req *pb.QueuesRequest) (*pb.QueuesResponse, error) {\n\tqueueMap, err := s.impl.Queues(ctx,\n\t\tentroq.MatchPrefix(req.MatchPrefix...),\n\t\tentroq.MatchExact(req.MatchExact...),\n\t\tentroq.LimitQueues(int(req.Limit)))\n\tif err != nil {\n\t\treturn nil, autoCodeErrorf(\"failed to get queues: %w\", err)\n\t}\n\tresp := new(pb.QueuesResponse)\n\tfor name, count := range queueMap {\n\t\tresp.Queues = append(resp.Queues, &pb.QueueStats{\n\t\t\tName: name,\n\t\t\tNumTasks: int32(count),\n\t\t})\n\t}\n\treturn resp, nil\n}",
"func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}",
"func (connection *redisConnection) GetOpenQueues() ([]string, error) {\n\treturn connection.redisClient.SMembers(queuesKey)\n}",
"func (client *Client) GetClusterQueueInfoWithCallback(request *GetClusterQueueInfoRequest, callback func(response *GetClusterQueueInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetClusterQueueInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetClusterQueueInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetClusterQueuesOk returns a tuple with the ClusterQueues field value and a boolean to check if the value has been set. | func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {
if o == nil {
return nil, false
}
return &o.ClusterQueues, true
} | [
"func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}",
"func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}",
"func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}",
"func (o *RemoteQueue) GetClusterVisibilityOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterVisibility, true\n}",
"func (o *QueueManager) GetClusterQueues() []ClusterQueue {\n\tif o == nil {\n\t\tvar ret []ClusterQueue\n\t\treturn ret\n\t}\n\n\treturn o.ClusterQueues\n}",
"func (o *RemoteQueue) GetRemoteQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueue, true\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}",
"func (o *RemoteQueue) GetLocalQueueOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.LocalQueue, true\n}",
"func (c *Context) HasQueuesMap(key string) bool {\n\treturn c.makross.HasQueuesMap(key)\n}",
"func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}",
"func (client *Client) GetClusterQueueInfo(request *GetClusterQueueInfoRequest) (response *GetClusterQueueInfoResponse, err error) {\n\tresponse = CreateGetClusterQueueInfoResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (o *LinkLinkinfoInfoSlaveData) GetQueueIdOk() (*int32, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}",
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}",
"func (o *VnicEthAdapterPolicyAllOf) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}",
"func (o *V0037Node) GetThreadsOk() (*int32, bool) {\n\tif o == nil || o.Threads == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Threads, true\n}",
"func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (s *QSvc) Queues(ctx context.Context, req *pb.QueuesRequest) (*pb.QueuesResponse, error) {\n\tqueueMap, err := s.impl.Queues(ctx,\n\t\tentroq.MatchPrefix(req.MatchPrefix...),\n\t\tentroq.MatchExact(req.MatchExact...),\n\t\tentroq.LimitQueues(int(req.Limit)))\n\tif err != nil {\n\t\treturn nil, autoCodeErrorf(\"failed to get queues: %w\", err)\n\t}\n\tresp := new(pb.QueuesResponse)\n\tfor name, count := range queueMap {\n\t\tresp.Queues = append(resp.Queues, &pb.QueueStats{\n\t\t\tName: name,\n\t\t\tNumTasks: int32(count),\n\t\t})\n\t}\n\treturn resp, nil\n}",
"func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}",
"func GetAvailableQueues(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tvar queueNames []string\n\tfor k := range queue.ListQueues() {\n\t\tqueueNames = append(queueNames, k)\n\t}\n\n\tresponseBody := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{queueNames}\n\n\tresponse, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n\n\t_, err = w.Write(response)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\tslogger.Error(err.Error())\n\t\treturn\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a tiploc to the result so that it will be included in the tiploc map | func (bf *boardFilter) addTiploc(tiploc string) {
if tiploc != "" {
bf.tiplocs[tiploc] = nil
}
} | [
"func (r *LocationMap) Add(t *Location) {\n\tif _, ok := r.m[t.Tiploc]; !ok {\n\t\tr.m[t.Tiploc] = t\n\t}\n}",
"func (bd *BlockDAG) updateTips(b *Block) {\n\tif bd.tips == nil {\n\t\tbd.tips = NewHashSet()\n\t\tbd.tips.AddPair(b.GetHash(), b)\n\t\treturn\n\t}\n\tfor k := range bd.tips.GetMap() {\n\t\tblock := bd.getBlock(&k)\n\t\tif block.HasChildren() {\n\t\t\tbd.tips.Remove(&k)\n\t\t}\n\t}\n\tbd.tips.AddPair(b.GetHash(), b)\n}",
"func (m *MemoryStore) SetTips(add hash.Hash, del []*site.Site) {\n\tfor _, d := range del {\n\t\tdelete(m.tips, d.Hash())\n\t}\n\tm.tips[add] = true\n}",
"func (c *CIFImporter) putTiploc(t *cif.Tiploc) error {\n t.Update()\n\n // Link it to this CIF file\n t.DateOfExtract = c.importhd.DateOfExtract\n\n _, err := c.tx.Exec(\n \"INSERT INTO timetable.tiploc \"+\n \"(tiploc, crs, stanox, name, nlc, nlccheck, nlcdesc, station, dateextract) \"+\n \"VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9) \"+\n \"ON CONFLICT ( id ) \"+\n \"DO UPDATE SET \"+\n \"crs = EXCLUDED.crs, \"+\n \"stanox = EXCLUDED.stanox, \"+\n \"name = EXCLUDED.name, \"+\n \"nlc = EXCLUDED.nlc, \"+\n \"nlccheck = EXCLUDED.nlccheck, \"+\n \"nlcdesc = EXCLUDED.nlcdesc, \"+\n \"station = EXCLUDED.station, \"+\n \"dateextract = EXCLUDED.dateextract \",\n t.Tiploc,\n t.CRS,\n t.Stanox,\n t.Name,\n t.NLC,\n t.NLCCheck,\n t.NLCDesc,\n t.Station,\n t.DateOfExtract,\n )\n if err != nil {\n log.Printf(\"Failed to insert tiploc %s\", t.Tiploc)\n return err\n }\n\n return nil\n}",
"func (q LocationTemperatureQueryResult) Add(temp float64, city string, y int, mo int, d int) {\n\tq[city][y][mo][d] = append(q[city][y][mo][d], temp)\n}",
"func (f Factory) WithTips(tip, tipper string) Factory {\n\tparsedTips, err := sdk.ParseCoinsNormalized(tip)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tf.tip = &tx.Tip{\n\t\tTipper: tipper,\n\t\tAmount: parsedTips,\n\t}\n\treturn f\n}",
"func (ts *TipSelector) AddTip(bndl *tangle.Bundle) {\n\tts.tipsLock.Lock()\n\tdefer ts.tipsLock.Unlock()\n\n\ttailTxHash := bndl.GetTailHash()\n\n\tif _, exists := ts.nonLazyTipsMap[string(tailTxHash)]; exists {\n\t\t// tip already exists\n\t\treturn\n\t}\n\n\tif _, exists := ts.semiLazyTipsMap[string(tailTxHash)]; exists {\n\t\t// tip already exists\n\t\treturn\n\t}\n\n\tlsmi := tangle.GetSolidMilestoneIndex()\n\n\tscore := ts.calculateScore(tailTxHash, lsmi)\n\tif score == ScoreLazy {\n\t\t// do not add lazy tips.\n\t\t// lazy tips should also not remove other tips from the pool, otherwise the tip pool will run empty.\n\t\treturn\n\t}\n\n\ttip := &Tip{\n\t\tScore: score,\n\t\tHash: tailTxHash,\n\t\tTimeFirstApprover: time.Time{},\n\t\tApproversCount: atomic.NewUint32(0),\n\t}\n\n\tswitch tip.Score {\n\tcase ScoreNonLazy:\n\t\tts.nonLazyTipsMap[string(tailTxHash)] = tip\n\t\tmetrics.SharedServerMetrics.TipsNonLazy.Add(1)\n\tcase ScoreSemiLazy:\n\t\tts.semiLazyTipsMap[string(tailTxHash)] = tip\n\t\tmetrics.SharedServerMetrics.TipsSemiLazy.Add(1)\n\t}\n\n\tts.Events.TipAdded.Trigger(tip)\n\n\t// the approvees (trunk and branch) are the tail transactions this tip approves\n\t// remove them from the tip pool\n\tapproveeTailTxHashes := map[string]struct{}{\n\t\tstring(bndl.GetTrunkHash(true)): {},\n\t\tstring(bndl.GetBranchHash(true)): {},\n\t}\n\n\tcheckTip := func(tipsMap map[string]*Tip, approveeTip *Tip, retentionRulesTipsLimit int, maxApprovers uint32, maxReferencedTipAgeSeconds time.Duration) bool {\n\t\t// if the amount of known tips is above the limit, remove the tip directly\n\t\tif len(tipsMap) > retentionRulesTipsLimit {\n\t\t\treturn ts.removeTipWithoutLocking(tipsMap, hornet.Hash(approveeTip.Hash))\n\t\t}\n\n\t\t// check if the maximum amount of approvers for this tip is reached\n\t\tif approveeTip.ApproversCount.Add(1) >= maxApprovers {\n\t\t\treturn ts.removeTipWithoutLocking(tipsMap, hornet.Hash(approveeTip.Hash))\n\t\t}\n\n\t\tif maxReferencedTipAgeSeconds == time.Duration(0) {\n\t\t\t// check for maxReferenceTipAge is disabled\n\t\t\treturn false\n\t\t}\n\n\t\t// check if the tip was referenced by another transaction before\n\t\tif approveeTip.TimeFirstApprover.IsZero() {\n\t\t\t// mark the tip as referenced\n\t\t\tapproveeTip.TimeFirstApprover = time.Now()\n\t\t}\n\n\t\treturn false\n\t}\n\n\tfor approveeTailTxHash := range approveeTailTxHashes {\n\t\t// we have to separate between the pools, to prevent semi-lazy tips from emptying the non-lazy pool\n\t\tswitch tip.Score {\n\t\tcase ScoreNonLazy:\n\t\t\tif approveeTip, exists := ts.nonLazyTipsMap[approveeTailTxHash]; exists {\n\t\t\t\tif checkTip(ts.nonLazyTipsMap, approveeTip, ts.retentionRulesTipsLimitNonLazy, ts.maxApproversNonLazy, ts.maxReferencedTipAgeSecondsNonLazy) {\n\t\t\t\t\tmetrics.SharedServerMetrics.TipsNonLazy.Sub(1)\n\t\t\t\t}\n\t\t\t}\n\t\tcase ScoreSemiLazy:\n\t\t\tif approveeTip, exists := ts.semiLazyTipsMap[approveeTailTxHash]; exists {\n\t\t\t\tif checkTip(ts.semiLazyTipsMap, approveeTip, ts.retentionRulesTipsLimitSemiLazy, ts.maxApproversSemiLazy, ts.maxReferencedTipAgeSecondsSemiLazy) {\n\t\t\t\t\tmetrics.SharedServerMetrics.TipsSemiLazy.Sub(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (*Search) Tips() {\n\tfmt.Println(\"\\n => Tips: to select a manga, use `manga <index>`\")\n}",
"func (m *PregnancystatusMutation) AddAntenatalinformationIDs(ids ...int) {\n\tif m._Antenatalinformation == nil {\n\t\tm._Antenatalinformation = make(map[int]struct{})\n\t}\n\tfor i := range ids {\n\t\tm._Antenatalinformation[ids[i]] = struct{}{}\n\t}\n}",
"func (t *Tangle) Tips() []*site.Site {\n\tkeys := []*site.Site{}\n\tfor h := range t.tips {\n\t\ts := t.Get(h)\n\t\tif s != nil {\n\t\t\tkeys = append(keys, s.Site)\n\t\t}\n\t}\n\treturn keys\n}",
"func (resp Response) AddTags(newTags map[string]string) (*influx.Point, error) {\r\n\r\n\t// Pull off the current tags\r\n\ttags := resp.Point.Tags()\r\n\r\n\t// Add the new tags to the current tags\r\n\tfor tag, tagValue := range newTags {\r\n\t\ttags[tag] = tagValue\r\n\t}\r\n\r\n\t// Make a new point\r\n\tfields, err := resp.Point.Fields()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\r\n\t}\r\n\tpt, err := influx.NewPoint(resp.Point.Name(), tags, fields, resp.Point.Time())\r\n\r\n\t// panic on error\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error adding tags to response point\\n point: %v\\n tags:%v\\n error: %v\\n\", resp.Point, newTags, err)\r\n\t}\r\n\r\n\treturn pt, nil\r\n}",
"func (c *Client) TipLog(names []string, lineCount int) error {\n\tsgs, err := c.getServiceList(names, false)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tc.tipLogServicesOrGroups(sgs, lineCount)\n\n\treturn nil\n}",
"func (s Searcher) AddLocatable(locatable Locatable) {\n\tlocatable_on_grid := newLocatableOnGrid(locatable, s.lat_tiles, s.lng_tiles)\n\ts.locatable_map.AddLocatableOnGrid(&locatable_on_grid)\n}",
"func PostLatestTip(tip *big.Int, poolID string, userID string, genesisHash string) error {\n u, err := url.Parse(poolToolTipURL)\n if err == nil {\n q := u.Query()\n q.Set(\"poolid\", poolID)\n q.Set(\"userid\", userID)\n q.Set(\"genesispref\", genesisHash)\n q.Set(\"mytip\", tip.String())\n u.RawQuery = q.Encode()\n response, err := http.Get(u.String())\n if err == nil {\n if response.StatusCode == 200 {\n return nil\n } else {\n return PoolToolAPIException{URL: poolToolTipURL, StatusCode: response.StatusCode, Reason: response.Status}\n }\n }\n return err\n }\n return err\n}",
"func (pool *TxPool) GetAllTips() map[common.Hash]types.Txi {\n\tpool.mu.RLock()\n\tdefer pool.mu.RUnlock()\n\n\treturn pool.tips.txs\n}",
"func addTagsToPoint(point *influxdb.Point, tags map[string]string) {\n\tif point.Tags == nil {\n\t\tpoint.Tags = tags\n\t} else {\n\t\tfor k, v := range tags {\n\t\t\tpoint.Tags[k] = v\n\t\t}\n\t}\n}",
"func (m *RisksMutation) AddAntenatalinformationIDs(ids ...int) {\n\tif m._Antenatalinformation == nil {\n\t\tm._Antenatalinformation = make(map[int]struct{})\n\t}\n\tfor i := range ids {\n\t\tm._Antenatalinformation[ids[i]] = struct{}{}\n\t}\n}",
"func (pu *PregnancystatusUpdate) AddAntenatalinformation(a ...*Antenatalinformation) *PregnancystatusUpdate {\n\tids := make([]int, len(a))\n\tfor i := range a {\n\t\tids[i] = a[i].ID\n\t}\n\treturn pu.AddAntenatalinformationIDs(ids...)\n}",
"func (t Traveler) AddLabeled(label string, r aql.QueryResult) Traveler {\n\to := Traveler{State: map[string]aql.QueryResult{}}\n\tfor k, v := range t.State {\n\t\to.State[k] = v\n\t}\n\to.State[label] = r\n\treturn o\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process calling points so that we generate the appropriate via and include their tiplocs | func (bf *boardFilter) processCallingPoints(s ldb.Service) {
if len(s.CallingPoints) > 0 {
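		// The final calling point acts as the destination when building the via text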
viaRequest := bf.addVia(s.RID, s.CallingPoints[len(s.CallingPoints)-1].Tiploc)
for _, cp := range s.CallingPoints {
bf.addTiploc(cp.Tiploc)
viaRequest.AppendTiploc(cp.Tiploc)
}
}
} | [
"func TipCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(*Tip))(params[0].(*Tip))\n}",
"func processCoords(gpspoints []GPSPoint) (points Points) {\n\tfor i := 0; i < len(gpspoints); i++ {\n\t\tpoints = append(points, Point{gpspoints[i].Lon, gpspoints[i].Lat, gpspoints[i].SignalDbm})\n\t}\n\treturn\n}",
"func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, txHash aingle.Hash))(params[0].(int), params[1].(int), params[2].(int), params[3].(aingle.Hash))\n}",
"func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, txHash hornet.Hash))(params[0].(int), params[1].(int), params[2].(int), params[3].(hornet.Hash))\n}",
"func pointProcess(term string, sess *mgo.Session, message bot.IncomingMessage) []*bot.OutgoingMessage {\n\twords := strings.Split(message.Text[1:], \" \")\n\tswitch strings.ToLower(words[0]) {\n\tcase \"adultme\":\n\t\treturn requestPoint(words[1:], sess, message)\n\tcase \"award\":\n\t\treturn awardPoint(words[1:2], sess, message)\n\tcase \"reject\":\n\t\treturn rejectPoint(words[1:2], sess, message)\n\tcase \"adults\":\n\t\treturn listAdults(sess)\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func parsePointInfo(p Point, chargerType []string) PointInfoJS {\r\n\tpJS := PointInfoJS{}\r\n\r\n\tpJS.Provider = p.Provider\r\n\tpJS.Address = p.Address\r\n\tpJS.Operator = p.Operator\r\n\tpJS.Requirement = p.Requirement\r\n\tpJS.Charger = p.Charger\r\n\tpJS.Parking = p.Parking\r\n\tpJS.Hour = p.Hour\r\n\tpJS.Facility = p.Facility\r\n\tpJS.Website = p.Website\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[1])\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[0])\r\n\r\n\tfor _, v := range chargerType {\r\n\t\tfor k, n := range pJS.Charger {\r\n\t\t\tif v == n.Type {\r\n\t\t\t\tpJS.Charger[k].Match = true\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn pJS\r\n}",
"func CallerInfo(skip ...int) (caller *CallInfo) {\n\tcaller = &CallInfo{}\n\tskipCount := 1\n\tif len(skip) > 0 {\n\t\tskipCount = skip[0]\n\t}\n\n\tpc, file, line, ok := runtime.Caller(skipCount)\n\tif !ok {\n\t\treturn\n\t}\n\n\tcaller.Line = line\n\t_, caller.FileName = path.Split(file)\n\tparts := strings.Split(runtime.FuncForPC(pc).Name(), `.`)\n\tpl := len(parts)\n\tcaller.FuncName = parts[pl-1]\n\n\tif parts[pl-2][0] == '(' {\n\t\tcaller.FuncName = parts[pl-2] + `.` + caller.FuncName\n\t\tcaller.PackageName = strings.Join(parts[0:pl-2], `.`)\n\t} else {\n\t\tcaller.PackageName = strings.Join(parts[0:pl-1], `.`)\n\t}\n\n\treturn\n}",
"func (cb *CanBusClient) Points(nodeID string, points []data.Point) {\n\tcb.newPoints <- NewPoints{nodeID, \"\", points}\n}",
"func linePointsGen(p1, p2 Point, speed float64) (gen func() (x, y float64, e error)) {\n\t// Set up math\n\tslopeT, slope, _ := getLineParams(p1, p2)\n\n\tx := p1.X\n\txPrev := x\n\ty := p1.Y\n\tyPrev := y\n\te := fmt.Errorf(\"End of path reached\")\n\ttheta := math.Atan(slope)\n\n\t// Every slope type has a different iterator, since they change the\n\t// x and y values in different combinations, as well as do different\n\t// comparisons on the values.\n\tswitch slopeT {\n\tcase ZERORIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\txPrev = x\n\t\t\tx += speed\n\n\t\t\treturn xPrev, y, nil\n\t\t}\n\tcase ZEROLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\txPrev = x\n\t\t\tx -= speed\n\n\t\t\treturn xPrev, y, nil\n\t\t}\n\tcase POSRIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y || x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty += speed * math.Sin(theta)\n\t\t\tx += speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase NEGRIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y || x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty += speed * math.Sin(theta)\n\t\t\tx += speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase POSLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y || x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty -= speed * math.Sin(theta)\n\t\t\tx -= speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase NEGLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y || x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty -= speed * math.Sin(theta)\n\t\t\tx -= speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase INFUP:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev := y\n\t\t\ty += speed\n\n\t\t\treturn x, yPrev, nil\n\t\t}\n\tcase INFDOWN:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev := y\n\t\t\ty -= speed\n\n\t\t\treturn x, yPrev, nil\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (c *CustomAlgorithm) CustomMovingPoints(gpxPoint *geo.GPXPoint, previousGPXPoint *geo.GPXPoint, algorithm geo.Algorithm) error {\n\n\t/* \tDefine which points should be used; if a point should be used for calculation then set it's new values like Duration, Distance, Speed, etc.\n\tHere we use the set the new value for the points which used for \"Moving\"Time/Distanc\n\t*/\n\n\t// speed < 100 m/s\n\tif gpxPoint.Speed < 100.0 {\n\t\treturn errors.New(\"Point Speed below threshold\")\n\t}\n\tgpxPoint.Point.SetPointData(&previousGPXPoint.Point, algorithm)\n\treturn nil\n}",
"func (s *BaseAspidaListener) EnterPoints(ctx *PointsContext) {}",
"func (b *block) Plan(pointIds ...string) ([]spi.PointSPI, error) {\n\tpoints := []spi.PointSPI{}\n\n\tif len(pointIds) == 0 {\n\t\t// if there are no specified points, include all points\n\n\t\tfor _, p := range b.points {\n\t\t\tpoints = append(points, p)\n\t\t}\n\t} else {\n\t\tincluded := map[string]bool{}\n\t\tincluded_sf := map[string]bool{}\n\n\t\t// include all specified points\n\t\tfor _, id := range pointIds {\n\t\t\tif p, ok := b.points[id]; !ok {\n\t\t\t\treturn nil, sunspec.ErrNoSuchPoint\n\t\t\t} else {\n\t\t\t\tif !included[id] {\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tincluded[id] = true\n\t\t\t\t}\n\t\t\t\tif p.Type() == typelabel.ScaleFactor {\n\t\t\t\t\tincluded_sf[id] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// include their scale factors too...\n\t\t//\n\t\t// we do this for several reasons:\n\t\t// - to interpret a point that uses a scale factor, we need the scale factor too\n\t\t// - if we don't there we may read a value point after its scale factor point has changed\n\t\t// By forcing contemporaneous reads of a scale factor and its related points we help to ensure\n\t\t// that the two values are consistent.\n\t\t// - we want to avoid app programmers having to encode knowedlege in their programs\n\t\t// about these depednencies - the knowledge is in the SMDX documents, so lets use it\n\t\tfor _, p := range points {\n\t\t\tsfp := p.(*point).scaleFactor\n\t\t\tif sfp != nil {\n\t\t\t\tif !included[sfp.Id()] {\n\t\t\t\t\tpoints = append(points, sfp.(spi.PointSPI))\n\t\t\t\t\tincluded[sfp.Id()] = true\n\t\t\t\t\tincluded_sf[sfp.Id()] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// We also include all the currently valid points that reference any scale\n\t\t// factor points we are going to read since we don't want such points to\n\t\t// unexpectedly enter an error state when they are invalidated by the\n\t\t// read of the scale factor point. This allows twp separate reads each\n\t\t// of which have a point that reference a shared scale factor point to\n\t\t// be equivalent to a single read of all points or to two reads in which\n\t\t// all points related to a single scale factor are read in the same read\n\t\t// as the scale factor itself.\n\t\t//\n\t\t// One consequence of this behaviour is that any local changes (via a\n\t\t// setter) to a point dependent on a scale factor point may be lost by a\n\t\t// read of any point that is dependent on the same scale factor which\n\t\t// itself means that local changes to points should be written to the\n\t\t// physical device with Block.Write before the next Block.Read or else\n\t\t// they may be lost under some circumstances even if the point concerned\n\t\t// is not directly referened by the Read call.\n\t\t//\n\t\t// Part of the reason we do this is to maximise the consistency of data\n\t\t// exposed by the API while minimising both the effort for the programmer\n\t\t// to maintain the consistency and also surprising behaviour.\n\t\tfor _, p := range b.points {\n\t\t\tif sfp := p.scaleFactor; sfp == nil || p.Error() != nil || !included_sf[sfp.Id()] {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tif !included[p.Id()] {\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tincluded[p.Id()] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// sort so scale factors come first, then other points in offset order\n\tsort.Sort(scaleFactorFirstOrder(points))\n\treturn points, nil\n}",
"func applyToPoints(points []Point, fn func(*Point)) {\n\tfor j := range points {\n\t\tfn(&points[j])\n\t}\n}",
"func buildSamplePoints (t *testing.T) []*point.Point {\n\treturn []*point.Point{\n\t\tbuildSamplePoint(t, 1), buildSamplePoint(t, 2), buildSamplePoint(t, 3)}\n}",
"func (u *DatadogUnifi) loopPoints(r report) {\n\tm := r.metrics()\n\n\tfor _, s := range m.RogueAPs {\n\t\tu.switchExport(r, s)\n\t}\n\n\tfor _, s := range m.Sites {\n\t\tu.switchExport(r, s)\n\t}\n\n\tfor _, s := range m.SitesDPI {\n\t\tu.reportSiteDPI(r, s.(*unifi.DPITable))\n\t}\n\n\tfor _, s := range m.Clients {\n\t\tu.switchExport(r, s)\n\t}\n\n\tfor _, s := range m.Devices {\n\t\tu.switchExport(r, s)\n\t}\n\n\tfor _, s := range r.events().Logs {\n\t\tu.switchExport(r, s)\n\t}\n\n\tappTotal := make(totalsDPImap)\n\tcatTotal := make(totalsDPImap)\n\n\tfor _, s := range m.ClientsDPI {\n\t\tu.batchClientDPI(r, s, appTotal, catTotal)\n\t}\n\n\treportClientDPItotals(r, appTotal, catTotal)\n}",
"func CallerChain(skipFrom, skipUntil int) (res []CallInfo) {\n\tfor skipCount := skipFrom; skipCount <= skipUntil; skipCount++ {\n\t\tpc, file, line, ok := runtime.Caller(skipCount)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tcaller := CallInfo{Line: line}\n\t\t_, caller.FileName = path.Split(file)\n\t\tparts := strings.Split(runtime.FuncForPC(pc).Name(), `.`)\n\t\tpl := len(parts)\n\t\tcaller.FuncName = parts[pl-1]\n\n\t\tif parts[pl-2][0] == '(' {\n\t\t\tcaller.FuncName = parts[pl-2] + `.` + caller.FuncName\n\t\t\tcaller.PackageName = strings.Join(parts[0:pl-2], `.`)\n\t\t} else {\n\t\t\tcaller.PackageName = strings.Join(parts[0:pl-1], `.`)\n\t\t}\n\n\t\tres = append(res, caller)\n\t}\n\treturn\n}",
"func (a axes) drawPoint(p *vg.Painter, xy xyer, cs vg.CoordinateSystem, l Line, pointNumber int) {\n\tx, y, isEnvelope := xy.XY(l)\n\n\t// add number of NaNs leading pointNumber to pointNumber.\n\ttargetNumber := pointNumber\n\tfor i, v := range x {\n\t\tif i > targetNumber {\n\t\t\tbreak\n\t\t}\n\t\tif math.IsNaN(v) {\n\t\t\tpointNumber++\n\t\t}\n\t}\n\n\tif len(x) <= pointNumber || len(y) <= pointNumber || pointNumber < 0 {\n\t\treturn\n\t}\n\tp.SetFont(font1)\n\tlabels := make([]vg.FloatText, 2)\n\tif isEnvelope {\n\t\tif n := len(x); n != len(y) || pointNumber+2 > n {\n\t\t\treturn\n\t\t} else {\n\t\t\txp, yp := x[pointNumber], y[pointNumber]\n\t\t\txp2, yp2 := x[n-pointNumber-2], y[n-pointNumber-2]\n\t\t\tx = []float64{xp, xp2}\n\t\t\ty = []float64{yp, yp2}\n\t\t\tlabels[0] = vg.FloatText{X: xp, Y: yp, S: fmt.Sprintf(\"(%.4g, %.4g)\", xp, yp), Align: 5}\n\t\t\tlabels[1] = vg.FloatText{X: xp2, Y: yp2, S: fmt.Sprintf(\"(%.4g, %.4g)\", xp2, yp2), Align: 1}\n\t\t}\n\t} else {\n\t\txp, yp := x[pointNumber], y[pointNumber]\n\t\tx = []float64{xp}\n\t\ty = []float64{yp}\n\t\tvar s string\n\t\tif xyp, ok := xy.(xyPolar); ok {\n\t\t\txstr := \"\"\n\t\t\tif xyp.rmin == 0 && xyp.rmax == 0 { // polar\n\t\t\t\tif len(l.X) > pointNumber && pointNumber >= 0 {\n\t\t\t\t\txstr = fmt.Sprintf(\"%.4g, \", l.X[pointNumber])\n\t\t\t\t}\n\t\t\t\ts = xstr + xmath.Absang(complex(yp, xp), \"%.4g@%.0f\")\n\t\t\t} else { // ring\n\t\t\t\ts = fmt.Sprintf(\"%.4g@%.1f\", l.X[pointNumber], 180.0*l.Y[pointNumber]/math.Pi)\n\t\t\t}\n\t\t} else {\n\t\t\ts = fmt.Sprintf(\"(%.4g, %.4g)\", xp, yp)\n\t\t}\n\t\tlabels[0] = vg.FloatText{X: xp, Y: yp, S: s, Align: 1}\n\t\tlabels = labels[:1]\n\t}\n\n\tsize := l.Style.Marker.Size\n\tif size == 0 {\n\t\tsize = l.Style.Line.Width\n\t}\n\tif size == 0 {\n\t\tsize = 9\n\t} else {\n\t\tsize *= 3\n\t}\n\tc := a.plot.Style.Order.Get(l.Style.Marker.Color, l.Id+1).Color()\n\tp.SetColor(c)\n\tp.Add(vg.FloatCircles{X: x, Y: y, CoordinateSystem: cs, Radius: size, Fill: true})\n\trect := a.inside.Bounds()\n\tfor _, l := range labels {\n\t\tl.CoordinateSystem = cs\n\t\tl.Rect = rect\n\n\t\t// Change the alignment, if the label would be placed at a picture boundary.\n\t\tx0, y0 := cs.Pixel(l.X, l.Y, rect)\n\t\tif l.Align == 1 && y0 < 30 {\n\t\t\tl.Align = 5\n\t\t} else if l.Align == 5 && y0 > rect.Max.Y-30 {\n\t\t\tl.Align = 1\n\t\t}\n\t\tif x0 < 50 {\n\t\t\tif l.Align == 1 {\n\t\t\t\tl.Align = 0\n\t\t\t} else if l.Align == 5 {\n\t\t\t\tl.Align = 6\n\t\t\t}\n\t\t} else if x0 > rect.Max.X-50 {\n\t\t\tif l.Align == 1 {\n\t\t\t\tl.Align = 2\n\t\t\t} else if l.Align == 5 {\n\t\t\t\tl.Align = 4\n\t\t\t}\n\t\t}\n\n\t\t// Place the label above or below with the offset of the marker's radius.\n\t\tif l.Align <= 2 { // Label is above point.\n\t\t\tl.Yoff = -size\n\t\t} else if l.Align >= 4 { // Label is below point\n\t\t\tl.Yoff = size\n\t\t}\n\n\t\t// Fill background rectangle of the label.\n\t\tx, y, w, h := l.Extent(p)\n\t\tsaveColor := p.GetColor()\n\t\tp.SetColor(a.bg)\n\t\tp.Add(vg.Rectangle{X: x, Y: y, W: w, H: h, Fill: true})\n\t\tp.SetColor(saveColor)\n\t\tp.Add(l)\n\t}\n}",
"func drawPoints(bc *braille.Canvas, points []image.Point, opt *brailleCircleOptions) error {\n\tfor _, p := range points {\n\t\tswitch opt.pixelChange {\n\t\tcase braillePixelChangeSet:\n\t\t\tif err := bc.SetPixel(p, opt.cellOpts...); err != nil {\n\t\t\t\treturn fmt.Errorf(\"SetPixel => %v\", err)\n\t\t\t}\n\t\tcase braillePixelChangeClear:\n\t\t\tif err := bc.ClearPixel(p, opt.cellOpts...); err != nil {\n\t\t\t\treturn fmt.Errorf(\"ClearPixel => %v\", err)\n\t\t\t}\n\n\t\t}\n\t}\n\treturn nil\n}",
"func (b *BccLatticePointGenerator) forEachPoint(\n\tboundingBox *BoundingBox3D,\n\tspacing float64,\n\tpoints *[]*Vector3D.Vector3D,\n\tcallback func(*([]*Vector3D.Vector3D), *Vector3D.Vector3D) bool,\n) {\n\n\thalfSpacing := spacing / 2\n\tboxWidth := boundingBox.width()\n\tboxHeight := boundingBox.height()\n\tboxDepth := boundingBox.depth()\n\n\tposition := Vector3D.NewVector(0, 0, 0)\n\thasOffset := false\n\tshouldQuit := false\n\n\tfor k := float64(0); k*halfSpacing <= boxDepth && !shouldQuit; k++ {\n\n\t\tposition.Z = k*halfSpacing + boundingBox.lowerCorner.Z\n\t\tvar offset float64\n\t\tif hasOffset {\n\n\t\t\toffset = halfSpacing\n\t\t} else {\n\t\t\toffset = 0\n\t\t}\n\n\t\tfor j := float64(0); j*spacing+offset <= boxHeight && !shouldQuit; j++ {\n\t\t\tposition.Y = j*spacing + offset + boundingBox.lowerCorner.Y\n\n\t\t\tfor i := float64(0); i*spacing+offset <= boxWidth; i++ {\n\t\t\t\tposition.X = i*spacing + offset + boundingBox.lowerCorner.X\n\n\t\t\t\tif !callback(points, position) {\n\t\t\t\t\tshouldQuit = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\thasOffset = !hasOffset\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process any associations, pulling in their schedules | func (bf *boardFilter) processAssociations(s ldb.Service) {
for _, assoc := range s.Associations {
assoc.AddTiplocs(bf.tiplocs)
//if assoc.IsJoin() || assoc.IsSplit() {
ar := assoc.Main.RID
ai := assoc.Main.LocInd
if ar == s.RID {
ar = assoc.Assoc.RID
ai = assoc.Assoc.LocInd
}
		// Resolve the schedule for a split or join; for an NP association, only if it refers to the previous service and we are not yet running
//if ar != s.RID {
if assoc.Category != "NP" || (s.LastReport.Tiploc == "" && assoc.Assoc.RID == s.RID) {
as := bf.d.ldb.GetSchedule(ar)
if as != nil {
assoc.Schedule = as
as.AddTiplocs(bf.tiplocs)
as.LastReport = as.GetLastReport()
bf.processToc(as.Toc)
if ai < (len(as.Locations) - 1) {
					if as.Destination != nil {
bf.addTiploc(as.Destination.Tiploc)
}
destination := as.Locations[len(as.Locations)-1].Tiploc
if as.Destination != nil {
destination = as.Destination.Tiploc
}
viaRequest := bf.addVia(ar, destination)
for _, l := range as.Locations[ai:] {
bf.addTiploc(l.Tiploc)
viaRequest.AppendTiploc(l.Tiploc)
}
}
bf.processReason(as.CancelReason, true)
bf.processReason(as.LateReason, false)
}
}
}
} | [
"func (s *candidate) Schedule() (constructedSchedule, error) {\n\tsch := constructedSchedule{\n\t\tearliest: s.earliest,\n\t\teventsByAttendee: make(map[AttendeeID]*attendeeEvents),\n\t}\n\tfor _, event := range s.order {\n\t\tif err := sch.Add(s.reqs[event]); err != nil {\n\t\t\treturn sch, err\n\t\t}\n\t}\n\treturn sch, nil\n}",
"func (records Records) LoadDoctorSchedule(fetcher DoctorScheduleFetcher) {\n\tvar lastID, lastSpec, lastName string\n\n\tdoctorRecords := make(Records, 0)\n\n\tfor _, r := range records {\n\t\tif lastID == \"\" {\n\t\t\tlastID = r.ID()\n\t\t\tlastSpec = r.Spec\n\t\t\tlastName = r.Name\n\t\t}\n\n\t\tif r.ID() != lastID {\n\t\t\tdoctorRecords = doctorRecords.Cleaned()\n\n\t\t\tschedule := &DoctorSchedule{\n\t\t\t\tSpec: lastSpec,\n\t\t\t\tName: lastName,\n\t\t\t\tCells: make(TimeCells, len(doctorRecords)),\n\t\t\t}\n\n\t\t\tfor i, rr := range doctorRecords {\n\t\t\t\tschedule.Cells[i] = &TimeCell{\n\t\t\t\t\tStartTime: rr.StartTime,\n\t\t\t\t\tDuration: rr.Duration,\n\t\t\t\t\tFree: rr.Free,\n\t\t\t\t\tRoom: rr.Room,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfetcher(schedule)\n\n\t\t\tlastID = r.ID()\n\t\t\tlastSpec = r.Spec\n\t\t\tlastName = r.Name\n\t\t\tdoctorRecords = make(Records, 0)\n\t\t}\n\n\t\tdoctorRecords = append(doctorRecords, r)\n\t}\n}",
"func notifyScheduleAssociates(s models.Schedule, action string) error {\n\t// Get the associated schedule events\n\tvar events []models.ScheduleEvent\n\tif err := dbClient.GetScheduleEventsByScheduleName(&events, s.Name); err != nil {\n\t\treturn err\n\t}\n\n\t// Get the device services for the schedule events\n\tvar services []models.DeviceService\n\tfor _, se := range events {\n\t\tvar ds models.DeviceService\n\t\tif err := dbClient.GetDeviceServiceByName(&ds, se.Service); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tservices = append(services, ds)\n\t}\n\n\t// Notify the associated device services\n\tif err := notifyAssociates(services, s.Id.Hex(), action, models.SCHEDULE); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (db *Database) GetSchedule(startLocationName, destinationName, date string) ([]Trip, map[int][]TripOffering, error) {\n trips := []Trip{}\n offerings := make(map[int][]TripOffering)\n row, err := db.Query(fmt.Sprintf(\"SELECT * FROM Trip WHERE StartLocationName=%s\", startLocationName))\n if err != nil {\n return trips, offerings, err\n }\n // Get the trips with the given start location name\n trips = RowToTrips(row)\n row.Close()\n // Get the trip offerings for each trip\n for _, t := range trips {\n row, err := db.Query(fmt.Sprintf(\"SELECT * FROM TripOffering WHERE TripNumber=%d\", t.TripNumber))\n if err != nil {\n return trips, offerings, err\n }\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n if _, ok := offerings[tripNumber]; !ok {\n offerings[tripNumber] = []TripOffering{}\n }\n offerings[tripNumber] = append(offerings[tripNumber], TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n row.Close()\n }\n return trips, offerings, nil\n}",
"func soundersScheduleCollector() {\n\n\tfetchSoundersSchedule()\n\n\tc := time.Tick(24 * time.Hour)\n\tfor _ = range c {\n\t\tfetchSoundersSchedule()\n\t}\n}",
"func ScheduleUnmarshalJSON(b []byte) (schedule Schedule, err error) {\n\tvar mixed interface{}\n\tjson.Unmarshal(b, &mixed)\n\n\tfor key, value := range mixed.(map[string]interface{}) {\n\t\trawValue, _ := json.Marshal(value)\n\t\tswitch key {\n\t\tcase \"date\":\n\t\t\tvar date Date\n\t\t\terr = json.Unmarshal(rawValue, &date)\n\t\t\tschedule = date\n\t\tcase \"day\":\n\t\t\tvar day Day\n\t\t\terr = json.Unmarshal(rawValue, &day)\n\t\t\tschedule = day\n\t\tcase \"intersection\":\n\t\t\tvar intersection Intersection\n\t\t\terr = json.Unmarshal(rawValue, &intersection)\n\t\t\tschedule = intersection\n\t\tcase \"month\":\n\t\t\tvar month Month\n\t\t\terr = json.Unmarshal(rawValue, &month)\n\t\t\tschedule = month\n\t\tcase \"union\":\n\t\t\tvar union Union\n\t\t\terr = json.Unmarshal(rawValue, &union)\n\t\t\tschedule = union\n\t\tcase \"week\":\n\t\t\tvar week Week\n\t\t\terr = json.Unmarshal(rawValue, &week)\n\t\t\tschedule = week\n\t\tcase \"weekday\":\n\t\t\tvar weekday Weekday\n\t\t\terr = json.Unmarshal(rawValue, &weekday)\n\t\t\tschedule = weekday\n\t\tcase \"year\":\n\t\t\tvar year Year\n\t\t\terr = json.Unmarshal(rawValue, &year)\n\t\t\tschedule = year\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"%s is not a recognized schedule\", key)\n\t\t}\n\t}\n\treturn\n}",
"func (jf JobFactory) Process(schedules []Schedule) {\n\tfor _, item := range schedules {\n\t\tif item.Api.Url != \"\" {\n\t\t\tlocalItem := item\n\t\t\tAddJob(item.Schedule, func() {\n\t\t\t\tlog.Printf(\"executing %s at %s\", localItem.Name, localItem.Api.Url)\n\t\t\t\toptions := restful.Options{}\n\t\t\t\toptions.Method = localItem.Api.Method\n\t\t\t\toptions.Headers = make(map[string]string)\n\t\t\t\toptions.Headers[\"Content-Type\"] = \"application/json\"\n\t\t\t\tif localItem.Api.Authorization != \"\" {\n\t\t\t\t\toptions.Headers[\"Authorization\"] = localItem.Api.Authorization\n\t\t\t\t}\n\t\t\t\toptions.Transformer = localItem.Api.Transform\n\t\t\t\toptions.Payload = localItem.Api.Body\n\t\t\t\tmessage, _ := restful.Call(localItem.Api.Url, &options)\n\t\t\t\tevent := EventData{}\n\t\t\t\tjson.Unmarshal([]byte(message), &event)\n\t\t\t\tGetEventsManager().Notify(event)\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tvalue, ok := advertisedJobs[item.Name]\n\t\tif ok {\n\t\t\tlog.Printf(\"%s, %s\", item.Schedule, item.Name)\n\t\t\tAddJob(item.Schedule, value)\n\t\t}\n\t}\n\tInitJobs()\n}",
"func (s *Schedule) GetAll(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tconn, err := db.Connect()\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\n\tsession := conn.NewSession(nil)\n\tdefer session.Close()\n\tdefer conn.Close()\n\n\tif request.QueryStringParameters == nil {\n\t\trequest.QueryStringParameters = map[string]string{\n\t\t\t\"event_id\": request.PathParameters[\"id\"],\n\t\t}\n\t} else {\n\t\trequest.QueryStringParameters[\"event_id\"] = request.PathParameters[\"id\"]\n\t}\n\n\tresult, err := db.Select(session, db.TableEventSchedule, request.QueryStringParameters, Schedule{})\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\n\treturn common.APIResponse(result, http.StatusOK)\n}",
"func buildScheduleList(runables ScriptSet) scheduledSet {\n\tset := scheduledSet{}\n\n\t_ = runables.Walk(func(s *Script) error {\n\t\tsch := schedule{scriptID: s.ID}\n\t\tfor _, t := range s.triggers {\n\t\t\tif !t.IsDeferred() {\n\t\t\t\t// only interested in deferred scripts\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif t.Condition == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif ts, err := time.Parse(time.RFC3339, t.Condition); err == nil {\n\t\t\t\tts = ts.Truncate(time.Minute)\n\t\t\t\tif ts.Before(now()) {\n\t\t\t\t\t// in the past...\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tsch.timestamps = append(sch.timestamps, ts)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// @todo parse cron format and fill intervals\n\t\t}\n\n\t\t// If there is anything useful in the schedule,\n\t\t// add it to the list\n\t\tif len(sch.timestamps) > 0 {\n\t\t\tset = append(set, sch)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn set\n}",
"func extract_schedules(hull []fpoint) []vrp.Schedule {\n\tschedules := make([]vrp.Schedule, len(hull))\n\tfor i, h := range hull {\n\t\tschedules[i] = h.schedule\n\t}\n\treturn schedules\n}",
"func (s *Scheduler) ScheduleTasks() {\n\t/*\n\t\tif events exist unattended, make tasks based on set up times\n\t*/\n\n}",
"func (r *ScheduleRepo) GetAll() (map[usecase.ScheduleID]*schedule.Schedule, usecase.Error) {\n\treturn r.getAllWhere(\"\")\n}",
"func (a *Airport) processArrivals() {\n\tfor {\n\t\tarrival, ok := <-a.arrivalChan\n\t\tif !ok {\n\t\t\ta.log.Errorf(\"arrival channel closed\")\n\t\t\treturn\n\t\t}\n\t\tswitch arrival.GetChangeType() {\n\t\tcase datasync.Put:\n\t\t\tfl := flight.Info{}\n\t\t\tif err := arrival.GetValue(&fl); err != nil {\n\t\t\t\ta.log.Errorf(\"failed to get value for arrival flight: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfl.Status = flight.Status_arrival\n\t\t\ta.runwayChan <- fl\n\t\tcase datasync.Delete:\n\t\t\ta.log.Debugf(\"arrival %s deleted\\n\", arrival.GetKey())\n\t\t}\n\t}\n}",
"func GetADVSchedules(id string, addr string, localIP string) error {\r\n\tlocalAddr, err := net.ResolveIPAddr(\"ip\", localIP)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tLocalBindAddr := &net.TCPAddr{IP: localAddr.IP}\r\n\ttransport := &http.Transport{\r\n\t\tDial: (&net.Dialer{\r\n\t\t\tLocalAddr: LocalBindAddr,\r\n\t\t\tTimeout: 5 * time.Second,\r\n\t\t\tKeepAlive: 30 * time.Second,\r\n\t\t}).Dial,\r\n\t}\r\n\tclient := &http.Client{\r\n\t\tTransport: transport,\r\n\t}\r\n\r\n\turl := \"http://\" + addr + \"/adm/adv-schedules/\" + id + \"?format=cic\"\r\n\r\n\treq, err := http.NewRequest(\"GET\", url, nil)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tresp, err := client.Do(req)\r\n\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tif resp.StatusCode != 200 {\r\n\t\treturn fmt.Errorf(\"ADM Receved %v\", resp.Status)\r\n\t}\r\n\r\n\tfor {\r\n\t\tbuf := make([]byte, 32*1024)\r\n\t\t_, err := resp.Body.Read(buf)\r\n\r\n\t\tif err != nil && err != io.EOF {\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\tresp.Body.Close()\r\n\ttransport.CloseIdleConnections()\r\n\r\n\treturn nil\r\n}",
"func (pgmodel *PgDB) SelectCurrentScheduler() ([]model.ScheduleTask, error) {\n\tnow, _ := time.Parse(\"2006-01-02 15:04:00\", time.Now().UTC().Format(\"2006-01-02 15:04:00\"))\n\n\tscheduleRepository := model.NewScheduleRepository()\n\tscheduleModel := scheduleRepository.GetTaskModel()\n\n\terr := pgmodel.db.Model(&scheduleModel).\n\t\tColumnExpr(\"schedule_task.*\").\n\t\tColumnExpr(\"delivery.title AS delivery__title\").\n\t\tColumnExpr(\"delivery.text AS delivery__text\").\n\t\tColumnExpr(\"delivery.user_ids AS delivery__user_ids\").\n\t\tColumnExpr(\"delivery.id AS delivery__id\").\n\t\tColumnExpr(\"delivery.filter AS delivery__filter\").\n\t\tJoin(\"INNER JOIN talkbank_bots.delivery AS delivery ON delivery.id = schedule_task.action_id\").\n\t\tWhere(\"schedule_task.is_active = ?\", true).\n\t\tWhereGroup(func(q *orm.Query) (*orm.Query, error) {\n\t\t\treturn q.\n\t\t\t\tWhereOrGroup(func(subQ1 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\treturn subQ1.\n\t\t\t\t\t\tWhere(\"schedule_task.type = ?\", \"onetime\").\n\t\t\t\t\t\tWhere(\"schedule_task.from_datetime >= ?\", now).\n\t\t\t\t\t\tWhereGroup(func(subQ *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subQ.\n\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS NULL\").\n\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime >= schedule_task.from_datetime\"), nil\n\t\t\t\t\t\t}), nil\n\t\t\t\t}).\n\t\t\t\tWhereOrGroup(func(subQ2 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\treturn subQ2.\n\t\t\t\t\t\tWhere(\"schedule_task.type = ?\", \"recurrently\").\n\t\t\t\t\t\tWhereGroup(func(subGroup *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subGroup.Where(\"schedule_task.from_datetime <= ?\", now).\n\t\t\t\t\t\t\t\tWhereGroup(func(subQ *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\treturn subQ.\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS NULL\").\n\t\t\t\t\t\t\t\t\t\tWhereOrGroup(func(subQ1 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\t\t\treturn subQ1.\n\t\t\t\t\t\t\t\t\t\t\t\tWhere(\"schedule_task.to_datetime >= ?\", now).\n\t\t\t\t\t\t\t\t\t\t\t\tWhere(\"schedule_task.to_datetime > schedule_task.from_datetime\"), nil\n\t\t\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t}).\n\t\t\t\t\t\tWhereOrGroup(func(subGroup2 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subGroup2.\n\t\t\t\t\t\t\t\tWhere(\"schedule_task.from_datetime >= ?\", now).\n\t\t\t\t\t\t\t\tWhere(\"schedule_task.from_datetime <= schedule_task.next_run\").\n\t\t\t\t\t\t\t\tWhereGroup(func(toGroup *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\treturn toGroup.\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS NULL\").\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime >= schedule_task.next_run\"), nil\n\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t}), nil\n\t\t\t\t}), nil\n\t\t}).\n\t\tOrder(\"schedule_task.id ASC\").\n\t\tSelect()\n\n\tif err != nil {\n\t\tfmt.Println(\"Error to get data from scheduler_task\", err)\n\t\treturn nil, err\n\t}\n\n\treturn scheduleModel, nil\n}",
"func notifyScheduleEventAssociates(se models.ScheduleEvent, action string) error {\n\t// Get the associated device service\n\tvar ds models.DeviceService\n\tif err := dbClient.GetDeviceServiceByName(&ds, se.Service); err != nil {\n\t\treturn err\n\t}\n\n\tvar services []models.DeviceService\n\tservices = append(services, ds)\n\n\t// Notify the associated device service\n\tif err := notifyAssociates(services, se.Id.Hex(), action, models.SCHEDULEEVENT); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (o *Operation) populateLinks(zones []Zone, inGid GoogleID, assignments map[TaskID][]GoogleID, depends map[TaskID][]TaskID) error {\n\tvar description sql.NullString\n\n\trows, err := db.Query(\"SELECT link.ID, link.fromPortalID, link.toPortalID, task.comment, task.taskorder, task.state, link.color, task.zone, task.delta FROM link JOIN task ON link.ID = task.ID WHERE task.opID = ? AND link.opID = task.opID\", o.ID)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\ttmpLink := Link{}\n\t\ttmpLink.opID = o.ID\n\n\t\terr := rows.Scan(&tmpLink.ID, &tmpLink.From, &tmpLink.To, &description, &tmpLink.Order, &tmpLink.State, &tmpLink.Color, &tmpLink.Zone, &tmpLink.DeltaMinutes)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\ttmpLink.Task.ID = TaskID(tmpLink.ID)\n\n\t\tif description.Valid {\n\t\t\ttmpLink.Desc = description.String\n\t\t\ttmpLink.Comment = description.String\n\t\t}\n\n\t\ttmpLink.ThrowOrder = tmpLink.Order\n\n\t\tif a, ok := assignments[tmpLink.Task.ID]; ok {\n\t\t\ttmpLink.Assignments = a\n\t\t\ttmpLink.AssignedTo = a[0]\n\t\t}\n\n\t\tif d, ok := depends[tmpLink.Task.ID]; ok {\n\t\t\ttmpLink.DependsOn = d\n\t\t}\n\n\t\tif tmpLink.State == \"completed\" {\n\t\t\ttmpLink.Completed = true\n\t\t}\n\n\t\t// this isn't in a zone with which we are concerned AND not assigned to me, skip\n\t\tif !tmpLink.Zone.inZones(zones) && !tmpLink.IsAssignedTo(inGid) {\n\t\t\tcontinue\n\t\t}\n\t\to.Links = append(o.Links, tmpLink)\n\t}\n\treturn nil\n}",
"func doEvents() error {\n\tif len(accounts) == 0 {\n\t\twf.NewItem(\"No Accounts Configured\").\n\t\t\tSubtitle(\"Action this item to add a Google account\").\n\t\t\tAutocomplete(\"workflow:login\").\n\t\t\tIcon(aw.IconWarning)\n\n\t\twf.SendFeedback()\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tcals []*Calendar\n\t\terr error\n\t)\n\n\tif cals, err = activeCalendars(); err != nil {\n\t\tif err == errNoActive {\n\t\t\twf.NewItem(\"No Active Calendars\").\n\t\t\t\tSubtitle(\"Action this item to choose calendars\").\n\t\t\t\tAutocomplete(\"workflow:calendars\").\n\t\t\t\tIcon(aw.IconWarning)\n\n\t\t\twf.SendFeedback()\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif err == errNoCalendars {\n\t\t\tif !wf.IsRunning(\"update-calendars\") {\n\t\t\t\tcmd := exec.Command(os.Args[0], \"update\", \"calendars\")\n\t\t\t\tif err := wf.RunInBackground(\"update-calendars\", cmd); err != nil {\n\t\t\t\t\treturn errors.Wrap(err, \"run calendar update\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twf.NewItem(\"Fetching List of Calendars…\").\n\t\t\t\tSubtitle(\"List will reload shortly\").\n\t\t\t\tValid(false).\n\t\t\t\tIcon(ReloadIcon())\n\n\t\t\twf.Rerun(0.1)\n\t\t\twf.SendFeedback()\n\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\tlog.Printf(\"%d active calendar(s)\", len(cals))\n\n\tvar (\n\t\tall []*Event\n\t\tevents []*Event\n\t\tparsed time.Time\n\t)\n\n\tif all, err = loadEvents(opts.StartTime, cals...); err != nil {\n\t\treturn errors.Wrap(err, \"load events\")\n\t}\n\n\t// Filter out events after cutoff\n\tfor _, e := range all {\n\t\tif !opts.ScheduleMode && e.Start.After(opts.EndTime) {\n\t\t\tbreak\n\t\t}\n\t\tevents = append(events, e)\n\t\tlog.Printf(\"%s\", e.Title)\n\t}\n\n\tif len(all) == 0 && wf.IsRunning(\"update-events\") {\n\t\twf.NewItem(\"Fetching Events…\").\n\t\t\tSubtitle(\"Results will refresh shortly\").\n\t\t\tIcon(ReloadIcon()).\n\t\t\tValid(false)\n\n\t\twf.Rerun(0.1)\n\t}\n\n\tlog.Printf(\"%d event(s) for %s\", len(events), opts.StartTime.Format(timeFormat))\n\n\tif t, ok := parseDate(opts.Query); ok {\n\t\tparsed = t\n\t}\n\n\tif len(events) == 0 && opts.Query == \"\" {\n\t\twf.NewItem(fmt.Sprintf(\"No Events on %s\", opts.StartTime.Format(timeFormatLong))).\n\t\t\tIcon(ColouredIcon(iconCalendar, yellow))\n\t}\n\n\tvar day time.Time\n\n\tfor _, e := range events {\n\t\t// Show day indicator if this is the first event of a given day\n\t\tif opts.ScheduleMode && midnight(e.Start).After(day) {\n\t\t\tday = midnight(e.Start)\n\n\t\t\twf.NewItem(day.Format(timeFormatLong)).\n\t\t\t\tArg(day.Format(timeFormat)).\n\t\t\t\tValid(true).\n\t\t\t\tIcon(iconDay)\n\t\t}\n\n\t\ticon := ColouredIcon(iconCalendar, e.Colour)\n\n\t\tsub := fmt.Sprintf(\"%s – %s / %s\",\n\t\t\te.Start.Local().Format(hourFormat),\n\t\t\te.End.Local().Format(hourFormat),\n\t\t\te.CalendarTitle)\n\n\t\tif e.Location != \"\" {\n\t\t\tsub = sub + \" / \" + e.Location\n\t\t}\n\n\t\tit := wf.NewItem(e.Title).\n\t\t\tSubtitle(sub).\n\t\t\tIcon(icon).\n\t\t\tArg(e.URL).\n\t\t\tQuicklook(previewURL(opts.StartTime, e.ID)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"open\")\n\n\t\tif e.Location != \"\" {\n\t\t\tapp := \"Google Maps\"\n\t\t\tif opts.UseAppleMaps {\n\t\t\t\tapp = \"Apple Maps\"\n\t\t\t}\n\n\t\t\ticon := ColouredIcon(iconMap, e.Colour)\n\t\t\tit.NewModifier(\"cmd\").\n\t\t\t\tSubtitle(\"Open in \"+app).\n\t\t\t\tArg(mapURL(e.Location)).\n\t\t\t\tValid(true).\n\t\t\t\tIcon(icon).\n\t\t\t\tVar(\"CALENDAR_APP\", \"\") // Don't open Maps URLs in CALENDAR_APP\n\t\t}\n\t}\n\n\tif !opts.ScheduleMode {\n\t\t// Navigation items\n\t\tprev 
:= opts.StartTime.AddDate(0, 0, -1)\n\t\twf.NewItem(\"Previous: \"+relativeDate(prev)).\n\t\t\tIcon(iconPrevious).\n\t\t\tArg(prev.Format(timeFormat)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"date\")\n\n\t\tnext := opts.StartTime.AddDate(0, 0, 1)\n\t\twf.NewItem(\"Next: \"+relativeDate(next)).\n\t\t\tIcon(iconNext).\n\t\t\tArg(next.Format(timeFormat)).\n\t\t\tValid(true).\n\t\t\tVar(\"action\", \"date\")\n\t}\n\n\tif opts.Query != \"\" {\n\t\twf.Filter(opts.Query)\n\t}\n\n\tif !parsed.IsZero() {\n\t\ts := parsed.Format(timeFormat)\n\n\t\twf.NewItem(parsed.Format(timeFormatLong)).\n\t\t\tSubtitle(relativeDays(parsed, false)).\n\t\t\tArg(s).\n\t\t\tAutocomplete(s).\n\t\t\tValid(true).\n\t\t\tIcon(iconDefault)\n\t}\n\n\twf.WarnEmpty(\"No Matching Events\", \"Try a different query?\")\n\twf.SendFeedback()\n\treturn nil\n}",
"func updateScheduleFields(from models.Schedule, to *models.Schedule, w http.ResponseWriter) error {\n\tif from.Cron != \"\" {\n\t\tif _, err := cron.Parse(from.Cron); err != nil {\n\t\t\terr = errors.New(\"Invalid cron format\")\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Cron = from.Cron\n\t}\n\tif from.End != \"\" {\n\t\tif _, err := msToTime(from.End); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.End = from.End\n\t}\n\tif from.Frequency != \"\" {\n\t\tif !isIntervalValid(from.Frequency) {\n\t\t\terr := errors.New(\"Frequency format is incorrect: \" + from.Frequency)\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Frequency = from.Frequency\n\t}\n\tif from.Start != \"\" {\n\t\tif _, err := msToTime(from.Start); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\t\tto.Start = from.Start\n\t}\n\tif from.Origin != 0 {\n\t\tto.Origin = from.Origin\n\t}\n\tif from.Name != \"\" && from.Name != to.Name {\n\t\t// Check if new name is unique\n\t\tvar checkS models.Schedule\n\t\tif err := dbClient.GetScheduleByName(&checkS, from.Name); err != nil {\n\t\t\tif err != db.ErrNotFound {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\t}\n\t\t} else {\n\t\t\tif checkS.Id != to.Id {\n\t\t\t\terr := errors.New(\"Duplicate name for the schedule\")\n\t\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// Check if the schedule still has attached schedule events\n\t\tstillInUse, err := isScheduleStillInUse(*to)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\t\treturn err\n\t\t}\n\t\tif stillInUse {\n\t\t\terr = errors.New(\"Schedule is still in use, can't change the name\")\n\t\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\t\treturn err\n\t\t}\n\n\t\tto.Name = from.Name\n\t}\n\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
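A minimal standalone sketch of the RID-selection step at the top of processAssociations above: given an association's main and associated RIDs, pick the one that does not belong to the current service, then fetch that schedule. The function and sample RID values here are hypothetical; only the selection logic mirrors the record.

package main

import "fmt"

// otherRID returns the RID on the far side of an association from the
// current service, mirroring the ar/ai swap in processAssociations.
func otherRID(mainRID, assocRID, serviceRID string) string {
	if mainRID == serviceRID {
		return assocRID
	}
	return mainRID
}

func main() {
	// The current service is the association's main service, so the
	// schedule to resolve is the associated one.
	fmt.Println(otherRID("202301011234567", "202301017654321", "202301011234567"))
}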
acceptService returns true if the service is to be accepted, false if it's to be ignored | func (bf *boardFilter) acceptService(service ldb.Service) bool {
// Original requirement, must have an RID
if service.RID == "" {
return false
}
// remove terminating services
if bf.terminated && bf.atStation(service.Destination) {
return false
}
if bf.callAt && !bf.callsAt(service.CallingPoints, bf.callAtTiplocs) {
return false
}
return true
} | [
"func (f *aclFilter) allowService(service string) bool {\n\tif service == \"\" {\n\t\treturn true\n\t}\n\n\tif !f.enforceVersion8 && service == structs.ConsulServiceID {\n\t\treturn true\n\t}\n\treturn f.authorizer.ServiceRead(service)\n}",
"func (r *RPCAcceptor) Accept(req *ChannelAcceptRequest) bool {\n\treturn r.acceptClosure(req)\n}",
"func (m *MockMessageSvc) Accept(msgType string, purpose []string) bool {\n\tif m.AcceptFunc != nil {\n\t\treturn m.AcceptFunc(msgType, purpose)\n\t}\n\n\treturn true\n}",
"func (s *Service) Accept(conn net.Conn, ipport string) error {\n\tswitch s.Role {\n\tcase ROLE_MANAGE:\n\t\treturn TcpAcceptor(conn, s, ipport)\n\tcase ROLE_PROXY, ROLE_WEBSERVER:\n\t\treturn HttpAcceptor(conn, s, ipport)\n\tdefault:\n\t\tlog.Fatal(\"unknown role in accept\")\n\t}\n\treturn errors.New(\"Accept fell through!\")\n}",
"func (s *acceptFirst) Accept(from interface{}) bool {\n\tif _, ok := s.handled[from]; ok {\n\t\treturn false\n\t}\n\ts.handled[from] = struct{}{}\n\treturn true\n}",
"func (s *Suite) Accept(t string) bool {\n\treturn t == signatureType\n}",
"func (c *ChainedAcceptor) Accept(req *ChannelAcceptRequest) bool {\n\tresult := true\n\n\tc.acceptorsMtx.RLock()\n\tfor _, acceptor := range c.acceptors {\n\t\t// We call Accept first in case any acceptor (perhaps an RPCAcceptor)\n\t\t// wishes to be notified about ChannelAcceptRequest.\n\t\tresult = acceptor.Accept(req) && result\n\t}\n\tc.acceptorsMtx.RUnlock()\n\n\treturn result\n}",
"func (r *Runtime) isAccept() bool {\n\taccepts := r.d.F\n\tif accepts.Contains(r.cur) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (f *MSPFilter) Accept(peer fab.Peer) bool {\n\treturn peer.MSPID() == f.mspID\n}",
"func IsValidService(s string) bool {\n\tswitch s {\n\tcase\n\t\t\"all\",\n\t\t\"proxy\",\n\t\t\"authorize\",\n\t\t\"authenticate\":\n\t\treturn true\n\t}\n\treturn false\n}",
"func ValidService(service string, cfg *config.CloudConfig) bool {\n\tservices := availableService(cfg, false)\n\tif !IsLocalOrURL(service) && !util.Contains(services, service) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func matchService(req *http.Request, services []*Service) (*Service, bool) {\n\tfor _, service := range services {\n\t\thostRegexp := regexp.MustCompile(service.HostRegexp)\n\t\tif !hostRegexp.MatchString(req.Host) {\n\t\t\tlog.Tracef(\"Req host [%s] doesn't match [%s].\",\n\t\t\t\treq.Host, hostRegexp)\n\t\t\tcontinue\n\t\t}\n\n\t\tif service.PathRegexp == \"\" {\n\t\t\tlog.Debugf(\"Host [%s] matched pattern [%s] and path \"+\n\t\t\t\t\"expression is empty. Using service [%s].\",\n\t\t\t\treq.Host, hostRegexp, service.Address)\n\t\t\treturn service, true\n\t\t}\n\n\t\tpathRegexp := regexp.MustCompile(service.PathRegexp)\n\t\tif !pathRegexp.MatchString(req.URL.Path) {\n\t\t\tlog.Tracef(\"Req path [%s] doesn't match [%s].\",\n\t\t\t\treq.URL.Path, pathRegexp)\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Host [%s] matched pattern [%s] and path [%s] \"+\n\t\t\t\"matched [%s]. Using service [%s].\",\n\t\t\treq.Host, hostRegexp, req.URL.Path, pathRegexp,\n\t\t\tservice.Address)\n\t\treturn service, true\n\t}\n\tlog.Errorf(\"No backend service matched request [%s%s].\", req.Host,\n\t\treq.URL.Path)\n\treturn nil, false\n}",
"func (v *VDRI) Accept(method string) bool {\n\treturn v.accept(method)\n}",
"func (e *entry) canServe() bool {\n\t_, ok := e.svc.(Service)\n\treturn ok\n}",
"func (sms *SMS) Accept() {\n\tsms.acceptCh <- true\n\tclose(sms.acceptCh)\n}",
"func (aa Acceptors) Accept(from interface{}) bool {\n\tfor _, a := range aa {\n\t\tif !a.Accept(from) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s ServiceSpecs) SupportService(serviceUrl string, serviceOrg string) bool {\n\tif serviceUrl == \"\" {\n\t\treturn true\n\t} else {\n\t\tif len(s) == 0 {\n\t\t\treturn true\n\t\t} else {\n\t\t\tfor _, sp := range s {\n\t\t\t\tif sp.Url == serviceUrl && (sp.Org == \"\" || sp.Org == serviceOrg) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func containsService(name string, services []servicescm.Service) bool {\n\tfor _, svc := range services {\n\t\tif svc.Name == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func IsExposedService(svc *corev1.Service) bool {\n\tlabels := svc.Labels\n\tif labels == nil {\n\t\tlabels = map[string]string{}\n\t}\n\tfor _, l := range ExposeLabelKeys {\n\t\tif labels[l] == \"true\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
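A runnable sketch of the guard-clause filtering style used by acceptService above, with a stripped-down stand-in for ldb.Service; the service struct and the plain string comparison for "terminates here" are assumptions, not the record's actual types.

package main

import "fmt"

// service is a hypothetical stand-in for ldb.Service.
type service struct {
	RID         string
	Destination string
}

// acceptService mirrors the guard-clause style above: reject services
// without an RID, then reject those terminating at the filtered station.
func acceptService(s service, terminated bool, station string) bool {
	if s.RID == "" {
		return false
	}
	if terminated && s.Destination == station {
		return false
	}
	return true
}

func main() {
	fmt.Println(acceptService(service{RID: "X1", Destination: "PAD"}, true, "PAD")) // false: terminates here
	fmt.Println(acceptService(service{RID: "X2", Destination: "RDG"}, true, "PAD")) // true
}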
rowToRecord converts from pgx.Row to a store.Record | func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {
var expiry *time.Time
record := &store.Record{}
metadata := make(Metadata)
if err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {
if err == sql.ErrNoRows {
return record, store.ErrNotFound
}
return nil, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if expiry != nil {
record.Expiry = time.Until(*expiry)
}
return record, nil
} | [
"func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {\n\tvar records []*store.Record\n\n\tfor rows.Next() {\n\t\tvar expiry *time.Time\n\t\trecord := &store.Record{}\n\t\tmetadata := make(Metadata)\n\n\t\tif err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\t\treturn records, err\n\t\t}\n\n\t\t// set the metadata\n\t\trecord.Metadata = toMetadata(&metadata)\n\t\tif expiry != nil {\n\t\t\trecord.Expiry = time.Until(*expiry)\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\treturn records, nil\n}",
"func recordToRecord(\n\ttopic string,\n\tpartition int32,\n\tbatch *kmsg.RecordBatch,\n\trecord *kmsg.Record,\n) *Record {\n\th := make([]RecordHeader, 0, len(record.Headers))\n\tfor _, kv := range record.Headers {\n\t\th = append(h, RecordHeader{\n\t\t\tKey: kv.Key,\n\t\t\tValue: kv.Value,\n\t\t})\n\t}\n\n\treturn &Record{\n\t\tKey: record.Key,\n\t\tValue: record.Value,\n\t\tHeaders: h,\n\t\tTimestamp: timeFromMillis(batch.FirstTimestamp + int64(record.TimestampDelta)),\n\t\tTopic: topic,\n\t\tPartition: partition,\n\t\tAttrs: RecordAttrs{uint8(batch.Attributes)},\n\t\tProducerID: batch.ProducerID,\n\t\tProducerEpoch: batch.ProducerEpoch,\n\t\tLeaderEpoch: batch.PartitionLeaderEpoch,\n\t\tOffset: batch.FirstOffset + int64(record.OffsetDelta),\n\t}\n}",
"func (r RecordV1) toRecord() Record {\n\treturn Record{\n\t\tType: r.Type,\n\t\tName: r.Name,\n\t\tAppliedAt: r.AppliedAt,\n\t}\n}",
"func RowToRawData(rows *sql.Rows) (r RawData) {\n\trecord, _ := RowToArr(rows)\n\tr.Header = record[0]\n\tr.Rows = append(r.Rows, record[1:])\n\treturn\n}",
"func convertRow(\n\trow *Row,\n\twantsNode bool,\n\twantsTimestamp bool,\n\tdesiredValues []string,\n) *stats.Row {\n\tvar (\n\t\tnode string\n\t\ttimestamp time.Time\n\t)\n\n\tvar resultValues map[string]interface{}\n\tif len(desiredValues) > 0 {\n\t\tresultValues = make(map[string]interface{})\n\t}\n\n\tfor _, v := range desiredValues {\n\t\tresultValues[v] = row.value(v)\n\t}\n\n\tif wantsNode {\n\t\tnode = row.Node\n\t}\n\tif wantsTimestamp {\n\t\ttimestamp = row.Timestamp.UTC()\n\t}\n\n\treturn &stats.Row{\n\t\tNode: node,\n\t\tTimestamp: timestamp,\n\t\tValues: resultValues,\n\t}\n}",
"func Row2Bytes() func([]interface{}) ([]byte, error) {\n\t//TODO: test this\n\thandle := new(codec.MsgpackHandle)\n\n\treturn func(row []interface{}) ([]byte, error) {\n\t\tbuffer := new(bytes.Buffer)\n\t\tenc := codec.NewEncoder(buffer, handle)\n\t\terr := enc.Encode(row)\n\t\tif err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\t\treturn buffer.Bytes(), nil\n\t}\n}",
"func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}",
"func (e *commonFormatEncoder) Row(tp int, row *[]interface{}, seqno uint64) ([]byte, error) {\n\tcf := convertRowToCommonFormat(tp, row, e.inSchema, seqno, e.filter)\n\treturn CommonFormatEncode(cf)\n}",
"func MarshalRecord(record *rangedb.Record) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tnewRecord := *record\n\tnewRecord.Data = nil\n\n\tencoder := msgpack.NewEncoder(&buf)\n\tencoder.UseJSONTag(true)\n\n\terr := encoder.Encode(newRecord)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed encoding record: %v\", err)\n\t}\n\n\terr = encoder.Encode(record.Data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed encoding record data: %v\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}",
"func RowTo[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&value)\n\treturn value, err\n}",
"func (dao PathProfileDAOPsql) rowToPathProfile(row *sql.Row, o *models.PathProfile) error {\n\treturn row.Scan(&o.ID, &o.ProfileID, &o.Path.ID, &o.Path.Path, &o.Path.PathName, &o.Path.Description, &o.Post, &o.Put, &o.Del, &o.Get, &o.CreatedAt, &o.UpdatedAt)\n}",
"func (raw *Raw) ToRecord() Record {\n\tstart := time.Now()\n\tcborH := &codec.CborHandle{}\n\trec := getRecordByTypeID(raw.Type)\n\tdec := codec.NewDecoder(bytes.NewReader(raw.Data), cborH)\n\terr := dec.Decode(rec)\n\tsince := time.Since(start)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif raw.Type == codeRecordID {\n\t\tlog.Debugf(\"ToRecord func in record/serialize: for TypeID %s, time inside - %s\", raw.Type, since)\n\t}\n\treturn rec\n}",
"func CSVToRecord(fields []string) (record Record, err error) {\n\t// Convert prices from strings to floats\n\topenPrice, err := strconv.ParseFloat(fields[CSVOpenIndex], FloatSize)\n\tif err != nil {\n\t\terr = errors.New(\"failed to parse open price\")\n\t}\n\topenPriceCents := int(openPrice * 100)\n\n\tclosePrice, err := strconv.ParseFloat(fields[CSVCloseIndex], FloatSize)\n\tif err != nil {\n\t\terr = errors.New(\"failed to parse close price\")\n\t}\n\tclosePriceCents := int(closePrice * 100)\n\n\t// Convert time from string to time\n\tconst dateFormat = \"2006-01-02\"\n\trecordDate, err := time.Parse(dateFormat, fields[CSVDateIndex])\n\tif err != nil {\n\t\terr = errors.New(\"failed to parse quote date\")\n\t}\n\trecord.Day = recordDate\n\trecord.Open = openPriceCents\n\trecord.Close = closePriceCents\n\treturn\n}",
"func (tkrs *CSVKeyedRecordScanner) Record() (*libutils.KeyedRecord) {\n wire_data := tkrs.scanner.Bytes()\n wire_data_copy := make([]byte, len(wire_data))\n copy(wire_data_copy, wire_data)\n\n return libutils.NewKeyedRecordFromBytes(wire_data_copy, tkrs.decoder)\n}",
"func NewRecord(schema *arrow.Schema, cols []arrow.Array, nrows int64) *simpleRecord {\n\trec := &simpleRecord{\n\t\trefCount: 1,\n\t\tschema: schema,\n\t\trows: nrows,\n\t\tarrs: make([]arrow.Array, len(cols)),\n\t}\n\tcopy(rec.arrs, cols)\n\tfor _, arr := range rec.arrs {\n\t\tarr.Retain()\n\t}\n\n\tif rec.rows < 0 {\n\t\tswitch len(rec.arrs) {\n\t\tcase 0:\n\t\t\trec.rows = 0\n\t\tdefault:\n\t\t\trec.rows = int64(rec.arrs[0].Len())\n\t\t}\n\t}\n\n\terr := rec.validate()\n\tif err != nil {\n\t\trec.Release()\n\t\tpanic(err)\n\t}\n\n\treturn rec\n}",
"func (r *Rows) row(a ...interface{}) error {\n\tdefer r.Close()\n\n\tfor _, dp := range a {\n\t\tif _, ok := dp.(*sql.RawBytes); ok {\n\t\t\treturn VarTypeError(\"RawBytes isn't allowed on Row()\")\n\t\t}\n\t}\n\n\tif !r.Next() {\n\t\tif err := r.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sql.ErrNoRows\n\t}\n\tif err := r.Scan(a...); err != nil {\n\t\treturn err\n\t}\n\n\treturn r.Close()\n}",
"func ConvertRecord(s string) (r record) {\n // Drop the last char pf the string (it's a ' ')\n s = s[:len(s) - 1]\n\n // Split the string in the various fields\n var fields []string = strings.Split(s, \" \")\n\n // Update the fields of the record based on the various fields\n for _, f := range fields {\n switch f[:3] {\n case \"byr\": r.byr = f[4:]\n case \"iyr\": r.iyr = f[4:]\n case \"eyr\": r.eyr = f[4:]\n case \"hgt\": r.hgt = f[4:]\n case \"hcl\": r.hcl = f[4:]\n case \"ecl\": r.ecl = f[4:]\n case \"pid\": r.pid = f[4:]\n }\n }\n\n return\n}",
"func (m *MySQL) ToRecord(cs dbchangeset) *changeset.Record {\n\ttag := \"\"\n\tif cs.Tag != nil {\n\t\ttag = *cs.Tag\n\t}\n\n\treturn &changeset.Record{\n\t\tID: cs.ID,\n\t\tAuthor: cs.Author,\n\t\tFilename: cs.Filename,\n\t\tDateExecuted: cs.DateExecuted,\n\t\tOrderExecuted: cs.OrderExecuted,\n\t\tChecksum: cs.Checksum,\n\t\tDescription: cs.Description,\n\t\tTag: tag,\n\t\tVersion: cs.Version,\n\t}\n}",
"func RowToArr(rows *sql.Rows) (records [][]string, err error) {\n\tfmt.Printf(\"RowToArr start at %s\", time.Now())\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\t//records = make([]interface{}, 0)\n\trecords = append(records, columns) //append row header as 1st row\n\n\t// var resultCols []string\n\tfor rows.Next() {\n\t\t// resultCols = make([]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\terr = rows.Scan(readCols...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresultCols := assertTypeArray(columns, rawCols)\n\t\trecords = append(records, resultCols)\n\t}\n\n\tfmt.Printf(\"RowToArr end at %s\", time.Now())\n\treturn records, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
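A runnable sketch of the sentinel-error translation in rowToRecord above: a driver-level sql.ErrNoRows is mapped to the store's not-found error so callers never see the database layer. errNotFound is a stand-in for store.ErrNotFound.

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found") // stand-in for store.ErrNotFound

// translateScanErr maps the driver's no-rows sentinel to the store's
// sentinel and passes every other error through unchanged.
func translateScanErr(err error) error {
	if errors.Is(err, sql.ErrNoRows) {
		return errNotFound
	}
	return err
}

func main() {
	fmt.Println(translateScanErr(sql.ErrNoRows)) // not found
}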
rowsToRecords converts from pgx.Rows to []store.Record | func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {
var records []*store.Record
for rows.Next() {
var expiry *time.Time
record := &store.Record{}
metadata := make(Metadata)
if err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {
return records, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if expiry != nil {
record.Expiry = time.Until(*expiry)
}
records = append(records, record)
}
return records, nil
} | [
"func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {\n\tvar expiry *time.Time\n\trecord := &store.Record{}\n\tmetadata := make(Metadata)\n\n\tif err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn record, store.ErrNotFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t// set the metadata\n\trecord.Metadata = toMetadata(&metadata)\n\tif expiry != nil {\n\t\trecord.Expiry = time.Until(*expiry)\n\t}\n\n\treturn record, nil\n}",
"func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {\n\tvar rs [][]Value\n\tfor _, r := range rows {\n\t\trow, err := convertRow(r, schema)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trs = append(rs, row)\n\t}\n\treturn rs, nil\n}",
"func RowToArr(rows *sql.Rows) (records [][]string, err error) {\n\tfmt.Printf(\"RowToArr start at %s\", time.Now())\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\t//records = make([]interface{}, 0)\n\trecords = append(records, columns) //append row header as 1st row\n\n\t// var resultCols []string\n\tfor rows.Next() {\n\t\t// resultCols = make([]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\terr = rows.Scan(readCols...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresultCols := assertTypeArray(columns, rawCols)\n\t\trecords = append(records, resultCols)\n\t}\n\n\tfmt.Printf(\"RowToArr end at %s\", time.Now())\n\treturn records, nil\n}",
"func records(rows *sql.Rows) (Records, error) {\n\tvar res Records\n\tfor rows.Next() {\n\t\tvar streamID string\n\t\tvar streamIndex uint64\n\t\tvar originStreamID string\n\t\tvar originStreamIndex uint64\n\t\tvar id string\n\t\tvar typ string\n\t\tvar recordedOn string\n\t\tvar data []byte\n\t\tvar metadata []byte\n\t\terr := rows.Scan(&streamID, &streamIndex, &originStreamID, &originStreamIndex, &recordedOn, &id, &typ, &data, &metadata)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr := Record{\n\t\t\tStreamID: streamID,\n\t\t\tStreamIndex: streamIndex,\n\t\t\tOriginStreamID: originStreamID,\n\t\t\tOriginStreamIndex: originStreamIndex,\n\t\t\tRecordedOn: parseTime(recordedOn),\n\t\t\tID: id,\n\t\t\tType: typ,\n\t\t\tData: json.RawMessage(data),\n\t\t\tMetadata: json.RawMessage(metadata),\n\t\t}\n\t\tres = append(res, r)\n\t}\n\treturn res, nil\n}",
"func RowToRawData(rows *sql.Rows) (r RawData) {\n\trecord, _ := RowToArr(rows)\n\tr.Header = record[0]\n\tr.Rows = append(r.Rows, record[1:])\n\treturn\n}",
"func (a *kinesisFirehoseWriter) toRecords(msg message.Batch) ([]*firehose.Record, error) {\n\tentries := make([]*firehose.Record, msg.Len())\n\n\terr := msg.Iter(func(i int, p *message.Part) error {\n\t\tentry := firehose.Record{\n\t\t\tData: p.AsBytes(),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis Firehose payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}",
"func (r RowIdentifiers) ToRows(callback func(*proto.RowResponse) error) error {\n\tif len(r.Keys) > 0 {\n\t\tci := []*proto.ColumnInfo{{Name: r.Field, Datatype: \"string\"}}\n\t\tfor _, key := range r.Keys {\n\t\t\tif err := callback(&proto.RowResponse{\n\t\t\t\tHeaders: ci,\n\t\t\t\tColumns: []*proto.ColumnResponse{\n\t\t\t\t\t{ColumnVal: &proto.ColumnResponse_StringVal{StringVal: key}},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling callback\")\n\t\t\t}\n\t\t\tci = nil\n\t\t}\n\t} else {\n\t\tci := []*proto.ColumnInfo{{Name: r.Field, Datatype: \"uint64\"}}\n\t\tfor _, id := range r.Rows {\n\t\t\tif err := callback(&proto.RowResponse{\n\t\t\t\tHeaders: ci,\n\t\t\t\tColumns: []*proto.ColumnResponse{\n\t\t\t\t\t{ColumnVal: &proto.ColumnResponse_Uint64Val{Uint64Val: uint64(id)}},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling callback\")\n\t\t\t}\n\t\t\tci = nil\n\t\t}\n\t}\n\treturn nil\n}",
"func rowsToThings(rows *sql.Rows) Things {\n\tvar (\n\t\tt Thing\n\t\tresult Things\n\t\terr error\n\t)\n\n\tcheckRows(\"Things\", rows)\n\n\tfor i := 0; rows.Next(); i++ {\n\t\terr := rows.Scan(&t.ckey, &t.cval, &t.url, &t.data, &t.clockid, &t.tsn)\n\t\tcheckErr(\"scan things\", err)\n\n\t\tresult = append(result, t)\n\t}\n\terr = rows.Err()\n\tcheckErr(\"end reading things loop\", err)\n\n\tfmt.Printf(\"returning things: %d rows\\n\", len(result))\n\treturn result\n}",
"func recordToSlice(record Record) []string {\n\tvar recordSlice []string\n\n\trecordSlice = []string{\n\t\tfmt.Sprintf(\"%d\",record.CheeseId), record.CheeseName, record.ManufacturerName, record.ManufacturerProvCode,\n\t\trecord.ManufacturingType, record.WebSite, fmt.Sprintf(\"%.2f\", record.FatContentPercent), \n\t\tfmt.Sprintf(\"%.2f\", record.MoisturePercent), record.Particularities, record.Flavour, \n\t\trecord.Characteristics, record.Ripening, fmt.Sprintf(\"%t\", record.Organic),\n\t\trecord.CategoryType, record.MilkType, record.MilkTreatmentType, record.RindType, record.LastUpdateDate,\n\t}\n\n\treturn recordSlice\n}",
"func convertToMap(rows *sql.Rows) ([]map[string]interface{}, error) {\n\tvar response []map[string]interface{}\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tm := make(map[string]interface{})\n\t\tfor i, colName := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\tm[colName] = *val\n\t\t}\n\t\tresponse = append(response, m)\n\t}\n\treturn response, nil\n}",
"func databaseRowsToPaginationDataList(rows *sql.Rows, dtFields []dtColumn) ([]map[string]string, error) {\n\tvar dataList []map[string]string\n\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get row.Columns %w\", err)\n\t}\n\n\tvalues := make([]sql.RawBytes, len(columns))\n\t// rows.Scan wants '[]interface{}' as an argument, so we must copy the\n\t// references into such a slice\n\t// See http://code.google.com/p/go-wiki/wiki/InterfaceSlice for details\n\tscanArgs := make([]interface{}, len(values))\n\tfor i := range values {\n\t\tscanArgs[i] = &values[i]\n\t}\n\n\tfor rows.Next() {\n\t\t// get RawBytes from data\n\t\terr = rows.Scan(scanArgs...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not scan rows to 'scanArgs...' %w\", err)\n\t\t}\n\n\t\tvar value string\n\n\t\tfor i, col := range values {\n\t\t\t// Here we can check if the value is nil (NULL value)\n\t\t\tif col == nil {\n\t\t\t\tvalue = \"NULL\"\n\t\t\t} else {\n\t\t\t\tvalue = string(col)\n\t\t\t}\n\n\t\t\tfor _, dtField := range dtFields {\n\t\t\t\tif dtField.dbColumnName == columns[i] {\n\t\t\t\t\tdtObject := map[string]string{dtField.dtColumnName: value}\n\t\t\t\t\tdataList = append(dataList, dtObject)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn dataList, nil\n}",
"func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {\n\tdefer rows.Close()\n\n\tslice := []T{}\n\n\tfor rows.Next() {\n\t\tvalue, err := fn(rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tslice = append(slice, value)\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slice, nil\n}",
"func ConvertRows(rows Rows) (sql.Table, error) {\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn sql.Table{}, err\n\t}\n\n\tcolumnTypes, err := rows.ColumnTypes()\n\tif err != nil {\n\t\treturn sql.Table{}, err\n\t}\n\n\tresult := sql.Table{Columns: cols}\n\n\tfor rows.Next() {\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tfor i := range columns {\n\t\t\t// Populating with pointers to concrete types.\n\t\t\tcolumns[i] = reflect.New(columnTypes[i].ScanType()).Interface()\n\t\t}\n\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columns...); err != nil {\n\t\t\treturn sql.Table{}, err\n\t\t}\n\n\t\tm := make(map[string]interface{})\n\t\tfor i, c := range cols {\n\t\t\t// Getting values from pointers to types.\n\t\t\tm[c] = reflect.ValueOf(columns[i]).Elem().Interface()\n\t\t}\n\n\t\tresult.Rows = append(result.Rows, m)\n\t}\n\n\treturn result, nil\n}",
"func rowsToPruebas(rows *sql.Rows) ([]*Prueba, error) {\n\tvar pruebas []*Prueba\n\tfor rows.Next() {\n\t\tvar t Prueba\n\t\terr := rows.Scan(&t.ID, &t.Preguntaid, &t.Entrada, &t.Salida, &t.Visible, &t.PostEntrega, &t.Valor)\n\t\tif err != nil {\n\t\t\treturn pruebas, err\n\t\t}\n\t\tpruebas = append(pruebas, &t)\n\t}\n\treturn pruebas, nil\n}",
"func RowToDrivers(row *sql.Rows) []Driver {\n result := []Driver{}\n for row.Next() {\n var driverName string\n var driverTelephoneNumber string\n row.Scan(&driverName, &driverTelephoneNumber)\n result = append(result, Driver{\n DriverName: driverName,\n DriverTelephoneNumber: driverTelephoneNumber,\n })\n }\n return result\n}",
"func (a *kinesisWriter) toRecords(msg message.Batch) ([]*kinesis.PutRecordsRequestEntry, error) {\n\tentries := make([]*kinesis.PutRecordsRequestEntry, msg.Len())\n\n\terr := msg.Iter(func(i int, p *message.Part) error {\n\t\tpartKey, err := a.partitionKey.String(i, msg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"partition key interpolation error: %w\", err)\n\t\t}\n\t\tentry := kinesis.PutRecordsRequestEntry{\n\t\t\tData: p.AsBytes(),\n\t\t\tPartitionKey: aws.String(partKey),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\thashKey, err := a.hashKey.String(i, msg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"hash key interpolation error: %w\", err)\n\t\t}\n\t\tif hashKey != \"\" {\n\t\t\tentry.ExplicitHashKey = aws.String(hashKey)\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}",
"func RowToTrips(row *sql.Rows) []Trip {\n trips := []Trip{}\n for row.Next() {\n var tripNumber int\n var startLocationName string\n var destinationName string\n row.Scan(&tripNumber, &startLocationName, &destinationName)\n trips = append(trips, Trip{\n TripNumber: tripNumber,\n StartLocationName: startLocationName,\n DestinationName: destinationName,\n })\n }\n return trips\n}",
"func Row2Bytes() func([]interface{}) ([]byte, error) {\n\t//TODO: test this\n\thandle := new(codec.MsgpackHandle)\n\n\treturn func(row []interface{}) ([]byte, error) {\n\t\tbuffer := new(bytes.Buffer)\n\t\tenc := codec.NewEncoder(buffer, handle)\n\t\terr := enc.Encode(row)\n\t\tif err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\t\treturn buffer.Bytes(), nil\n\t}\n}",
"func convertFromTsRows(tsRows [][]TsCell) []*riak_ts.TsRow {\n\tvar rows []*riak_ts.TsRow\n\tvar cells []*riak_ts.TsCell\n\tfor _, tsRow := range tsRows {\n\t\tcells = make([]*riak_ts.TsCell, 0)\n\n\t\tfor _, tsCell := range tsRow {\n\t\t\tcells = append(cells, tsCell.cell)\n\t\t}\n\n\t\tif len(rows) < 1 {\n\t\t\trows = make([]*riak_ts.TsRow, 0)\n\t\t}\n\n\t\trows = append(rows, &riak_ts.TsRow{Cells: cells})\n\t}\n\n\treturn rows\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
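Both rowToRecord and rowsToRecords convert the stored absolute expiry into a relative TTL via time.Until; a minimal sketch of that conversion, assuming (as the records do implicitly) that a nil expiry means the record never expires.

package main

import (
	"fmt"
	"time"
)

// expiryToTTL converts an absolute expiry timestamp to a remaining TTL,
// as the row-scanning helpers above do; nil means no expiry (TTL zero).
func expiryToTTL(expiry *time.Time) time.Duration {
	if expiry == nil {
		return 0
	}
	return time.Until(*expiry)
}

func main() {
	t := time.Now().Add(time.Hour)
	fmt.Println(expiryToTTL(&t).Round(time.Minute)) // 1h0m0s
}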
findConflict finds the index of the conflict. It returns the index of the first conflicting entry between the existing entries and the given entries, if there is one. If there are no conflicting entries and the existing entries contain all the given entries, zero will be returned. If there are no conflicting entries but the given entries contain new entries, the index of the first new entry will be returned. An entry is considered to be conflicting if it has the same index but a different term. The first entry MUST have an index equal to the argument 'from'. The index of the given entries MUST be continuously increasing. | func (l *LogStore) findConflict(entries []*pb.Entry) uint64 {
	// TODO: can there be a conflict at index 0?
for _, ne := range entries {
if !l.matchTerm(ne.Index, ne.Term) {
if ne.Index <= l.lastIndex() {
l.logger.Info("log found conflict",
zap.Uint64("conflictIndex", ne.Index),
zap.Uint64("conflictTerm", ne.Term),
zap.Uint64("existTerm", l.termOrPanic(l.term(ne.Index))))
}
return ne.Index
}
}
return 0
} | [
"func (l *raftLog) findConflict(from uint64, ents []pb.Entry) uint64 {\n\t// TODO(xiangli): validate the index of ents\n\tfor i, ne := range ents {\n\t\tif oe := l.at(from + uint64(i)); oe == nil || oe.Term != ne.Term {\n\t\t\treturn from + uint64(i)\n\t\t}\n\t}\n\treturn 0\n}",
"func FindConflictsByUser(entries []*RenderedScheduleEntry) map[string][]*Conflict {\n\tentriesByUser := RenderedScheduleEntries(entries).GroupBy(func(entry *RenderedScheduleEntry) string {\n\t\treturn entry.User.ID\n\t})\n\n\tvar (\n\t\tm sync.Mutex\n\t\twg sync.WaitGroup\n\t\tresults = make(map[string][]*Conflict, len(entriesByUser))\n\t)\n\n\tfor userID, entries := range entriesByUser {\n\t\twg.Add(1)\n\n\t\tgo func(userID string, entries []*RenderedScheduleEntry) {\n\t\t\tdefer wg.Done()\n\n\t\t\tconflicts := []*Conflict{}\n\n\t\t\tsort.Slice(entries, func(i, j int) bool {\n\t\t\t\treturn entries[i].Start.Before(entries[j].Start)\n\t\t\t})\n\n\t\t\tfor i, left := range entries {\n\t\t\t\tfor j := i + 1; j < len(entries); j++ {\n\t\t\t\t\tright := entries[j]\n\n\t\t\t\t\tif !right.Start.Before(left.End) { // if left.End <= right.Start\n\t\t\t\t\t\t// All good, RHS doesn't start until at least after LHS\n\t\t\t\t\t\t// ends. Stop scanning for conflicts related to LHS.\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Printf(\"CONFLICT: %s is in both %q and %q from %s to %s\\n\", left.User.Summary, left.Schedule, right.Schedule, right.Start, left.End)\n\n\t\t\t\t\tconflicts = append(conflicts, &Conflict{Left: left, Right: right})\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm.Lock()\n\t\t\tdefer m.Unlock()\n\n\t\t\tresults[userID] = conflicts\n\t\t}(userID, entries)\n\t}\n\n\twg.Wait()\n\n\treturn results\n}",
"func startIdx[E any](haystack, needle []E) int {\n\tp := &needle[0]\n\tfor i := range haystack {\n\t\tif p == &haystack[i] {\n\t\t\treturn i\n\t\t}\n\t}\n\t// TODO: what if the overlap is by a non-integral number of Es?\n\tpanic(\"needle not found\")\n}",
"func NewCreateMailerEntryConflict() *CreateMailerEntryConflict {\n\n\treturn &CreateMailerEntryConflict{}\n}",
"func FindNonOverlapping(overlaps map[int]bool) int {\n\tfor index, isOverlapping := range overlaps {\n\t\tif !isOverlapping {\n\t\t\treturn index\n\t\t}\n\t}\n\treturn -1\n}",
"func (tr *TransactionRepository) FindBetween(start int64, end int64) ([]*types.Transaction, *rTypes.Error) {\n\tif start > end {\n\t\treturn nil, errors.Errors[errors.StartMustNotBeAfterEnd]\n\t}\n\tvar transactions []transaction\n\ttr.dbClient.Where(whereClauseBetweenConsensus, start, end).Find(&transactions)\n\n\tsameHashMap := make(map[string][]transaction)\n\tfor _, t := range transactions {\n\t\th := t.getHashString()\n\t\tsameHashMap[h] = append(sameHashMap[h], t)\n\t}\n\tres := make([]*types.Transaction, 0, len(sameHashMap))\n\tfor _, sameHashTransactions := range sameHashMap {\n\t\ttransaction, err := tr.constructTransaction(sameHashTransactions)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, transaction)\n\t}\n\treturn res, nil\n}",
"func (s *schedule) getConflicts(timestamp uint32, length uint32) (conflicts uint) {\n\ts.RLock()\n\tdefer s.RUnlock()\n\tfor _, item := range s.items {\n\t\tscheduledFrom := uint64(item.timestamp) % uintmax\n\t\tscheduledTo := scheduledFrom + uint64(item.length)\n\t\tfrom := uint64(timestamp)\n\t\tto := from + uint64(length)\n\n\t\tif scheduledTo > uintmax || to > uintmax {\n\t\t\tif scheduledTo-uintmax <= from || scheduledFrom >= to-uintmax {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if scheduledTo <= from || scheduledFrom >= to {\n\t\t\tcontinue\n\t\t}\n\n\t\tif item.payload == nil {\n\t\t\tconflicts++\n\t\t} else {\n\t\t\tconflicts += 100\n\t\t}\n\t}\n\treturn\n}",
"func (s *server) ResolveConflict(ctx context.Context, in *proto_job.ResultRequest) (*proto_job.ResultReply, error) {\n\tlog.Print(\"ResolveConflict\")\n\treturn s.resultService.ResolveConflict(in)\n}",
"func (ml *messageLog) FromIndex(index int, exclusive bool) defs.MessageFindFunc {\r\n\tif index < 0 {\r\n\t\tindex = len(ml.log.entries) + index\r\n\t\tif index < 0 {\r\n\t\t\tindex = 0\r\n\t\t}\r\n\t}\r\n\tif exclusive {\r\n\t\tindex += 1\r\n\t}\r\n\treturn func() (int, bool) {\r\n\t\tif index < len(ml.log.entries) {\r\n\t\t\treturn index, true\r\n\t\t}\r\n\t\treturn 0, false\r\n\t}\r\n}",
"func ConflictFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (conflict Conflict, err error) {\n\treadStartOffset := marshalUtil.ReadOffset()\n\n\tconflict = Conflict{}\n\tbytesID, err := marshalUtil.ReadBytes(int(ledgerstate.TransactionIDLength))\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse ID from conflict: %w\", err)\n\t\treturn\n\t}\n\tconflict.ID, _, err = ledgerstate.TransactionIDFromBytes(bytesID)\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse ID from bytes: %w\", err)\n\t\treturn\n\t}\n\n\tconflict.Opinion, err = OpinionFromMarshalUtil(marshalUtil)\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to parse opinion from conflict: %w\", err)\n\t\treturn\n\t}\n\n\t// return the number of bytes we processed\n\tparsedBytes := marshalUtil.ReadOffset() - readStartOffset\n\tif parsedBytes != ConflictLength {\n\t\terr = errors.Errorf(\"parsed bytes (%d) did not match expected size (%d): %w\", parsedBytes, ConflictLength, cerrors.ErrParseBytesFailed)\n\t\treturn\n\t}\n\n\treturn\n}",
"func (re *raftEngine) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) {\r\n\tif len(ents) == 0 {\r\n\t\treturn\r\n\t}\r\n\tfirstIndex := ents[0].Index\r\n\tif firstIndex > re.appliedIndex+1 {\r\n\t\tlog.ZAPSugaredLogger().Errorf(\"Error raised when processing entries to apply, first index of committed entry [%d] should <= appliedIndex [%d].\", firstIndex, re.appliedIndex)\r\n\t\treturn\r\n\t}\r\n\tif re.appliedIndex-firstIndex+1 < uint64(len(ents)) {\r\n\t\tnents = ents[re.appliedIndex-firstIndex+1:]\r\n\t}\r\n\treturn\r\n}",
"func NewGetWaitlistEntryConflict(body *GetWaitlistEntryConflictResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}",
"func (r *Replica) scanConflicts(instances []*Instance, cmds []cmd.Command, start InstanceId, end InstanceId) (InstanceId, bool) {\n\tfor i := start; i > end; i-- {\n\t\tif instances[i] == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// we only need to find the highest instance in conflict\n\t\tif r.StateMac.HaveConflicts(cmds, instances[i].cmds) {\n\t\t\treturn i, true\n\t\t}\n\t}\n\n\treturn conflictNotFound, false\n}",
"func (finder *AmpliconFinder) Locate() ([]int, []int, error) {\n\tif finder.searched {\n\t\tif finder.found {\n\t\t\treturn []int{finder.iBegin, finder.iEnd}, []int{finder.mis5, finder.mis3}, nil\n\t\t}\n\t\treturn nil, nil, nil\n\t}\n\n\tif finder.MaxMismatch <= 0 { // exactly matching\n\t\t// search F\n\t\tvar i int\n\n\t\tif finder.rF == nil {\n\t\t\ti = bytes.Index(finder.Seq, finder.F)\n\t\t\tif i < 0 { // not found\n\t\t\t\tfinder.searched, finder.found = true, false\n\t\t\t\treturn nil, nil, nil\n\t\t\t}\n\t\t} else {\n\t\t\tloc := finder.rF.FindSubmatchIndex(finder.Seq)\n\t\t\tif len(loc) == 0 {\n\t\t\t\tfinder.searched, finder.found = true, false\n\t\t\t\treturn nil, nil, nil\n\t\t\t}\n\t\t\ti = loc[0]\n\t\t}\n\n\t\tif len(finder.R) == 0 { // only forward primer, returns location of F\n\t\t\tfinder.searched, finder.found = true, true\n\t\t\tfinder.iBegin, finder.iEnd = i, i+len(finder.F)-1\n\t\t\tfinder.mis5 = amplicon_mismatches(finder.Seq[i:i+len(finder.F)], finder.F)\n\t\t\tfinder.mis3 = 0\n\t\t\treturn []int{i + 1, i + len(finder.F)},\n\t\t\t\t[]int{finder.mis5, finder.mis3},\n\t\t\t\tnil\n\t\t}\n\n\t\t// two primers given, need to search R\n\t\tvar j int\n\t\tif finder.rR == nil {\n\t\t\tj = bytes.Index(finder.Seq, finder.R)\n\t\t\tif j < 0 {\n\t\t\t\tfinder.searched, finder.found = true, false\n\t\t\t\treturn nil, nil, nil\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tif j+1 >= len(finder.Seq) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tk := bytes.Index(finder.Seq[j+1:], finder.R)\n\t\t\t\tif k < 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tj += k + 1\n\t\t\t}\n\t\t} else {\n\t\t\tloc := finder.rR.FindAllSubmatchIndex(finder.Seq, -1)\n\t\t\tif len(loc) == 0 {\n\t\t\t\tfinder.searched, finder.found = true, false\n\t\t\t\treturn nil, nil, nil\n\t\t\t}\n\t\t\tj = loc[len(loc)-1][0]\n\t\t}\n\n\t\tif j < i { // wrong location of F and R: 5' ---R-----F---- 3'\n\t\t\tfinder.searched, finder.found = true, false\n\t\t\treturn nil, nil, nil\n\t\t}\n\t\tfinder.searched, finder.found = true, true\n\t\tfinder.iBegin, finder.iEnd = i, j+len(finder.R)-1\n\t\tfinder.mis5 = amplicon_mismatches(finder.Seq[i:i+len(finder.F)], finder.F)\n\t\tfinder.mis3 = amplicon_mismatches(finder.Seq[j:j+len(finder.R)], finder.R)\n\t\treturn []int{i + 1, j + len(finder.R)},\n\t\t\t[]int{finder.mis5, finder.mis3},\n\t\t\tnil\n\t}\n\n\t// search F\n\tlocsI, err := finder.FMindex.Locate(finder.F, finder.MaxMismatch)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif len(locsI) == 0 { // F not found\n\t\tfinder.searched, finder.found = true, false\n\t\treturn nil, nil, nil\n\t}\n\tif len(finder.R) == 0 { // returns location of F\n\t\tsort.Ints(locsI) // remain the first location\n\t\tfinder.searched, finder.found = true, true\n\t\tfinder.iBegin, finder.iEnd = locsI[0], locsI[0]+len(finder.F)-1\n\t\tfinder.mis5 = amplicon_mismatches(finder.Seq[locsI[0]:locsI[0]+len(finder.F)], finder.F)\n\t\tfinder.mis3 = 0\n\t\treturn []int{locsI[0] + 1, locsI[0] + len(finder.F)},\n\t\t\t[]int{finder.mis5, finder.mis3},\n\t\t\tnil\n\t}\n\n\t// search R\n\tlocsJ, err := finder.FMindex.Locate(finder.R, finder.MaxMismatch)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif len(locsJ) == 0 {\n\t\tfinder.searched, finder.found = true, false\n\t\treturn nil, nil, nil\n\t}\n\tsort.Ints(locsI) // to remain the FIRST location\n\tsort.Ints(locsJ) // to remain the LAST location\n\tfinder.searched, finder.found = true, true\n\tfinder.iBegin, finder.iEnd = locsI[0], locsJ[len(locsJ)-1]+len(finder.R)-1\n\tfinder.mis5 = 
amplicon_mismatches(finder.Seq[locsI[0]:locsI[0]+len(finder.F)], finder.F)\n\tfinder.mis3 = amplicon_mismatches(finder.Seq[locsJ[len(locsJ)-1]:locsJ[len(locsJ)-1]+len(finder.R)], finder.R)\n\treturn []int{locsI[0] + 1, locsJ[len(locsJ)-1] + len(finder.R)},\n\t\t[]int{finder.mis5, finder.mis3},\n\t\tnil\n}",
"func searchInIndex(r io.ReadSeeker, from, to int, searchKey []byte) (int, bool, error) {\n\tif _, err := r.Seek(int64(from), io.SeekStart); err != nil {\n\t\treturn 0, false, fmt.Errorf(\"failed to seek: %w\", err)\n\t}\n\n\tfor {\n\t\tkey, value, err := decode(r)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn 0, false, fmt.Errorf(\"failed to read: %w\", err)\n\t\t}\n\t\tif err == io.EOF {\n\t\t\treturn 0, false, nil\n\t\t}\n\t\toffset := decodeInt(value)\n\n\t\tif bytes.Equal(key, searchKey) {\n\t\t\treturn offset, true, nil\n\t\t}\n\n\t\tif to > from {\n\t\t\tcurrent, err := r.Seek(0, io.SeekCurrent)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, false, fmt.Errorf(\"failed to seek: %w\", err)\n\t\t\t}\n\n\t\t\tif current > int64(to) {\n\t\t\t\treturn 0, false, nil\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *causality) detectConflict(keys [][]byte) (bool, int) {\n\tif len(keys) == 0 {\n\t\treturn false, 0\n\t}\n\n\tfirstIdx := -1\n\tfor _, key := range keys {\n\t\tif idx, ok := c.relations[string(key)]; ok {\n\t\t\tif firstIdx == -1 {\n\t\t\t\tfirstIdx = idx\n\t\t\t} else if firstIdx != idx {\n\t\t\t\treturn true, -1\n\t\t\t}\n\t\t}\n\t}\n\n\treturn firstIdx != -1, firstIdx\n}",
"func searchRange(nums []int, target int) []int {\n\tresult := []int{-1, -1}\n\tif len(nums) <= 0 {\n\t\treturn result\n\t}\n\n\tfor i := 0; i < len(nums); i++ {\n\t\tif nums[i] == target {\n\t\t\tresult[0] = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor j := len(nums) - 1; j >= 0; j-- {\n\t\tif nums[j] == target {\n\t\t\tresult[1] = j\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn result\n}",
"func NewConflict(parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(nil, DefaultConflict, wparams.NewParamStorer(parameters...))\n}",
"func (gui *Gui) findNewSelectedIdx(prevNodes []*filetree.FileNode, currNodes []*filetree.FileNode) int {\n\tgetPaths := func(node *filetree.FileNode) []string {\n\t\tif node == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif node.File != nil && node.File.IsRename() {\n\t\t\treturn node.File.Names()\n\t\t} else {\n\t\t\treturn []string{node.Path}\n\t\t}\n\t}\n\n\tfor _, prevNode := range prevNodes {\n\t\tselectedPaths := getPaths(prevNode)\n\n\t\tfor idx, node := range currNodes {\n\t\t\tpaths := getPaths(node)\n\n\t\t\t// If you started off with a rename selected, and now it's broken in two, we want you to jump to the new file, not the old file.\n\t\t\t// This is because the new should be in the same position as the rename was meaning less cursor jumping\n\t\t\tfoundOldFileInRename := prevNode.File != nil && prevNode.File.IsRename() && node.Path == prevNode.File.PreviousName\n\t\t\tfoundNode := utils.StringArraysOverlap(paths, selectedPaths) && !foundOldFileInRename\n\t\t\tif foundNode {\n\t\t\t\treturn idx\n\t\t\t}\n\t\t}\n\t}\n\n\treturn -1\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add adds one or more previously unadded urls to crawler to visit. source can be nil to indicate root. Returns a list of errors if any occurred. | func (c *Crawler) Add(source *url.URL, uri ...*url.URL) []error {
var errs []error
for _, u := range uri {
var err error
u := u
u.Fragment = "" // reset fragment, we don't want it messing our visited list
if source != nil {
u = source.ResolveReference(u)
}
if u.Scheme != "http" && u.Scheme != "https" {
err = ErrUnsupportedScheme
} else if err == nil && c.filter != nil && !c.filter(u) {
err = ErrFilteredOut
}
us := u.String()
// For the already-visited test we need to clean up each URL a bit
vkey := strings.TrimRight(us[strings.Index(us, ":")+1:], "/") // Remove scheme and trailing slash
if err == nil {
c.toVisitMu.RLock()
if _, ok := c.toVisit[vkey]; ok {
err = ErrAlreadyInList
}
c.toVisitMu.RUnlock()
}
if err == nil {
c.logger.Debugf("Add(%v %v): OK", source, us)
atomic.AddUint64(&c.numQueued, 1)
} else if err != nil {
//c.logger.Warnf("Add(%v %v): %v", source, us, err)
atomic.AddUint64(&c.numEncountered, 1)
errs = append(errs, errors.Wrapf(err, "Invalid URL %v", u))
continue
}
c.toVisitMu.Lock()
c.toVisit[vkey] = struct{}{}
c.toVisitMu.Unlock()
{
uu := *u
uu.Scheme = ""
if source != nil && source.Host == uu.Host {
uu.Host = ""
}
if source == nil {
c.mapper.Add("<root>", uu.String())
} else {
c.mapper.Add(source.String(), uu.String())
}
}
v := visit{
source: source,
target: u,
}
select {
case c.visitChan <- v:
case <-c.ctx.Done():
return append(errs, c.ctx.Err())
}
}
return errs
} | [
"func add(url string, verbose bool, scrapeURLs *scrapeURL) {\n\tscrapeURLs.AddedURLsCount++\n\tscrapeURLs.AddedURLs = append(scrapeURLs.AddedURLs, url)\n\tif verbose {\n\t\tlog.Println(\"Added: \" + url)\n\t}\n}",
"func (s *Sources) Add(src string) error {\n\tif src == \"\" {\n\t\treturn errors.New(\"src is an empty string\")\n\t}\n\tfor _, v := range *s {\n\t\tif v == src {\n\t\t\treturn errors.New(\"src already exist\")\n\t\t}\n\t}\n\t*s = append(*s, src)\n\treturn nil\n}",
"func (s *Sources) Add(source Source) {\n\ts.sources = append(s.sources, source)\n}",
"func (s *SitemapIndex) Add(u *URL) {\n\ts.URLs = append(s.URLs, u)\n}",
"func (os *OriginChecker) AddRawURLs(urls []string) {\n\tos.Lock()\n\tdefer os.Unlock()\n\n\tfor _, u := range urls {\n\t\tclean, err := cleanOrigin(u)\n\t\tif err == nil {\n\t\t\tos.origins[clean] = true\n\t\t}\n\t}\n}",
"func (search *Search) AddSource(source string) *Search {\n\tvar sources []string\n\tif search.query[SOURCE] == nil {\n\t\tsources = []string{}\n\t} else {\n\t\tsources = search.query[SOURCE].([]string)\n\t}\n\tsources = append(sources, source)\n\tsearch.query[SOURCE] = sources\n\treturn search\n}",
"func (r *RssFeedEmitter) Add(url string) {\n\tfor _, feed := range r.feeds {\n\t\tif feed.Link == url {\n\t\t\treturn\n\t\t}\n\t}\n\tnewFeed, err := r.parser.ParseURL(url)\n\tif err != nil {\n\t\treturn\n\t}\n\tr.feeds = append(r.feeds, *newFeed)\n}",
"func (s *Sources) AddSources(src ...string) error {\n\tfor _, v := range src {\n\t\terr := s.Add(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (h *CrawlHandler) AddCrawl(url string, statusCode int) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\n\th.crawls[url] = statusCode\n}",
"func (f *frontier) Add(uri ...string) {\n\tfor _, i := range uri {\n\t\tu, err := f.filter(f, i)\n\t\tif err != nil {\n\t\t\tcontinue // do nothing\n\t\t}\n\t\tf.lk.Lock()\n\t\tf.nbs = append(f.nbs, &visitable{uri: u})\n\t\tf.lk.Unlock()\n\t}\n}",
"func (s *Launcher) addSource(source *sources.LogSource) {\n\ts.activeSources = append(s.activeSources, source)\n\ts.launchTailers(source)\n}",
"func (r *Repository) AddImages(urls []string) []error {\n\tvar errors []error\n\terrChan := make(chan error, len(urls))\n\tvar wg sync.WaitGroup\n\tfor _, url := range urls {\n\t\turl := url\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := r.addImage(url)\n\t\t\terrChan <- err\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(errChan)\n\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\treturn errors\n}",
"func (m *Manager) addFiles(url string, source string, dir bool) error {\n\n\tif url == \"\" || url[0] != '/' {\n\t\treturn ErrUrl\n\t}\n\n\turl = strings.TrimSuffix(url, \"/\")\n\tif !dir && url == \"\" {\n\t\turl = \"/\"\n\t}\n\tif dir && url == \"\" {\n\t\treturn ErrRootLevel\n\t}\n\n\ts, err := os.Executable()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpath, err := filepath.Abs(path.Dir(s) + \"/\" + source)\n\tif info, errDir := os.Stat(path); err != nil || os.IsNotExist(errDir) || (info != nil && info.IsDir() != dir) {\n\t\tif dir {\n\t\t\treturn fmt.Errorf(ErrPathDoesNotExist.Error(), source)\n\t\t}\n\t\treturn fmt.Errorf(ErrFileDoesNotExist.Error(), source)\n\t}\n\n\tif dir {\n\t\tm.router.AddPublicDir(url, path)\n\t\treturn nil\n\t}\n\n\tm.router.AddPublicFile(url, path)\n\treturn nil\n}",
"func (collector *ErrorCollector) addError(step string, err error) {\n\tcollector.Errors[step] = append(collector.Errors[step], err)\n\n\tcollector.ErrorsNb++\n}",
"func (self *errorList) Add(err error) {\n\tif err != nil {\n\t\tself.list = append(self.list, err.Error())\n\t}\n\t//return err\n}",
"func (e *ErrorsList) Add(err error) {\n\tif err != nil {\n\t\t// Checking against container\n\t\tif container, ok := err.(multipleErrorsContainer); ok {\n\t\t\tfor _, err := range container.List() {\n\t\t\t\te.Add(err)\n\t\t\t}\n\t\t} else {\n\t\t\t*e = append(*e, err)\n\t\t}\n\t}\n}",
"func (lc *linkCollection) Add(url string, path string) {\n\tif lc.links == nil {\n\t\tlc.links = make(linkList)\n\t}\n\n\tl := lc.links[url]\n\n\tif l.Target == \"\" {\n\t\tl = link{Target: url}\n\t}\n\n\tl.Documents = append(l.Documents, path)\n\n\tlc.links[url] = l\n}",
"func (u *URL) Add(host HostPath) {\n\tu.values[host]++\n\tu.total++\n}",
"func (gState *State) ManageNewURLs() {\n\t//decides on whether to add to the directory list, or add to file output\n\tfor {\n\t\tcandidate := <-gState.Chans.newPagesChan\n\t\t//check the candidate is an actual URL\n\t\t//handle that one crazy case where :/ might be at the start because reasons\n\t\tif strings.HasPrefix(candidate.URL, \"://\") {\n\t\t\t//add a garbage scheme to get past the url parse stuff (the scheme will be added from the reference anyway)\n\t\t\tcandidate.URL = \"xxx\" + candidate.URL\n\t\t}\n\t\tu, err := url.Parse(strings.TrimSpace(candidate.URL))\n\n\t\tif err != nil {\n\t\t\tgState.wg.Done()\n\t\t\tgState.PrintOutput(err.Error(), Error, 0)\n\t\t\tcontinue //probably a better way of doing this\n\t\t}\n\n\t\t//links of the form <a href=\"/thing\" ></a> don't have a host portion to the URL\n\t\tif u.Host == \"\" {\n\t\t\tu.Host = candidate.Reference.Host\n\t\t}\n\n\t\t//actualUrl := gState.ParsedURL.Scheme + \"://\" + u.Host\n\t\tactualURL := net.CleanURL(u, (*candidate.Reference).Scheme+\"://\"+u.Host)\n\n\t\tgState.CMut.Lock()\n\t\tif _, ok := gState.Checked[actualURL]; !ok && //must have not checked it before\n\t\t\t(gState.Hosts.HostExists(u.Host) || gState.Whitelist[u.Host]) && //must be within whitelist, or be one of the starting urls\n\t\t\t!gState.Cfg.NoRecursion { //no recursion means we don't care about adding extra paths or content\n\t\t\tgState.Checked[actualURL] = true\n\t\t\tgState.CMut.Unlock()\n\t\t\tgState.wg.Add(1)\n\t\t\tgState.Chans.pagesChan <- SpiderPage{URL: actualURL, Reference: candidate.Reference, Result: candidate.Result}\n\t\t\tgState.PrintOutput(\"URL Added: \"+actualURL, Debug, 3)\n\n\t\t\t//also add any directories in the supplied path to the 'to be hacked' queue\n\t\t\tpath := \"\"\n\t\t\tdirs := strings.Split(u.Path, \"/\")\n\t\t\tfor i, y := range dirs {\n\n\t\t\t\tpath = path + y\n\t\t\t\tif len(path) > 0 && string(path[len(path)-1]) != \"/\" && i != len(dirs)-1 {\n\t\t\t\t\tpath = path + \"/\" //don't add double /'s, and don't add on the last value\n\t\t\t\t}\n\t\t\t\t//prepend / if it doesn't already exist\n\t\t\t\tif len(path) > 0 && string(path[0]) != \"/\" {\n\t\t\t\t\tpath = \"/\" + path\n\t\t\t\t}\n\n\t\t\t\tnewDir := candidate.Reference.Scheme + \"://\" + candidate.Reference.Host + path\n\t\t\t\tnewPage := SpiderPage{}\n\t\t\t\tnewPage.URL = newDir\n\t\t\t\tnewPage.Reference = candidate.Reference\n\t\t\t\tnewPage.Result = candidate.Result\n\t\t\t\tgState.CMut.RLock()\n\t\t\t\tif gState.Checked[newDir] {\n\t\t\t\t\tgState.CMut.RUnlock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tgState.CMut.RUnlock()\n\t\t\t\tgState.wg.Add(1)\n\t\t\t\tgState.Chans.newPagesChan <- newPage\n\t\t\t}\n\t\t} else {\n\t\t\tgState.CMut.Unlock()\n\t\t}\n\n\t\tgState.wg.Done()\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
getSourcegraphVersion queries the Sourcegraph GraphQL API to get the current version of the Sourcegraph instance. | func (svc *Service) getSourcegraphVersion(ctx context.Context) (string, error) {
var result struct {
Site struct {
ProductVersion string
}
}
ok, err := svc.client.NewQuery(sourcegraphVersionQuery).Do(ctx, &result)
if err != nil || !ok {
return "", err
}
return result.Site.ProductVersion, err
} | [
"func GetVersion() string {\n\treturn version\n}",
"func (_ EntityAliases) SensuAgentVersion(p graphql.ResolveParams) (string, error) {\n\tval, err := graphql.DefaultResolver(p.Source, p.Info.FieldName)\n\tret, ok := val.(string)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tif !ok {\n\t\treturn ret, errors.New(\"unable to coerce value for field 'sensuAgentVersion'\")\n\t}\n\treturn ret, err\n}",
"func (a *BaseAggregateSourced) GetVersion() int {\n\treturn a.Version\n}",
"func getVersion(agentInstall DotNetAgentInstall) (result tasks.Result) {\n\n\tagentVersion, err := tasks.GetFileVersion(agentInstall.AgentPath)\n\n\tif err != nil {\n\t\tresult.Status = tasks.Error\n\t\tresult.Summary = \"Error finding .Net Agent version\"\n\t\tlog.Info(\"Error finding .Net Agent version. The error is \", err)\n\t\treturn result\n\t}\n\n\tresult.Status = tasks.Info\n\tresult.Summary = agentVersion\n\tresult.Payload = agentVersion\n\treturn result\n\n}",
"func SourceVersion() string {\n\treturn fmt.Sprintf(\"%s commit: %s / nearest-git-\"+\n\t\t\"tag: %s / branch: %s / %s\\n\",\n\t\tProgramName, LAST_GIT_COMMIT_HASH,\n\t\tNEAREST_GIT_TAG, GIT_BRANCH, GO_VERSION)\n}",
"func CurrentSourceVersion() string {\n\tif environ.HasValue(\"SOURCE_VERSION_OVERRIDE\") {\n\t\treturn environ.GetValueStr(\"SOURCE_VERSION_OVERRIDE\")\n\t}\n\n\tmanifestPath := path.Join(RootDir(), \"src\", \"appengine\", \"resources\", \"clusterfuzz-source.manifest\")\n\tresult, err := ioutil.ReadFile(manifestPath)\n\n\tif err != nil {\n\t\tlogs.Panicf(\"Failed to get current source version: %v\", err)\n\t}\n\n\treturn string(result)\n}",
"func (_Bridge *BridgeCaller) GetVersion(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Bridge.contract.Call(opts, out, \"getVersion\")\n\treturn *ret0, err\n}",
"func (o ContentSourceOutput) Version() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ContentSource) *string { return v.Version }).(pulumi.StringPtrOutput)\n}",
"func Version() string {\n\treturn C.GoString(C.gfal2_version())\n}",
"func (m *SynchronizationSchema) GetVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"version\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (c *Client) GetVersion() (string, error) {\n\tvar version string\n\tvar statusResponse internal.StatusResponse\n\n\treq, err := http.NewRequest(http.MethodGet, c.baseURL+statusEndpoint, nil)\n\tif err != nil {\n\t\treturn version, fmt.Errorf(\"failed to build request for status endpoint - %s\", err.Error())\n\t}\n\treq.Header.Set(\"Accept\", \"application/xml\")\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn version, fmt.Errorf(\"failed to fetch backend version - %s\", err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\terr = json.NewDecoder(resp.Body).Decode(&statusResponse)\n\tif err != nil {\n\t\treturn version, fmt.Errorf(\"failed to fetch backend version - %s\", err.Error())\n\t}\n\n\treturn statusResponse.Version.Backend, nil\n}",
"func getVersion(driver *neo4j.Driver) (Version, error) {\n\tversion := Version{}\n\tsession := (*driver).NewSession(neo4j.SessionConfig{})\n\tdefer session.Close()\n\n\tresult, err := session.Run(VERSION_QUERY, nil)\n\tif err != nil {\n\t\treturn version, nil\n\t}\n\n\trecord, err := result.Single()\n\tif err != nil {\n\t\treturn version, nil\n\t}\n\n\tval, found := record.Get(\"version\")\n\tif !found {\n\t\treturn version, errors.New(\"couldn't find 'version' in query results\")\n\t}\n\tdata, ok := val.([]interface{})\n\tif !ok {\n\t\treturn version, errors.New(\"'version' isn't an array\")\n\t}\n\tif len(data) < 2 {\n\t\treturn version, errors.New(\"'version' array is empty or too small\")\n\t}\n\n\tval, found = record.Get(\"extra\")\n\tif !found {\n\t\treturn version, errors.New(\"couldn't find 'extra' version info\")\n\t}\n\textra, ok := val.(string)\n\tif !ok {\n\t\treturn version, errors.New(\"'extra' value isn't a string\")\n\t}\n\n\t// yolo for now\n\tversion.Major = uint8(data[0].(int64))\n\tversion.Minor = uint8(data[1].(int64))\n\n\tif len(data) > 2 {\n\t\tversion.Patch = uint8(data[2].(int64))\n\t}\n\tversion.Extra = extra\n\n\treturn version, nil\n}",
"func (f *Features) getVersion(ctx context.Context, adminDB *mongo.Database) {\n\tcmd := bson.D{\n\t\t{\n\t\t\tKey: \"buildInfo\",\n\t\t\tValue: 1,\n\t\t},\n\t}\n\tvar result buildInfo\n\terr := adminDB.RunCommand(ctx, cmd).Decode(&result)\n\tif err != nil {\n\t\tf.MongoVersion = &semver.Version{}\n\t\treturn\n\t}\n\n\tf.MongoVersion = semver.MustParse(result.Version)\n}",
"func (pr LocalPackageReference) GeneratorVersion() string {\n\treturn pr.generatorVersion\n}",
"func (o *ClusterUpgrade) GetVersion() (value string, ok bool) {\n\tok = o != nil && o.bitmap_&8 != 0\n\tif ok {\n\t\tvalue = o.version\n\t}\n\treturn\n}",
"func (c *Connection) Version(ctx context.Context) (string, error) {\n\tresp, err := c.Request(ctx).\n\t\tSetResult(&api.VersionResponse{}).\n\t\tGet(\"/version\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.Result().(*api.VersionResponse).Version, nil\n}",
"func GetVersion() string {\n\tif len(Version) == 0 {\n\t\treturn \"dev\"\n\t}\n\treturn Version\n}",
"func GetVersion() string {\n\treturn version.VERSIONSTR\n}",
"func GetVersion() string {\n\treturn \"v\" + appVersion\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |