query
stringlengths 8
6.75k
| document
stringlengths 9
1.89M
| negatives
listlengths 19
19
| metadata
dict |
---|---|---|---|
StopTask function is expected to stop a running task by sending the given signal to it. If the task does not stop during the given timeout, the driver must forcefully kill the task. StopTask does not clean up resources of the task or remove it from the driver's internal state.
|
// StopTask stops a running task by asking podman to stop the backing
// container. If the container does not exit within the given timeout,
// podman kills it forcefully. Task resources and the driver's internal
// state are intentionally left untouched (that is DestroyTask's job).
//
// NOTE(review): the signal argument is currently ignored — the container
// is always stopped with podman's default stop signal (see fixme below).
func (d *Driver) StopTask(taskID string, timeout time.Duration, signal string) error {
	d.logger.Info("Stopping task", "taskID", taskID, "signal", signal)
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return drivers.ErrTaskNotFound
	}
	// fixme send proper signal to container
	if err := d.podman.ContainerStop(d.ctx, handle.containerID, int(timeout.Seconds())); err != nil {
		d.logger.Error("Could not stop/kill container", "containerID", handle.containerID, "err", err)
		return err
	}
	return nil
}
|
[
"func (d *Driver) StopTask(taskID string, timeout time.Duration, signal string) error {\n\td.logger.Debug(\"StopTask called\")\n\th, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn drivers.ErrTaskNotFound\n\t}\n\n\t// implement driver specific logic to stop a task.\n\t//\n\t// The StopTask function is expected to stop a running task by sending the\n\t// given signal to it. If the task does not stop during the given timeout,\n\t// the driver must forcefully kill the task.\n\td.logger.Debug(\"StopTask returning\")\n\treturn h.KillVM()\n}",
"func (t *Task) StopWithTimeout(timeout time.Duration) error {\n\tdefer func() { recover() }()\n\tclose(*t.Stopper)\n\n\tselect {\n\tcase <-*t.Done:\n\t\treturn nil\n\tcase <-time.After(timeout):\n\t\treturn goerr.Wrap(&ErrStoppingTaskTimeout{})\n\t}\n}",
"func (e *Executor) KillTask(executor.ExecutorDriver, *mesosproto.TaskID) {\n\te.Called()\n}",
"func (s *Session) Stop() error {\n\tif s.task == nil {\n\t\treturn errors.New(\"snap task not running or not found\")\n\t}\n\n\trs := s.pClient.StopTask(s.task.ID)\n\tif rs.Err != nil {\n\t\treturn errors.Wrapf(rs.Err, \"could not send stop signal to task %q\", s.task.ID)\n\t}\n\n\terr := s.waitForStop()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not stop task %q\", s.task.ID)\n\t}\n\n\trr := s.pClient.RemoveTask(s.task.ID)\n\tif rr.Err != nil {\n\t\treturn errors.Wrapf(rr.Err, \"could not remove task %q\", s.task.ID)\n\t}\n\n\ts.task = nil\n\n\treturn nil\n}",
"func (s *scheduler) StopTask(t Task) error {\n\tif _, ok := s.tasks[t.name()]; !ok {\n\t\treturn errTaskNotExists\n\t}\n\tdelete(s.tasks, t.name())\n\terr := t.stop()\n\tt = nil\n\treturn err\n}",
"func (c *Consumer) stopTask(taskID string) {\n\tc.runL.Lock()\n\ttask, ok := c.running[taskID]\n\tc.runL.Unlock()\n\n\tif !ok {\n\t\t// This can happen if a task completes during Balance() and is not an error.\n\t\tWarnf(\"%s tried to release a non-running task=%q\", c, taskID)\n\t\treturn\n\t}\n\n\t// all handler methods must be wrapped in a recover to prevent a misbehaving\n\t// handler from crashing the entire consumer\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tstack := make([]byte, 50*1024)\n\t\t\t\tsz := runtime.Stack(stack, false)\n\t\t\t\tErrorf(\"%s Handler %s panic()'d on Stop: %v\\n%s\", c, taskID, err, stack[:sz])\n\t\t\t}\n\t\t}()\n\n\t\t// Serialize calls to Stop as a convenience to handler implementors.\n\t\ttask.stop()\n\t}()\n}",
"func (d *Driver) SignalTask(taskID string, signal string) error {\n\thandle, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn drivers.ErrTaskNotFound\n\t}\n\n\treturn d.podman.ContainerKill(d.ctx, handle.containerID, signal)\n}",
"func (c Control) ServeStopTask(w http.ResponseWriter, r *http.Request) {\n\tc.ServeTaskAction(w, r, false)\n}",
"func (cbl *Launcher) StopTask(stp *StopTaskParams) (*StopTaskResult, error) {\n\tres, err := cbl.codeBuildSvc.StopBuild(&codebuild.StopBuildInput{\n\t\tId: aws.String(stp.ID),\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to stop project.\")\n\t}\n\n\tbuildStatus := aws.StringValue(res.Build.BuildStatus)\n\n\treturn &StopTaskResult{\n\t\tBuildStatus: buildStatus,\n\t\tTaskStatus: convertTaskStatus(buildStatus),\n\t}, nil\n}",
"func (t *Task) Stop() error {\n\tselect {\n\tcase <-t.startedChan:\n\tdefault:\n\t\treturn ErrNotExecuting\n\t}\n\n\tt.cancelCtx()\n\n\treturn nil\n}",
"func (builder *ImageBuilder) KillTask(executor.ExecutorDriver, *mesosproto.TaskID) {\n\tfmt.Println(\"Kill task\")\n}",
"func (task *Task) Cancel() {\n\tfmt.Println(\"Cancelling task ...\")\n\ttask.lock.Lock()\n\ttask.shouldStop = true\n\ttask.lock.Unlock()\n\tselect {\n\tcase <-task.StopChan():\n\tcase <-time.After(5 * time.Second):\n\t}\n\ttask.result = \"INTERRUPTED\"\n}",
"func (c *Client) TerminateTask(guid string) error {\n\treq := c.NewRequest(\"PUT\", fmt.Sprintf(\"/v3/tasks/%s/cancel\", guid))\n\tresp, err := c.DoRequest(req)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error terminating task\")\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 202 {\n\t\treturn errors.Wrapf(err, \"Failed terminating task, response status code %d\", resp.StatusCode)\n\t}\n\treturn nil\n}",
"func (d *Driver) SignalTask(taskID string, signal string) error {\n\td.logger.Debug(\"signal task called and returning\")\n\n\thandle, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn drivers.ErrTaskNotFound\n\t}\n\n\t// TODO: implement driver specific signal handling logic.\n\t//\n\t// The given signal must be forwarded to the target taskID. If this plugin\n\t// doesn't support receiving signals (capability SendSignals is set to\n\t// false) you can just return nil.\n\tsig := os.Interrupt\n\tif s, ok := signals.SignalLookup[signal]; ok {\n\t\tsig = s\n\t} else {\n\t\td.logger.Warn(\"unknown signal to send to task, using SIGINT instead\", \"signal\", signal, \"task_id\", handle.task.ID)\n\t}\n\treturn handle.Signal(sig)\n}",
"func (c *DockerScheduler) stopTask(task *demand.Task) error {\n\tvar err error\n\n\t// Kill a currently-running container of this type\n\tc.Lock()\n\ttheseContainers := c.taskContainers[task.Name]\n\tvar containerToKill string\n\tfor id, v := range theseContainers {\n\t\tif v.state == \"running\" {\n\t\t\tcontainerToKill = id\n\t\t\tv.state = \"stopping\"\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Unlock()\n\n\tif containerToKill == \"\" {\n\t\treturn fmt.Errorf(\"[stop] No containers of type %s to kill\", task.Name)\n\t}\n\n\tremoveOpts := docker.RemoveContainerOptions{\n\t\tID: containerToKill,\n\t\tRemoveVolumes: true,\n\t}\n\n\tgo func() {\n\t\tscaling.Add(1)\n\t\tdefer scaling.Done()\n\n\t\tlog.Debugf(\"[stopping] container for task %s with ID %s\", task.Name, containerToKill)\n\t\terr = c.client.StopContainer(containerToKill, 1)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Couldn't stop container %s: %v\", containerToKill, err)\n\t\t\treturn\n\t\t}\n\n\t\tc.Lock()\n\t\tc.taskContainers[task.Name][containerToKill].state = \"removing\"\n\t\tc.Unlock()\n\n\t\tlog.Debugf(\"[removing] container for task %s with ID %s\", task.Name, containerToKill)\n\t\terr = c.client.RemoveContainer(removeOpts)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Couldn't remove container %s: %v\", containerToKill, err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func (t *Task) Stop() {\n\tdefer func() { recover() }()\n\tclose(*t.Stopper)\n\t<-*t.Done\n}",
"func HandleStopTask(w http.ResponseWriter, r *http.Request) {\n\tlog.Root.Info(\"HandleStopTask BEGIN\")\n\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tHttpResponseError(w, ErrNotFound)\n\t\treturn\n\t}\n\n\tdefer r.Body.Close()\n\tbody, _ := ioutil.ReadAll(r.Body)\n\n\tdata := make(map[string]interface{})\n\terr := json.Unmarshal(body, &data)\n\tif err != nil {\n\t\tlog.Root.Error(\"HandleStopTask Parse HTTP request body error\")\n\t\tHttpResponseError(w, ErrForm)\n\t\treturn\n\t}\n\n\telem, ok := data[\"taskID\"]\n\tif !ok {\n\t\tlog.Root.Error(\"HandleStopTask HTTP form data error\")\n\t\tHttpResponseError(w, ErrForm)\n\t\treturn\n\t}\n\n\ttaskID := elem.(string)\n\ttaskCapacity, err := node.StopTask(taskID)\n\tif err != nil {\n\t\tlog.Root.Error(\"HandleStopTask Stop task error. TaskID: %v\", taskID)\n\t\tHttpResponseError(w, ErrServer)\n\t\treturn\n\t}\n\n\tlog.Root.Info(\"HandleStopTask END\")\n\tHttpResponseData(w, H{\n\t\t\"taskCapacity\": taskCapacity,\n\t})\n\treturn\n}",
"func (m *MockECSAPI) StopTask(arg0 *ecs.StopTaskInput) (*ecs.StopTaskOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StopTask\", arg0)\n\tret0, _ := ret[0].(*ecs.StopTaskOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (k *KubernetesExecutor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {\n\tif k.isDone() {\n\t\treturn\n\t}\n\tlog.Infof(\"Kill task %v\\n\", taskId)\n\n\tif !k.isConnected() {\n\t\t//TODO(jdefelice) sent TASK_LOST here?\n\t\tlog.Warningf(\"Ignore kill task because the executor is disconnected\\n\")\n\t\treturn\n\t}\n\n\tk.lock.Lock()\n\tdefer k.lock.Unlock()\n\tk.killPodForTask(driver, taskId.GetValue(), messages.TaskKilled)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DestroyTask function cleans up and removes a task that has terminated. If force is set to true, the driver must destroy the task even if it is still running.
|
// DestroyTask cleans up and removes a task that has terminated. If force
// is true the task is destroyed even if it is still running: the container
// is stopped first and the driver waits briefly for the stats emitter to
// observe the exit before deleting the container and dropping the task
// from the driver's internal state.
func (d *Driver) DestroyTask(taskID string, force bool) error {
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return drivers.ErrTaskNotFound
	}
	if handle.isRunning() && !force {
		return fmt.Errorf("cannot destroy running task")
	}
	if handle.isRunning() {
		d.logger.Debug("Have to destroyTask but container is still running", "containerID", handle.containerID)
		// Best effort: there is nothing useful we can do if the stop fails,
		// so the error is only logged.
		if err := d.podman.ContainerStop(d.ctx, handle.containerID, 60); err != nil {
			d.logger.Warn("failed to stop/kill container during destroy", "error", err)
		}
		// Wait up to ~5s (20 * 250ms) for the stats emitter to collect the
		// exit code etc. before tearing everything down.
		for i := 0; i < 20; i++ {
			if !handle.isRunning() {
				break
			}
			time.Sleep(time.Millisecond * 250)
		}
		if handle.isRunning() {
			// BUG FIX: this previously logged the stale (possibly nil) error
			// from the earlier ContainerStop call, which was misleading; log
			// the container ID instead so the warning identifies the task.
			d.logger.Warn("stats emitter did not exit while stop/kill container during destroy", "containerID", handle.containerID)
		}
	}
	if handle.removeContainerOnExit {
		// Removal failure is non-fatal: the task is dropped from driver
		// state regardless, matching the original best-effort semantics.
		if err := d.podman.ContainerDelete(d.ctx, handle.containerID, true, true); err != nil {
			d.logger.Warn("Could not remove container", "container", handle.containerID, "error", err)
		}
	}
	d.tasks.Delete(taskID)
	return nil
}
|
[
"func (d *Driver) DestroyTask(taskID string, force bool) error {\n\td.logger.Debug(\"DestroyTask called\")\n\th, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn drivers.ErrTaskNotFound\n\t}\n\n\t// implement driver specific logic to destroy a complete task.\n\t//\n\t// Destroying a task includes removing any resources used by task and any\n\t// local references in the plugin. If force is set to true the task should\n\t// be destroyed even if it's currently running.\n\tif err := h.DestroyVM(); err != nil {\n\t\treturn err\n\t}\n\n\td.tasks.Delete(taskID)\n\td.logger.Debug(\"DestroyTask returning\")\n\treturn nil\n}",
"func CleanTask() {\n\tfor taskID, t := range kv.DefaultClient.GetStorage().Tasks {\n\t\tflag := true\n\t\tfor nid := range kv.DefaultClient.GetStorage().Nodes {\n\t\t\tif t.NodeID == nid {\n\t\t\t\tflag = false\n\t\t\t}\n\t\t}\n\t\tif flag {\n\t\t\tif t.Timer {\n\t\t\t\tlog.Info(\"clean timer:\", taskID)\n\t\t\t\tormTimer := new(orm.Timer)\n\t\t\t\tormTimer.ID = taskID\n\t\t\t\tormTimer.Status = false\n\t\t\t\terr := orm.UpdateTimerStatus(ormTimer)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Info(\"clean task:\", taskID)\n\t\t\t\tormTask := new(orm.Task)\n\t\t\t\tormTask.ID = taskID\n\t\t\t\tormTask.Status = \"error\"\n\t\t\t\terr := orm.UpdateTask(ormTask)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tkv.DefaultClient.DeleteTask(taskID)\n\t\t}\n\t}\n}",
"func (t *task) deleteTask() {\n\t// There is no state to clean up as of now.\n\t// If the goal state was set to DELETED, then let the\n\t// listeners know that the task has been deleted.\n\n\tvar runtimeCopy *pbtask.RuntimeInfo\n\tvar labelsCopy []*peloton.Label\n\n\t// notify listeners after dropping the lock\n\tdefer func() {\n\t\tif runtimeCopy != nil {\n\t\t\tt.jobFactory.notifyTaskRuntimeChanged(\n\t\t\t\tt.jobID,\n\t\t\t\tt.id,\n\t\t\t\tt.jobType,\n\t\t\t\truntimeCopy,\n\t\t\t\tlabelsCopy,\n\t\t\t)\n\t\t}\n\t}()\n\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif t.runtime == nil {\n\t\treturn\n\t}\n\n\tif t.runtime.GetGoalState() != pbtask.TaskState_DELETED {\n\t\treturn\n\t}\n\n\truntimeCopy = proto.Clone(t.runtime).(*pbtask.RuntimeInfo)\n\truntimeCopy.State = pbtask.TaskState_DELETED\n\tlabelsCopy = t.copyLabelsInCache()\n}",
"func cleanupTask(ctx context.Context, t *testing.T, c cocoa.ECSClient, runOut *ecs.RunTaskOutput) {\n\tif runOut != nil && len(runOut.Tasks) > 0 && runOut.Tasks[0].TaskArn != nil {\n\t\tout, err := c.StopTask(ctx, &ecs.StopTaskInput{\n\t\t\tCluster: aws.String(testutil.ECSClusterName()),\n\t\t\tTask: aws.String(*runOut.Tasks[0].TaskArn),\n\t\t})\n\t\trequire.NoError(t, err)\n\t\trequire.NotZero(t, out)\n\t}\n}",
"func (cm *Manager) Delete(ctx context.Context, taskID string, force bool) error {\n\tif !force {\n\t\treturn cm.pieceMD5Manager.removePieceMD5sByTaskID(taskID)\n\t}\n\n\treturn deleteTaskFiles(ctx, cm.cacheStore, taskID)\n}",
"func (t *TimeTask) DeleteTask(task *RawTask) {\n\tt.deleteChan <- task\n}",
"func (e *Executor) KillTask(executor.ExecutorDriver, *mesosproto.TaskID) {\n\te.Called()\n}",
"func (builder *ImageBuilder) KillTask(executor.ExecutorDriver, *mesosproto.TaskID) {\n\tfmt.Println(\"Kill task\")\n}",
"func (m *Manager) DeleteTask(t *tasks.Task) (err error) {\n\terr = os.RemoveAll(m.Config.TasksPath + string(os.PathSeparator) + t.Directory)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar newTaskList []*tasks.Task\n\tfor _, task := range m.Tasks {\n\t\tif t == task {\n\t\t\tcontinue\n\t\t}\n\t\tnewTaskList = append(newTaskList, task)\n\t}\n\tm.Tasks = newTaskList\n\treturn\n}",
"func RemoveForced(in io.Reader, out io.Writer, args []string, server DeleteServer) error {\n\tpArg, err := strconv.Atoi(args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\tt, err := GetTaskAtPosition(server, pArg-1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = server.DeleteTask(t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(out, \"Removed tasks:\")\n\tfmt.Fprintln(out, fmt.Sprintf(\"%d%s %s \", t.Order, \"X\", t.Title))\n\n\treturn nil\n}",
"func cleanupTaskDefinition(ctx context.Context, t *testing.T, c cocoa.ECSClient, out *ecs.RegisterTaskDefinitionOutput) {\n\tif out != nil && out.TaskDefinition != nil && out.TaskDefinition.TaskDefinitionArn != nil {\n\t\tout, err := c.DeregisterTaskDefinition(ctx, &ecs.DeregisterTaskDefinitionInput{\n\t\t\tTaskDefinition: out.TaskDefinition.TaskDefinitionArn,\n\t\t})\n\t\trequire.NoError(t, err)\n\t\trequire.NotZero(t, out)\n\t}\n}",
"func (na *cnmNetworkAllocator) DeallocateTask(t *api.Task) error {\n\tdelete(na.tasks, t.ID)\n\treturn na.releaseEndpoints(t.Networks)\n}",
"func (r *TaskRepository) DeleteTask(id int64) error{\n\tvar task, _ = r.GetTask(id)\n\n\t// soft delete, according to doc UpdatedAt will be set automatically\n\tr.DB.Model(&task).Update(\"IsDeleted\", true)\n\n return nil\n}",
"func (md *ManagementNode) DelTask(id string) {\n\tmd.scheduledTasksMtx.Lock()\n\tdefer md.scheduledTasksMtx.Unlock()\n\n\tdelete(md.scheduledTasks, id)\n\n}",
"func CleanTask() {\n\tvar wg sync.WaitGroup\n\tremove := []string{\n\t\t\"pkg/res/data.go\",\n\t\t\"res/generated/bundle.js\",\n\t\t\"res/generated/style.css\",\n\t\t\"browser/chrome-ext/src/src.zip\",\n\t}\n\tremoveAll := []string{\n\t\t\"dist/\",\n\t\t\"dist-archives/\",\n\t\t\"site/\",\n\t\t\"build/\",\n\t\t\"res/generated/\",\n\t\t\"res/messages/_ref\",\n\t\t\"browser/chrome-ext/src/javascripts\",\n\t\t\"AlkasirChromeExtension/\",\n\t}\n\twg.Add(len(remove))\n\twg.Add(len(removeAll))\n\tfor _, v := range remove {\n\t\tgo func(f string) {\n\t\t\tdefer wg.Done()\n\t\t\tos.Remove(f)\n\t\t}(v)\n\t}\n\tfor _, v := range removeAll {\n\t\tgo func(f string) {\n\t\t\tdefer wg.Done()\n\t\t\tos.RemoveAll(f)\n\t\t}(v)\n\t}\n\twg.Wait()\n}",
"func (c *DockerScheduler) stopTask(task *demand.Task) error {\n\tvar err error\n\n\t// Kill a currently-running container of this type\n\tc.Lock()\n\ttheseContainers := c.taskContainers[task.Name]\n\tvar containerToKill string\n\tfor id, v := range theseContainers {\n\t\tif v.state == \"running\" {\n\t\t\tcontainerToKill = id\n\t\t\tv.state = \"stopping\"\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Unlock()\n\n\tif containerToKill == \"\" {\n\t\treturn fmt.Errorf(\"[stop] No containers of type %s to kill\", task.Name)\n\t}\n\n\tremoveOpts := docker.RemoveContainerOptions{\n\t\tID: containerToKill,\n\t\tRemoveVolumes: true,\n\t}\n\n\tgo func() {\n\t\tscaling.Add(1)\n\t\tdefer scaling.Done()\n\n\t\tlog.Debugf(\"[stopping] container for task %s with ID %s\", task.Name, containerToKill)\n\t\terr = c.client.StopContainer(containerToKill, 1)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Couldn't stop container %s: %v\", containerToKill, err)\n\t\t\treturn\n\t\t}\n\n\t\tc.Lock()\n\t\tc.taskContainers[task.Name][containerToKill].state = \"removing\"\n\t\tc.Unlock()\n\n\t\tlog.Debugf(\"[removing] container for task %s with ID %s\", task.Name, containerToKill)\n\t\terr = c.client.RemoveContainer(removeOpts)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Couldn't remove container %s: %v\", containerToKill, err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func DeleteTask(id string) {\n\ttasks.Lock()\n\tdefer tasks.Unlock()\n\n\tif tasks.m == nil {\n\t\treturn\n\t}\n\tdelete(tasks.m, id)\n}",
"func (f *Failer) KillTask(host, task string) error {\n\tscript := \"sudo pkill -x %s\"\n\tlog.V(1).Infof(\"Killing task %s on host %s\", task, host)\n\treturn f.runWithEvilTag(host, fmt.Sprintf(script, task))\n}",
"func (c *ProjectsLocationsTagTemplatesDeleteCall) Force(force bool) *ProjectsLocationsTagTemplatesDeleteCall {\n\tc.urlParams_.Set(\"force\", fmt.Sprint(force))\n\treturn c\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
InspectTask function returns detailed status information for the referenced taskID.
|
// InspectTask returns detailed status information for the referenced
// taskID, or drivers.ErrTaskNotFound if the task is not known to this
// driver. The status itself is produced by the task handle.
func (d *Driver) InspectTask(taskID string) (*drivers.TaskStatus, error) {
	d.logger.Debug("InspectTask called")
	h, ok := d.tasks.Get(taskID)
	if !ok {
		return nil, drivers.ErrTaskNotFound
	}
	return h.taskStatus(), nil
}
|
[
"func (d *Driver) InspectTask(taskID string) (*drivers.TaskStatus, error) {\n\td.logger.Debug(\"InspectTask called\")\n\th, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn nil, drivers.ErrTaskNotFound\n\t}\n\n\tstatus := &drivers.TaskStatus{\n\t\tID: h.task.ID,\n\t\tName: h.task.Name,\n\t\tStartedAt: h.startedAt,\n\t\tCompletedAt: h.completedAt,\n\t\tNetworkOverride: h.net,\n\t\tExitResult: h.exitResult,\n\t}\n\n\tstatus.State = drivers.TaskStateUnknown\n\n\ts, err := d.domainManager.VMState(api.TaskID2DomainName(h.task.ID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// reflect the domain actual status\n\tswitch s {\n\tcase api.Running, api.Blocked, api.Paused, api.Shutdown, api.PMSuspended:\n\t\tstatus.State = drivers.TaskStateRunning\n\tcase api.Shutoff, api.Crashed:\n\t\tstatus.State = drivers.TaskStateExited\n\t}\n\n\td.logger.Debug(\"InspectTask returning\")\n\treturn status, nil\n}",
"func (c *Client) InspectTask(id string) (*swarm.Task, error) {\n\tresp, err := c.do(http.MethodGet, \"/tasks/\"+id, doOptions{})\n\tif err != nil {\n\t\tvar e *Error\n\t\tif errors.As(err, &e) && e.Status == http.StatusNotFound {\n\t\t\treturn nil, &NoSuchTask{ID: id}\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar task swarm.Task\n\tif err := json.NewDecoder(resp.Body).Decode(&task); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &task, nil\n}",
"func (ts *TasksRPC) GetTaskMonitor(ctx context.Context, req *taskproto.GetTaskRequest) (*taskproto.TaskResponse, error) {\n\tvar rsp taskproto.TaskResponse\n\tctx = common.GetContextData(ctx)\n\tctx = common.ModifyContext(ctx, common.TaskService, podName)\n\n\tl.LogWithFields(ctx).Debugf(\"Incoming request to get the task details and response body for the task %v\", req.TaskID)\n\trsp.Header = map[string]string{\n\t\t\"Date\": time.Now().Format(http.TimeFormat),\n\t}\n\tprivileges := []string{common.PrivilegeLogin}\n\tauthResp, err := ts.AuthenticationRPC(ctx, req.SessionToken, privileges)\n\tif authResp.StatusCode != http.StatusOK {\n\t\tif err != nil {\n\t\t\tl.LogWithFields(ctx).Errorf(\"Error while authorizing the session token : %s\", err.Error())\n\t\t}\n\t\tfillProtoResponse(ctx, &rsp, authResp)\n\t\treturn &rsp, nil\n\t}\n\t_, err = ts.GetSessionUserNameRPC(ctx, req.SessionToken)\n\tif err != nil {\n\t\tl.LogWithFields(ctx).Printf(authErrorMessage)\n\t\tfillProtoResponse(ctx, &rsp, common.GeneralError(http.StatusUnauthorized, response.NoValidSession, authErrorMessage, nil, nil))\n\t\treturn &rsp, nil\n\t}\n\t// get task status from database using task id\n\ttask, err := ts.GetTaskStatusModel(ctx, req.TaskID, common.InMemory)\n\tif err != nil {\n\t\tl.LogWithFields(ctx).Printf(\"error getting task status : %v\", err)\n\t\tfillProtoResponse(ctx, &rsp, common.GeneralError(http.StatusNotFound, response.ResourceNotFound, err.Error(), []interface{}{\"Task\", req.TaskID}, nil))\n\t\treturn &rsp, nil\n\t}\n\n\t// Check the state of the task\n\tif task.TaskState == \"Completed\" || task.TaskState == \"Cancelled\" || task.TaskState == \"Killed\" || task.TaskState == \"Exception\" {\n\t\t// return with the actual status code, along with response header and response body\n\t\t//Build the response Body\n\t\trsp.Header = task.Payload.HTTPHeaders\n\t\trsp.Body = task.TaskResponse\n\t\trsp.StatusCode = task.StatusCode\n\t\t// Delete the task from db as it is completed and 
user requested for the details.\n\t\t// return the user with task details by deleting the task from db\n\t\t// User should be careful as this is the last call to Task monitor API.\n\t\t/*\n\t\t\terr := task.Delete()\n\t\t\tif err != nil {\n\t\t\t\tl.Log.Printf(\"error while deleting the task from db: %v\", err)\n\t\t\t}\n\t\t*/\n\t\treturn &rsp, nil\n\t}\n\t// Construct the Task object to return as long as 202 code is being returned.\n\n\tmessageList := []tresponse.Messages{}\n\tfor _, element := range task.Messages {\n\t\tmessage := tresponse.Messages{\n\t\t\tMessageID: element.MessageID,\n\t\t\tRelatedProperties: element.RelatedProperties,\n\t\t\tMessage: element.Message,\n\t\t\tMessageArgs: element.MessageArgs,\n\t\t\tSeverity: element.Severity,\n\t\t}\n\t\tmessageList = append(messageList, message)\n\t}\n\n\tcommonResponse := response.Response{\n\t\tOdataType: common.TaskType,\n\t\tID: task.ID,\n\t\tName: task.Name,\n\t\tOdataContext: \"/redfish/v1/$metadata#Task.Task\",\n\t\tOdataID: \"/redfish/v1/TaskService/Tasks/\" + task.ID,\n\t}\n\trsp.StatusCode = http.StatusAccepted\n\trsp.StatusMessage = response.TaskStarted\n\tcommonResponse.MessageArgs = []string{task.ID}\n\tcommonResponse.CreateGenericResponse(rsp.StatusMessage)\n\n\thttpHeaders := []string{}\n\tfor key, value := range task.Payload.HTTPHeaders {\n\t\thttpHeaders = append(httpHeaders, fmt.Sprintf(\"%v: %v\", key, value))\n\t}\n\n\ttaskResponse := tresponse.Task{\n\t\tResponse: commonResponse,\n\t\tTaskState: task.TaskState,\n\t\tStartTime: task.StartTime.UTC(),\n\t\tEndTime: task.EndTime.UTC(),\n\t\tTaskStatus: task.TaskStatus,\n\t\tMessages: messageList,\n\t\tTaskMonitor: task.TaskMonitor,\n\t\tPayload: tresponse.Payload{\n\t\t\tHTTPHeaders: httpHeaders,\n\t\t\tHTTPOperation: task.Payload.HTTPOperation,\n\t\t\tJSONBody: string(task.Payload.JSONBody),\n\t\t\tTargetURI: task.Payload.TargetURI,\n\t\t},\n\t\tPercentComplete: task.PercentComplete,\n\t}\n\tif task.ParentID == \"\" {\n\t\tvar subTask = 
tresponse.ListMember{\n\t\t\tOdataID: \"/redfish/v1/TaskService/Tasks/\" + task.ID + \"/SubTasks\",\n\t\t}\n\t\ttaskResponse.SubTasks = &subTask\n\t}\n\trespBody := generateResponse(ctx, taskResponse)\n\trsp.Body = respBody\n\tl.LogWithFields(ctx).Debugf(\"Outgoing response for getting subtasks: %v\", string(respBody))\n\n\trsp.Header[\"location\"] = task.TaskMonitor\n\treturn &rsp, nil\n}",
"func (c CDCEtcdClient) GetTaskStatus(\n\tctx context.Context,\n\tchangefeedID string,\n\tcaptureID string,\n) (int64, *model.TaskStatus, error) {\n\tkey := GetEtcdKeyTaskStatus(changefeedID, captureID)\n\tresp, err := c.Client.Get(ctx, key)\n\tif err != nil {\n\t\treturn 0, nil, cerror.WrapError(cerror.ErrPDEtcdAPIError, err)\n\t}\n\tif resp.Count == 0 {\n\t\treturn 0, nil, cerror.ErrTaskStatusNotExists.GenWithStackByArgs(key)\n\t}\n\tinfo := &model.TaskStatus{}\n\terr = info.Unmarshal(resp.Kvs[0].Value)\n\treturn resp.Kvs[0].ModRevision, info, errors.Trace(err)\n}",
"func ShowTaskStat(taskJSON []byte, scale float64) error {\n\ttask := Task{}\n\n\terror := json.Unmarshal([]byte(taskJSON), &task)\n\tif error != nil {\n\t\treturn errors.New(\"[ERROR] Couldn't parse task log as JSON:\" + string(taskJSON))\n\t}\n\n\tstartDateTime, _ := time.Parse(DateTimeLayout, task.Start)\n\tendDateTime, _ := time.Parse(DateTimeLayout, task.End)\n\n\t// TODO: γγγ‘γγ£γ¨γγ©γΌγγγδ½γ¨γγγ\n\tduration := float64(endDateTime.Sub(startDateTime).Seconds()) * scale\n\tminutes := duration / 60.0\n\thours := minutes / 60.0\n\n\t// ε°ζ°ηΉδΈΈγ\n\tfmt.Fprintf(os.Stdout, \"%s %v\\n\", task.Name, math.Trunc(hours*100)/100.0)\n\treturn nil\n}",
"func (ctl *StatusController) PrintStatusOfTask(ctx context.Context, name string) error {\n\ttasks, err := ctl.getTask(ctx, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctl.printToView(tasks)\n\treturn nil\n}",
"func CheckStatus(c *fiber.Ctx) error {\n\ttaskID := c.Params(\"id\")\n\terr := TaskDB.View(func(txn *badger.Txn) error {\n\t\titem, err := txn.Get([]byte(taskID))\n\t\tif err != nil {\n\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tvar data []byte\n\t\terr = item.Value(func(val []byte) error {\n\n\t\t\tdata = append([]byte{}, val...)\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tlog.Printf(\"values: %s\\n\", data)\n\t\tp := new(Task)\n\t\terr = json.Unmarshal(data, &p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn c.JSON(fiber.Map{\n\t\t\t\"exec_id\": taskID,\n\t\t\t\"result\": p.Result,\n\t\t\t\"status\": p.Status,\n\t\t})\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (g *Grid) TaskDetails(pk string) (*TaskObject, *Response, error) {\n\ttaskObject := new(TaskObject)\n\turl := fmt.Sprintf(\"api/v2/task/%v/\", pk)\n\treq, err := g.NewRequest(\"GET\", url, nil)\n\tresp, err := g.Do(req, taskObject)\n\treturn taskObject, resp, err\n}",
"func (d *dispatcher) monitorTask(taskID int64) (finished bool, subTaskErrs []error) {\n\t// TODO: Consider putting the following operations into a transaction.\n\tvar err error\n\td.task, err = d.taskMgr.GetGlobalTaskByID(taskID)\n\tif err != nil {\n\t\tlogutil.BgLogger().Error(\"check task failed\", zap.Int64(\"task ID\", d.task.ID), zap.Error(err))\n\t\treturn false, nil\n\t}\n\tswitch d.task.State {\n\tcase proto.TaskStateCancelling:\n\t\treturn false, []error{errors.New(\"cancel\")}\n\tcase proto.TaskStateReverting:\n\t\tcnt, err := d.taskMgr.GetSubtaskInStatesCnt(d.task.ID, proto.TaskStateRevertPending, proto.TaskStateReverting)\n\t\tif err != nil {\n\t\t\tlogutil.BgLogger().Warn(\"check task failed\", zap.Int64(\"task ID\", d.task.ID), zap.Error(err))\n\t\t\treturn false, nil\n\t\t}\n\t\treturn cnt == 0, nil\n\tdefault:\n\t\tsubTaskErrs, err = d.taskMgr.CollectSubTaskError(d.task.ID)\n\t\tif err != nil {\n\t\t\tlogutil.BgLogger().Warn(\"collect subtask error failed\", zap.Int64(\"task ID\", d.task.ID), zap.Error(err))\n\t\t\treturn false, nil\n\t\t}\n\t\tif len(subTaskErrs) > 0 {\n\t\t\treturn false, subTaskErrs\n\t\t}\n\t\t// check subtasks pending or running.\n\t\tcnt, err := d.taskMgr.GetSubtaskInStatesCnt(d.task.ID, proto.TaskStatePending, proto.TaskStateRunning)\n\t\tif err != nil {\n\t\t\tlogutil.BgLogger().Warn(\"check task failed\", zap.Int64(\"task ID\", d.task.ID), zap.Error(err))\n\t\t\treturn false, nil\n\t\t}\n\t\treturn cnt == 0, nil\n\t}\n}",
"func GetEtcdKeyTaskStatus(changeFeedID, captureID string) string {\n\treturn TaskStatusKeyPrefix + \"/\" + captureID + \"/\" + changeFeedID\n}",
"func (l *LogCache) GetTaskStatus() define.TaskStatus {\n\treturn l.status\n}",
"func (k *K8sFlink) Inspect(ctx context.Context, task *spec.PipelineTask) (apistructs.TaskInspect, error) {\n\treturn apistructs.TaskInspect{}, errors.New(\"k8sflink doesn`t support inspect\")\n}",
"func (c *Client) GetDetail(taskId string) (*DtsTaskMeta, error) {\n\tresult := &DtsTaskMeta{}\n\terr := bce.NewRequestBuilder(c).\n\t\tWithMethod(http.GET).\n\t\tWithURL(getDtsUriWithTaskId(taskId)).\n\t\tWithResult(result).\n\t\tDo()\n\n\treturn result, err\n}",
"func getTaskDetail(w http.ResponseWriter, r *http.Request) {\n\t// Get request query value.\n\tif r.Method != http.MethodGet {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\tfmt.Fprintf(w, \"invalid_http_method\")\n\t\treturn\n\t}\n\tinputTaskID := r.URL.Query().Get(\"taskid\")\n\tif inputTaskID == \"\" {\n\t\thttp.Error(w, \"Can't get value.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check from database.\n\tqueryStr := \"SELECT * FROM task WHERE id='\" + inputTaskID + \"';\"\n\trets, err := dbconn.DBConn.Query(queryStr)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Send back json data.\n\tfor rets.Next() {\n\t\tvar taskData database.TaskType\n\t\tif err = rets.Scan(&taskData.ID, &taskData.Title, &taskData.Desc, &taskData.Duration, &taskData.RemainTime,\n\t\t\t&taskData.Type, &taskData.IsFinish, &taskData.IsGroupTask); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadGateway)\n\t\t}\n\n\t\tuserJson, err := json.Marshal(taskData)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write(userJson)\n\t}\n}",
"func (c CDCEtcdClient) GetAllTaskStatus(ctx context.Context, changefeedID string) (model.ProcessorsInfos, error) {\n\tresp, err := c.Client.Get(ctx, TaskStatusKeyPrefix, clientv3.WithPrefix())\n\tif err != nil {\n\t\treturn nil, cerror.WrapError(cerror.ErrPDEtcdAPIError, err)\n\t}\n\tpinfo := make(map[string]*model.TaskStatus, resp.Count)\n\tfor _, rawKv := range resp.Kvs {\n\t\tchangeFeed, err := model.ExtractKeySuffix(string(rawKv.Key))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tendIndex := len(rawKv.Key) - len(changeFeed) - 1\n\t\tcaptureID, err := model.ExtractKeySuffix(string(rawKv.Key[0:endIndex]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif changeFeed != changefeedID {\n\t\t\tcontinue\n\t\t}\n\t\tinfo := &model.TaskStatus{}\n\t\terr = info.Unmarshal(rawKv.Value)\n\t\tif err != nil {\n\t\t\treturn nil, cerror.ErrDecodeFailed.GenWithStackByArgs(\"failed to unmarshal task status: %s\", err)\n\t\t}\n\t\tinfo.ModRevision = rawKv.ModRevision\n\t\tpinfo[captureID] = info\n\t}\n\treturn pinfo, nil\n}",
"func (v1 *V1) GetTask(w http.ResponseWriter, r *http.Request) {\n\ttaskID := chi.URLParam(r, \"taskID\")\n\tshouldDeleteTask := false\n\tdeleteParam := r.URL.Query().Get(\"delete\")\n\tif deleteParam == \"1\" {\n\t\tshouldDeleteTask = true\n\t}\n\n\ttask := v1.metaCrawlSvc.TaskByID(taskID)\n\tif task == nil {\n\t\tv1.responseErrorJSON(w, \"task not found\", 404)\n\t\treturn\n\t}\n\n\ttaskStatus := task.Status()\n\tswitch taskStatus {\n\tcase metacrawl.TaskInProgress:\n\t\tv1.responseJSON(w, \"task in progress\", 204)\n\t\treturn\n\tcase metacrawl.TaskCompleted:\n\t\tif shouldDeleteTask {\n\t\t\tv1.metaCrawlSvc.DeleteTaskByID(taskID)\n\t\t}\n\n\t\tv1.responseCSV(w, taskID, task.Render(), 200)\n\t}\n}",
"func (p *proteusAPI) GetTemplateTaskStatus(taskId string) (string, error) {\n\tΞ± := struct {\n\t\tM OperationGetTemplateTaskStatus `xml:\"tns:getTemplateTaskStatus\"`\n\t}{\n\t\tOperationGetTemplateTaskStatus{\n\t\t\t&taskId,\n\t\t},\n\t}\n\n\tΞ³ := struct {\n\t\tM OperationGetTemplateTaskStatusResponse `xml:\"getTemplateTaskStatusResponse\"`\n\t}{}\n\tif err := p.cli.RoundTripWithAction(\"GetTemplateTaskStatus\", Ξ±, &Ξ³); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn *Ξ³.M.Return, nil\n}",
"func GetVipStatusByTask(taskId int64) (VipStatusByTaskItems, error) {\r\n\t// generate url\r\n\turl := fmt.Sprintf(\"http://rms.baidu.com/?r=interface/api&handler=getBgwListCurrentStep&list_id=%d\", taskId)\r\n\r\n\t// request api for result\r\n\treturn getVipStatusByTask(url)\r\n}",
"func (c *Client) GetConnectorTaskStatus(name string, taskID int) (*TaskState, *http.Response, error) {\n\tpath := fmt.Sprintf(\"connectors/%v/tasks/%v/status\", name, taskID)\n\tstatus := new(TaskState)\n\trespnse, err := c.get(path, status)\n\treturn status, respnse, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TaskStats function returns a channel which the driver should send stats to at the given interval. The driver must send stats at the given interval until the given context is canceled or the task terminates.
|
// TaskStats returns a channel over which resource usage statistics
// (CPU, memory, etc.) for the given task are emitted at the requested
// interval. The emitter goroutine runs until ctx is canceled or the
// task terminates.
func (d *Driver) TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *drivers.TaskResourceUsage, error) {
	d.logger.Debug("TaskStats called", "taskID", taskID)

	handle, found := d.tasks.Get(taskID)
	if !found {
		return nil, drivers.ErrTaskNotFound
	}

	// The handle owns the emitter loop; it honors ctx cancellation.
	usageCh := make(chan *drivers.TaskResourceUsage)
	go handle.runStatsEmitter(ctx, usageCh, interval)

	return usageCh, nil
}
|
[
"func (d *Driver) TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *drivers.TaskResourceUsage, error) {\n\td.logger.Debug(\"TaskStats called\")\n\th, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn nil, drivers.ErrTaskNotFound\n\t}\n\n\t// TODO: implement driver specific logic to send task stats.\n\t//\n\t// This function returns a channel that Nomad will use to listen for task\n\t// stats (e.g., CPU and memory usage) in a given interval. It should send\n\t// stats until the context is canceled or the task stops running.\n\td.logger.Debug(\"TaskStats returning\")\n\treturn h.Stats(ctx, interval)\n}",
"func (d *NvidiaDevice) Stats(ctx context.Context, interval time.Duration) (<-chan *device.StatsResponse, error) {\n\toutCh := make(chan *device.StatsResponse)\n\tgo d.stats(ctx, outCh, interval)\n\treturn outCh, nil\n}",
"func ChannelStats(namespace string) StatsCollector {\n\tlabels := []string{\"topic\", \"channel\", \"paused\"}\n\tnamespace += \"_channel\"\n\n\treturn channelStats{\n\t\t{\n\t\t\tval: func(c *channel) float64 { return float64(len(c.Clients)) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"client_count\",\n\t\t\t\tHelp: \"Number of clients\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return float64(c.Depth) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"depth\",\n\t\t\t\tHelp: \"Queue depth\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return float64(c.BackendDepth) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"backend_depth\",\n\t\t\t\tHelp: \"Queue backend depth\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return float64(c.MessageCount) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"message_count\",\n\t\t\t\tHelp: \"Queue message count\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return float64(c.InFlightCount) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"in_flight_count\",\n\t\t\t\tHelp: \"In flight count\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return c.E2eLatency.percentileValue(0) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"e2e_latency_99p\",\n\t\t\t\tHelp: \"e2e latency 99th percentile\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return c.E2eLatency.percentileValue(1) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"e2e_latency_95p\",\n\t\t\t\tHelp: 
\"e2e latency 95th percentile\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return float64(c.DeferredCount) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"deferred_count\",\n\t\t\t\tHelp: \"Deferred count\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return float64(c.RequeueCount) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"requeue_count\",\n\t\t\t\tHelp: \"Requeue Count\",\n\t\t\t}, labels),\n\t\t},\n\t\t{\n\t\t\tval: func(c *channel) float64 { return float64(c.TimeoutCount) },\n\t\t\tvec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"timeout_count\",\n\t\t\t\tHelp: \"Timeout count\",\n\t\t\t}, labels),\n\t\t},\n\t}\n}",
"func Interval(ctx context.Context, duration time.Duration) (chan struct{}, context.CancelFunc) {\n\tctx, cancel := context.WithCancel(ctx)\n\tch := make(chan struct{}, 1)\n\tgo func() {\n\t\tticker := time.NewTicker(duration)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tclose(ch)\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tch <- struct{}{}\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch, cancel\n}",
"func (c *TimeoutChan) Stats() TimeoutChanStats {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn TimeoutChanStats{\n\t\tPushed: c.pushed,\n\t\tPopped: c.popped,\n\t\tCleared: c.cleared,\n\t}\n}",
"func (c *Crontab) StatsChan() chan ExecStats {\n\treturn c.statsChan\n}",
"func ComputeStatsInterval(r io.Reader, start, end time.Time) (cs []Stats, err error) {\n\tch := make(chan Chunk)\n\twg := new(sync.WaitGroup)\n\twg.Add(1)\n\tgo func() {\n\t\tfor c := range ch {\n\t\t\tif c.Clip(start, end) {\n\t\t\t\tcs = append(cs, c.Stats())\n\t\t\t}\n\t\t}\n\t\twg.Done()\n\t}()\n\terr = Chunks(r, ch)\n\tif err != nil {\n\t\treturn\n\t}\n\twg.Wait()\n\treturn\n}",
"func GetStats(p *config.ProxyMonitorMetric, cfg *config.CCConfig, timeout time.Duration) *Stats {\n\tbytes := config.Encode(p)\n\tfmt.Println(string(bytes))\n\tvar ch = make(chan struct{})\n\tvar host = p.IP + \":\" + p.AdminPort\n\tfmt.Println(host)\n\tstats := &Stats{}\n\n\tgo func(host string) {\n\t\tdefer close(ch)\n\t\tstats.Host = host\n\t\terr := pingCheck(host, cfg.CCProxyServer.User, cfg.CCProxyServer.Password)\n\t\tif err != nil {\n\t\t\tstats.Error = err.Error()\n\t\t\tstats.Closed = true\n\t\t} else {\n\t\t\tstats.Closed = false\n\t\t}\n\t}(host)\n\n\tselect {\n\tcase <-ch:\n\t\treturn stats\n\tcase <-time.After(timeout):\n\t\treturn &Stats{Host: host, Timeout: true}\n\t}\n}",
"func (jbobject *TaskContext) TaskMetrics() *ExecutorTaskMetrics {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"taskMetrics\", \"org/apache/spark/executor/TaskMetrics\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tretconv := javabind.NewJavaToGoCallable()\n\tdst := &javabind.Callable{}\n\tretconv.Dest(dst)\n\tif err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {\n\t\tpanic(err)\n\t}\n\tretconv.CleanUp()\n\tunique_x := &ExecutorTaskMetrics{}\n\tunique_x.Callable = dst\n\treturn unique_x\n}",
"func logTaskEndStats(ctx context.Context, t *task.Task) error {\n\tmsg := message.Fields{\n\t\t\"abort\": t.Aborted,\n\t\t\"activated_by\": t.ActivatedBy,\n\t\t\"build\": t.BuildId,\n\t\t\"current_runtime_secs\": t.FinishTime.Sub(t.StartTime).Seconds(),\n\t\t\"display_task\": t.DisplayOnly,\n\t\t\"execution\": t.Execution,\n\t\t\"generator\": t.GenerateTask,\n\t\t\"group\": t.TaskGroup,\n\t\t\"group_max_hosts\": t.TaskGroupMaxHosts,\n\t\t\"priority\": t.Priority,\n\t\t\"project\": t.Project,\n\t\t\"requester\": t.Requester,\n\t\t\"stat\": \"task-end-stats\",\n\t\t\"status\": t.GetDisplayStatus(),\n\t\t\"task\": t.DisplayName,\n\t\t\"task_id\": t.Id,\n\t\t\"total_wait_secs\": t.FinishTime.Sub(t.ActivatedTime).Seconds(),\n\t\t\"start_time\": t.StartTime,\n\t\t\"scheduled_time\": t.ScheduledTime,\n\t\t\"variant\": t.BuildVariant,\n\t\t\"version\": t.Version,\n\t}\n\n\tif t.IsPartOfDisplay() {\n\t\tmsg[\"display_task_id\"] = t.DisplayTaskId\n\t}\n\n\tpRef, _ := FindBranchProjectRef(t.Project)\n\tif pRef != nil {\n\t\tmsg[\"project_identifier\"] = pRef.Identifier\n\t}\n\n\tisHostMode := t.IsHostTask()\n\tif isHostMode {\n\t\ttaskHost, err := host.FindOneId(ctx, t.HostId)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif taskHost == nil {\n\t\t\treturn errors.Errorf(\"host '%s' not found\", t.HostId)\n\t\t}\n\t\tmsg[\"host_id\"] = taskHost.Id\n\t\tmsg[\"distro\"] = taskHost.Distro.Id\n\t\tmsg[\"provider\"] = taskHost.Distro.Provider\n\t\tif evergreen.IsEc2Provider(taskHost.Distro.Provider) && len(taskHost.Distro.ProviderSettingsList) > 0 {\n\t\t\tinstanceType, ok := taskHost.Distro.ProviderSettingsList[0].Lookup(\"instance_type\").StringValueOK()\n\t\t\tif ok {\n\t\t\t\tmsg[\"instance_type\"] = instanceType\n\t\t\t}\n\t\t}\n\t} else {\n\t\ttaskPod, err := pod.FindOneByID(t.PodID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"finding pod '%s'\", t.PodID)\n\t\t}\n\t\tif taskPod == nil {\n\t\t\treturn errors.Errorf(\"pod '%s' not found\", 
t.PodID)\n\t\t}\n\t\tmsg[\"pod_id\"] = taskPod.ID\n\t\tmsg[\"pod_os\"] = taskPod.TaskContainerCreationOpts.OS\n\t\tmsg[\"pod_arch\"] = taskPod.TaskContainerCreationOpts.Arch\n\t\tmsg[\"cpu\"] = taskPod.TaskContainerCreationOpts.CPU\n\t\tmsg[\"memory_mb\"] = taskPod.TaskContainerCreationOpts.MemoryMB\n\t\tif taskPod.TaskContainerCreationOpts.OS.Matches(evergreen.ECSOS(pod.OSWindows)) {\n\t\t\tmsg[\"windows_version\"] = taskPod.TaskContainerCreationOpts.WindowsVersion\n\t\t}\n\t}\n\n\tif !t.DependenciesMetTime.IsZero() {\n\t\tmsg[\"dependencies_met_time\"] = t.DependenciesMetTime\n\t}\n\n\tif !t.ContainerAllocatedTime.IsZero() {\n\t\tmsg[\"container_allocated_time\"] = t.ContainerAllocatedTime\n\t}\n\n\tgrip.Info(msg)\n\treturn nil\n}",
"func (c *Client) GetStats() (*Tasks, error) {\n\tqp := map[string]string{\n\t\t\"skip\": \"0\",\n\t\t\"take\": \"0\",\n\t}\n\tresp, err := c.DoGetRequest(\"tasks\", qp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttasks := Tasks{}\n\terr = json.NewDecoder(resp.Body).Decode(&tasks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &tasks, nil\n}",
"func (t *proxyTask) Stats(ctx context.Context) (*types.Any, error) {\n\tresp, err := t.rt.Stats(ctx, &rtapi.StatsRequest{\n\t\tID: t.tid,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Stats, nil\n}",
"func (i *instanceManager) dispenseTaskEventsCh() (<-chan *drivers.TaskEvent, context.CancelFunc, error) {\n\tdriver, err := i.dispense()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tctx, cancel := context.WithCancel(i.ctx)\n\teventsCh, err := driver.TaskEvents(ctx)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, nil, err\n\t}\n\n\treturn eventsCh, cancel, nil\n}",
"func (filter TaskReliabilityFilter) GetTaskStats() (taskStats []taskstats.TaskStats, err error) {\n\tpipeline := filter.taskReliabilityQueryPipeline()\n\terr = db.Aggregate(taskstats.DailyTaskStatsCollection, pipeline, &taskStats)\n\treturn\n}",
"func (stats *Reporter) reportInterval() (time.Duration, <-chan struct{}) {\n\tstats.frequencyMu.Lock()\n\tdefer stats.frequencyMu.Unlock()\n\treturn ReporterInterval.Get(&stats.settings.SV), stats.frequencyMu.changeCh\n}",
"func (node *DataNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {\n\treturn &milvuspb.StringResponse{\n\t\tStatus: &commonpb.Status{\n\t\t\tErrorCode: commonpb.ErrorCode_Success,\n\t\t\tReason: \"\",\n\t\t},\n\t\tValue: \"\",\n\t}, nil\n}",
"func monitorTask(ctx context.Context, task *model.Task, channel chan model.Event) {\n\t// derive new timeout context\n\tmonitorCtx, cancel := context.WithTimeout(ctx, 10 * time.Second)\n\tdefer cancel()\n\n\tselect {\n\tcase <- monitorCtx.Done():\n\t\t// check status of task\n\t\tstatus := task.GetStatus()\n\n\t\tif status != model.TaskStatusInitial && status != model.TaskStatusExecuting {\n\t\t\treturn\n\t\t}\n\n\t\t// task may still be active\n\t\tswitch monitorCtx.Err().Error() {\n\t\tcase \"context canceled\": // termination of processes\n\t\t\tutil.LogInfo(task.UUID, \"ENG\", \"termination\")\n\t\t\tchannel <- model.NewEvent(task.Domain, task.UUID, model.EventTypeTaskTermination, task.UUID, \"termination\")\n\t\tdefault: // timeout\n\t\t\tutil.LogInfo(task.UUID, \"ENG\", \"timeout\")\n\t\t\tchannel <- model.NewEvent(task.Domain, task.UUID, model.EventTypeTaskTimeout, task.UUID, \"timeout\")\n\t\t}\n\t}\n}",
"func (s *Store) Stats(ctx context.Context, req *jobqueue.StatsRequest) (*jobqueue.Stats, error) {\n\ts.stmtOnce.Do(s.initStmt)\n\n\tstats := new(jobqueue.Stats)\n\tg, ctx := errgroup.WithContext(ctx)\n\n\t// Waiting\n\tg.Go(func() error {\n\t\twhere := map[string]interface{}{\n\t\t\t\"state\": jobqueue.Waiting,\n\t\t}\n\t\tif v := req.Topic; v != \"\" {\n\t\t\twhere[\"topic\"] = v\n\t\t}\n\t\tif v := req.CorrelationGroup; v != \"\" {\n\t\t\twhere[\"correlation_group\"] = v\n\t\t}\n\t\tsql, args, err := sq.Select(\"COUNT(*)\").From(\"jobqueue_jobs\").Where(where).ToSql()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn s.db.QueryRowContext(ctx, sql, args...).Scan(&stats.Waiting)\n\t})\n\n\t// Working\n\tg.Go(func() error {\n\t\twhere := map[string]interface{}{\n\t\t\t\"state\": jobqueue.Working,\n\t\t}\n\t\tif v := req.Topic; v != \"\" {\n\t\t\twhere[\"topic\"] = v\n\t\t}\n\t\tif v := req.CorrelationGroup; v != \"\" {\n\t\t\twhere[\"correlation_group\"] = v\n\t\t}\n\t\tsql, args, err := sq.Select(\"COUNT(*)\").From(\"jobqueue_jobs\").Where(where).ToSql()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn s.db.QueryRowContext(ctx, sql, args...).Scan(&stats.Working)\n\t})\n\n\t// Succeeded\n\tg.Go(func() error {\n\t\twhere := map[string]interface{}{\n\t\t\t\"state\": jobqueue.Succeeded,\n\t\t}\n\t\tif v := req.Topic; v != \"\" {\n\t\t\twhere[\"topic\"] = v\n\t\t}\n\t\tif v := req.CorrelationGroup; v != \"\" {\n\t\t\twhere[\"correlation_group\"] = v\n\t\t}\n\t\tsql, args, err := sq.Select(\"COUNT(*)\").From(\"jobqueue_jobs\").Where(where).ToSql()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn s.db.QueryRowContext(ctx, sql, args...).Scan(&stats.Succeeded)\n\t})\n\n\t// Failed\n\tg.Go(func() error {\n\t\twhere := map[string]interface{}{\n\t\t\t\"state\": jobqueue.Failed,\n\t\t}\n\t\tif v := req.Topic; v != \"\" {\n\t\t\twhere[\"topic\"] = v\n\t\t}\n\t\tif v := req.CorrelationGroup; v != \"\" {\n\t\t\twhere[\"correlation_group\"] = 
v\n\t\t}\n\t\tsql, args, err := sq.Select(\"COUNT(*)\").From(\"jobqueue_jobs\").Where(where).ToSql()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn s.db.QueryRowContext(ctx, sql, args...).Scan(&stats.Failed)\n\t})\n\n\tif err := g.Wait(); err != nil {\n\t\treturn nil, s.wrapError(err)\n\t}\n\treturn stats, nil\n}",
"func (csw *ChannelStatsWatcher) Run(ctx context.Context) {\n\tflushed, unregister := csw.statser.RegisterFlush()\n\tdefer unregister()\n\n\tticker := time.NewTicker(csw.sampleInterval)\n\tdefer ticker.Stop()\n\n\tcsw.sample()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-flushed:\n\t\t\tcsw.emit()\n\t\t\tcsw.sample() // Ensure there will always be at least one sample\n\t\tcase <-ticker.C:\n\t\t\tcsw.sample()\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TaskEvents function allows the driver to publish driver-specific events about tasks, while the Nomad client publishes events associated with an allocation.
|
// TaskEvents exposes the driver's task event stream to the Nomad client.
// Delivery stops when ctx is canceled.
func (d *Driver) TaskEvents(ctx context.Context) (<-chan *drivers.TaskEvent, error) {
	eventCh, err := d.eventer.TaskEvents(ctx)
	if err != nil {
		return nil, err
	}
	return eventCh, nil
}
|
[
"func (d *Driver) TaskEvents(ctx context.Context) (<-chan *drivers.TaskEvent, error) {\n\td.logger.Debug(\"task events called and returning\")\n\treturn d.eventer.TaskEvents(ctx)\n}",
"func (c *RabbitMQConnection) PublishEventsTrackingTask(payload []byte) error {\n\tch, err := c.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ch.Close()\n\n\terr = ch.Publish(\n\t\t\"tracking\", //Exchange\n\t\t\"tracking-queue\", //Routing key\n\t\tfalse, //Mandatory\n\t\tfalse, //Immediate\n\t\tamqp.Publishing{\n\t\t\tDeliveryMode: amqp.Persistent,\n\t\t\tContentType: \"text/plain\",\n\t\t\tBody: payload,\n\t\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (b *Broker) PublishTaskEvent(key string, message *broker.Message) error {\n\tbytes, err := json.Marshal(message)\n\tif err != nil {\n\t\tlog.Error(\"Failed to marshal message: \", err)\n\t\treturn err\n\t}\n\tconn := b.pool.Get()\n\tdefer conn.Close()\n\t_, err = conn.Do(\"PUBLISH\", TaskEventChannel, bytes)\n\treturn err\n}",
"func (engine *DockerTaskEngine) TaskEvents() (<-chan api.ContainerStateChange, <-chan error) {\n\treturn engine.container_events, engine.event_errors\n}",
"func consumeTaskEvents(event interface{}) {\n\tdata, _ := json.Marshal(&event)\n\tvar eventData common.Events\n\terr := json.Unmarshal(data, &eventData)\n\tif err != nil {\n\t\tl.Log.Error(\"Error while consuming task events\", err)\n\t\treturn\n\t}\n\n\tvar taskEvent dmtf.Event\n\terr = json.Unmarshal(eventData.Request, &taskEvent)\n\tif err != nil {\n\t\tl.Log.Error(\"Error while consuming task events\", err)\n\t\treturn\n\t}\n\n\tfor _, eventRecord := range taskEvent.Events {\n\t\tTaskEventRecvQueue <- eventRecord\n\t}\n}",
"func TaskProcessInfoEvents(taskId string, ts time.Time, limit, sort int) db.Q {\n\tfilter := bson.M{\n\t\tDataKey + \".\" + ResourceTypeKey: EventTaskProcessInfo,\n\t\tResourceIdKey: taskId,\n\t\tTypeKey: EventTaskProcessInfo,\n\t}\n\n\tsortSpec := TimestampKey\n\n\tif sort < 0 {\n\t\tsortSpec = \"-\" + sortSpec\n\t\tfilter[TimestampKey] = bson.M{\"$lte\": ts}\n\t} else {\n\t\tfilter[TimestampKey] = bson.M{\"$gte\": ts}\n\t}\n\n\treturn db.Query(filter).Sort([]string{sortSpec}).Limit(limit)\n}",
"func TaskSystemInfoEvents(taskId string, ts time.Time, limit, sort int) db.Q {\n\tfilter := bson.M{\n\t\tDataKey + \".\" + ResourceTypeKey: EventTaskSystemInfo,\n\t\tResourceIdKey: taskId,\n\t\tTypeKey: EventTaskSystemInfo,\n\t}\n\n\tsortSpec := TimestampKey\n\n\tif sort < 0 {\n\t\tsortSpec = \"-\" + sortSpec\n\t\tfilter[TimestampKey] = bson.M{\"$lte\": ts}\n\t} else {\n\t\tfilter[TimestampKey] = bson.M{\"$gte\": ts}\n\t}\n\n\treturn db.Query(filter).Sort([]string{sortSpec}).Limit(limit)\n}",
"func (c *subContext) processTaskEvents(ctx context.Context, eventCh <-chan subtaskapi.Event, indCh chan<- indication.Indication) {\n\t// Create a wait group to close the indications channel\n\twg := &sync.WaitGroup{}\n\n\t// Wait for the watch context to be done\n\twg.Add(1)\n\tgo func() {\n\t\t<-ctx.Done()\n\t\twg.Done()\n\t}()\n\n\t// Once the wait group is complete, close the indications channel\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(indCh)\n\t}()\n\n\tvar prevCancel context.CancelFunc\n\tvar prevEndpoint epapi.ID\n\tfor event := range eventCh {\n\t\t// Only interested in tasks related to this subscription\n\t\tif event.Task.SubscriptionID != c.sub.ID {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the stream is already open for the associated E2 endpoint, skip the event\n\t\tif event.Task.EndpointID == prevEndpoint && event.Task.Lifecycle.Failure == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the task failed, propagate the error\n\t\tif event.Task.Lifecycle.Failure != nil {\n\t\t\tc.errCh <- errors.NewInternal(event.Task.Lifecycle.Failure.Message)\n\t\t}\n\n\t\t// If the task was assigned to a new endpoint, close the prior stream and open a new one.\n\t\t// If the task was unassigned, close the prior stream and wait for a new event.\n\t\tif event.Type == subtaskapi.EventType_NONE || event.Type == subtaskapi.EventType_CREATED {\n\t\t\tif prevCancel != nil {\n\t\t\t\tprevCancel()\n\t\t\t}\n\t\t\twg.Add(1)\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tgo func(epID epapi.ID) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tdefer cancel()\n\t\t\t\terr := c.openStream(ctx, epID, indCh)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\t\t\t}(event.Task.EndpointID)\n\t\t\tprevEndpoint = event.Task.EndpointID\n\t\t\tprevCancel = cancel\n\t\t} else if event.Type == subtaskapi.EventType_REMOVED {\n\t\t\tprevEndpoint = \"\"\n\t\t\tif prevCancel != nil {\n\t\t\t\tprevCancel()\n\t\t\t\tprevCancel = nil\n\t\t\t}\n\t\t}\n\t}\n}",
"func assertTaskEvent(t *testing.T, ev *events.Event, task *types.Task) {\n\tassert.Equal(t, TASK_STREAM, ev.Stream)\n\tvar other types.Task\n\tassert.NoError(t, gob.NewDecoder(bytes.NewReader(ev.Data)).Decode(&other))\n\tdeepequal.AssertDeepEqual(t, task, &other)\n\tassert.True(t, task.Created.Equal(ev.Timestamp))\n}",
"func Event(ctx context.Context, scope EventScope, name string, args ...interface{}) {\n\tt := GetTask(ctx)\n\tonEvent(ctx, t, scope, name, args)\n}",
"func (m *LB) PublishEvents(\n\tsignaler op.Signaler,\n\topts outputs.Options,\n\tevents []common.MapStr,\n) error {\n\treturn m.publishEventsMessage(opts, eventsMessage{\n\t\tworker: -1,\n\t\tsignaler: signaler,\n\t\tevents: events,\n\t})\n}",
"func eventCommon(request *TaskRequest, w *Worker, s *state, t time.Time) *metrics.TaskEvent {\n\tvar baseLabels sort.StringSlice = request.BaseLabels\n\tvar provLabels sort.StringSlice = request.ProvisionableLabels\n\tvar botID string\n\tvar botLabels sort.StringSlice\n\tvar cost []float32\n\tif w != nil {\n\t\tbotID = string(w.ID)\n\t\tbotLabels = w.Labels.ToSlice()\n\t\tbotLabels.Sort()\n\t\tcost = w.runningTask.cost[:]\n\t}\n\tbaseLabels.Sort()\n\tprovLabels.Sort()\n\taccountBalance, accountValid := s.balances[request.AccountID]\n\treturn &metrics.TaskEvent{\n\t\tAccountBalance: accountBalance[:],\n\t\tAccountId: string(request.AccountID),\n\t\tAccountValid: accountValid,\n\t\tBaseLabels: baseLabels,\n\t\tBotId: botID,\n\t\tBotDimensions: botLabels,\n\t\tCost: cost,\n\t\tProvisionableLabels: provLabels,\n\t\tTaskId: string(request.ID),\n\t\tTime: tutils.TimestampProto(t),\n\t}\n}",
"func (e *Endpoints) DealTaskEvent(ctx context.Context, r *http.Request, vars map[string]string) (httpserver.Responser, error) {\n\tvar (\n\t\treq apistructs.PipelineTaskEvent\n\t\trunningTaskID int64\n\t\terr error\n\t)\n\tif r.Body == nil {\n\t\treturn apierrors.ErrDealTaskEvents.MissingParameter(\"body\").ToResp(), nil\n\t}\n\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\treturn apierrors.ErrDealTaskEvents.InvalidParameter(err).ToResp(), nil\n\t}\n\tlogrus.Debugf(\"ReceiveTaskEvents: request body: %+v\", req)\n\n\tif req.Event == \"pipeline_task\" {\n\t\tif runningTaskID, err = e.orgResource.DealReceiveTaskEvent(&req); err != nil {\n\t\t\treturn apierrors.ErrDealTaskEvents.InvalidParameter(err).ToResp(), nil\n\t\t}\n\t} else if req.Event == \"pipeline_task_runtime\" {\n\t\tif runningTaskID, err = e.orgResource.DealReceiveTaskRuntimeEvent(&req); err != nil {\n\t\t\treturn apierrors.ErrDealTaskEvents.InvalidParameter(err).ToResp(), nil\n\t\t}\n\t}\n\n\treturn httpserver.OkResp(runningTaskID)\n}",
"func (d *Datastore) WriteEvent(ctx context.Context, e *events.Event) error {\n\n\tswitch e.Type {\n\n\tcase events.Type_TASK_CREATED:\n\t\tputKeys, putData := marshalTask(e.GetTask())\n\t\t_, err := d.client.PutMulti(ctx, putKeys, putData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase events.Type_EXECUTOR_STDOUT:\n\t\t_, err := d.client.Put(ctx, stdoutKey(e.Id, e.Attempt, e.Index), marshalEvent(e))\n\t\treturn err\n\n\tcase events.Type_EXECUTOR_STDERR:\n\t\t_, err := d.client.Put(ctx, stderrKey(e.Id, e.Attempt, e.Index), marshalEvent(e))\n\t\treturn err\n\n\tcase events.Type_TASK_STATE:\n\t\tres, err := d.GetTask(ctx, &tes.GetTaskRequest{\n\t\t\tId: e.Id,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfrom := res.State\n\t\tto := e.GetState()\n\t\tif err := tes.ValidateTransition(from, to); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfallthrough\n\n\tdefault:\n\t\t_, err := d.client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {\n\t\t\tprops := datastore.PropertyList{}\n\t\t\terr := tx.Get(taskKey(e.Id), &props)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttask := &tes.Task{}\n\t\t\tunmarshalTask(task, props)\n\t\t\ttb := events.TaskBuilder{Task: task}\n\t\t\terr = tb.WriteEvent(context.Background(), e)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tputKeys, putData := marshalTask(task)\n\t\t\t_, err = tx.PutMulti(putKeys, putData)\n\t\t\treturn err\n\t\t})\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (s *BaseAspidaListener) EnterTasks(ctx *TasksContext) {}",
"func (c *CloudWatchLogs) TaskLogEvents(logGroupName string, streamLastEventTime map[string]int64, opts ...GetLogEventsOpts) (*LogEventsOutput, error) {\n\tvar events []*Event\n\tvar in *cloudwatchlogs.GetLogEventsInput\n\tlogStreamNames, err := c.logStreams(logGroupName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, logStreamName := range logStreamNames {\n\t\tin = &cloudwatchlogs.GetLogEventsInput{\n\t\t\tLogGroupName: aws.String(logGroupName),\n\t\t\tLogStreamName: logStreamName,\n\t\t\tLimit: aws.Int64(10), // default to be 10\n\t\t}\n\t\tfor _, opt := range opts {\n\t\t\topt(in)\n\t\t}\n\t\tif streamLastEventTime[*logStreamName] != 0 {\n\t\t\t// If last event for this log stream exists, increment last log event timestamp\n\t\t\t// by one to get logs after the last event.\n\t\t\tin.SetStartTime(streamLastEventTime[*logStreamName] + 1)\n\t\t}\n\t\t// TODO: https://github.com/aws/amazon-ecs-cli-v2/pull/628#discussion_r374291068 and https://github.com/aws/amazon-ecs-cli-v2/pull/628#discussion_r374294362\n\t\tresp, err := c.client.GetLogEvents(in)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"get log events of %s/%s: %w\", logGroupName, *logStreamName, err)\n\t\t}\n\n\t\tfor _, event := range resp.Events {\n\t\t\tlog := &Event{\n\t\t\t\tLogStreamName: trimLogStreamName(*logStreamName),\n\t\t\t\tIngestionTime: aws.Int64Value(event.IngestionTime),\n\t\t\t\tMessage: aws.StringValue(event.Message),\n\t\t\t\tTimestamp: aws.Int64Value(event.Timestamp),\n\t\t\t}\n\t\t\tevents = append(events, log)\n\t\t}\n\t\tif len(resp.Events) != 0 {\n\t\t\tstreamLastEventTime[*logStreamName] = *resp.Events[len(resp.Events)-1].Timestamp\n\t\t}\n\t}\n\tsort.SliceStable(events, func(i, j int) bool { return events[i].Timestamp < events[j].Timestamp })\n\tvar truncatedEvents []*Event\n\tif len(events) >= int(*in.Limit) {\n\t\ttruncatedEvents = events[len(events)-int(*in.Limit):]\n\t} else {\n\t\ttruncatedEvents = events\n\t}\n\treturn &LogEventsOutput{\n\t\tEvents: 
truncatedEvents,\n\t\tLastEventTime: streamLastEventTime,\n\t}, nil\n}",
"func eventAssigned(request *TaskRequest, w *Worker, s *state, t time.Time, details *metrics.TaskEvent_AssignedDetails) *metrics.TaskEvent {\n\te := eventCommon(request, w, s, t)\n\te.EventType = metrics.TaskEvent_QSCHEDULER_ASSIGNED\n\te.Category = metrics.TaskEvent_CATEGORY_QSCHEDULER\n\te.Details = &metrics.TaskEvent_AssignedDetails_{AssignedDetails: details}\n\treturn e\n}",
"func (ep *LocalEventPublisher) PublishEvents(events []EventJsoner) error {\n\tfor _, event := range events {\n\t\tjsn, err := json.Marshal(event)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"Serialised event: %s\\n\", jsn)\n\t}\n\treturn nil\n}",
"func (sender *Sender) SendEvents(events moira.NotificationEvents, contact moira.ContactData, trigger moira.TriggerData, plots [][]byte, throttled bool) error {\n\tcreateAlertRequest := sender.makeCreateAlertRequest(events, contact, trigger, plots, throttled)\n\t_, err := sender.client.Create(context.Background(), createAlertRequest)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to send %s event message to opsgenie: %s\", trigger.ID, err.Error())\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SignalTask function is used by drivers which support sending OS signals (SIGHUP, SIGKILL, SIGUSR1 etc.) to the task. It is an optional function and is listed as a capability in the driver Capabilities struct.
|
// SignalTask forwards the named OS signal (e.g. "SIGHUP", "SIGUSR1") to
// the container backing the given task. Returns drivers.ErrTaskNotFound
// when no handle exists for taskID.
func (d *Driver) SignalTask(taskID string, signal string) error {
	taskHandle, found := d.tasks.Get(taskID)
	if !found {
		return drivers.ErrTaskNotFound
	}
	// Podman's kill endpoint accepts the signal name directly.
	return d.podman.ContainerKill(d.ctx, taskHandle.containerID, signal)
}
|
[
"func (d *Driver) SignalTask(taskID string, signal string) error {\n\td.logger.Debug(\"signal task called and returning\")\n\n\thandle, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn drivers.ErrTaskNotFound\n\t}\n\n\t// TODO: implement driver specific signal handling logic.\n\t//\n\t// The given signal must be forwarded to the target taskID. If this plugin\n\t// doesn't support receiving signals (capability SendSignals is set to\n\t// false) you can just return nil.\n\tsig := os.Interrupt\n\tif s, ok := signals.SignalLookup[signal]; ok {\n\t\tsig = s\n\t} else {\n\t\td.logger.Warn(\"unknown signal to send to task, using SIGINT instead\", \"signal\", signal, \"task_id\", handle.task.ID)\n\t}\n\treturn handle.Signal(sig)\n}",
"func ProcessSignal(p *os.Process, sig os.Signal,) error",
"func getTaskKillSignal(signal string) (os.Signal, error) {\n\tif signal == \"\" {\n\t\treturn os.Interrupt, nil\n\t}\n\n\ttaskKillSignal := signals.SignalLookup[signal]\n\tif taskKillSignal == nil {\n\t\treturn nil, fmt.Errorf(\"Signal %s is not supported\", signal)\n\t}\n\n\treturn taskKillSignal, nil\n}",
"func signal(s os.Signal) {\n\tp, _ := os.FindProcess(os.Getpid())\n\t_ = p.Signal(s)\n\t// Sleep so test won't finish and signal will be received.\n\ttime.Sleep(999)\n}",
"func TestSignal(t *testing.T) {\n\tstarted := StartedProcess{\n\t\tExecutable: \"useless\",\n\t\tServer: Target {\n\t\t\tAuth: Auth{\n\t\t\t\tPassword: \"\",\n\t\t\t\tPrivateKey: \"\",\n\t\t\t},\n\t\t\tHostname: \"local\",\n\t\t\tName: \"local\",\n\t\t\tPort: 0,\n\t\t\tUsername: \"\",\n\t\t},\n\t\tPid: os.Getpid(),\n\t\tLogs: Logs{\n\t\t\tStdout: \"vms/log\",\n\t\t\tStderr: \"vms/log\",\n\t\t},\n\t\tName: \"useless\",\n\t}\n\n\terr := started.Signal(syscall.SIGUSR1)\n\tif err != nil {\n\t\tt.Errorf(\"Expected nil got %s\", err.Error())\n\t}\n}",
"func (n *mockAgent) signalProcess(c *Container, processID string, signal syscall.Signal, all bool) error {\n\treturn nil\n}",
"func Signal(tag string) {\n\tglobalEvents.Signal(tag)\n}",
"func sendSignal(status string) {\n\tcf := cloudformation.New(session.New(&aws.Config{Region: ®ion}))\n\tparams := &cloudformation.SignalResourceInput{\n\t\tLogicalResourceId: &resource,\n\t\tStackName: &stack,\n\t\tStatus: &status,\n\t\tUniqueId: &uniqueID,\n\t}\n\t_, err := cf.SignalResource(params)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to signal CloudFormation: %q.\\n\", err.Error())\n\t}\n\tlog.Printf(\"Sent a %q signal to CloudFormation.\\n\", status)\n\treturn\n}",
"func (p *Process) Signal(sig os.Signal) error {\n return p.Process.Signal(sig)\n}",
"func (x *CtlCommand) signal(rpcc *xmlrpcclient.XMLRPCClient, sigName string, processes []string) {\n\tfor _, process := range processes {\n\t\tif process == \"all\" {\n\t\t\treply, err := rpcc.SignalAll(process)\n\t\t\tif err == nil {\n\t\t\t\tx.showProcessInfo(&reply, make(map[string]bool))\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Fail to send signal %s to all process\", sigName)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t} else {\n\t\t\treply, err := rpcc.SignalProcess(sigName, process)\n\t\t\tif err == nil && reply.Success {\n\t\t\t\tfmt.Printf(\"Succeed to send signal %s to process %s\\n\", sigName, process)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Fail to send signal %s to process %s\\n\", sigName, process)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *qemuCmd) Signal(sig unix.Signal) error {\n\tcommand := api.InstanceExecControl{\n\t\tCommand: \"signal\",\n\t\tSignal: int(sig),\n\t}\n\n\t// Check handler hasn't finished.\n\tselect {\n\tcase <-c.dataDone:\n\t\treturn fmt.Errorf(\"no such process\") // Aligns with error retured from unix.Kill in lxc's Signal().\n\tdefault:\n\t}\n\n\tc.controlSendCh <- command\n\terr := <-c.controlResCh\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Debugf(`Forwarded signal \"%d\" to lxd-agent`, sig)\n\treturn nil\n}",
"func SignalGroup(p *os.Process, sig os.Signal) error {\n\tif pg, err := os.FindProcess(-p.Pid); err == nil {\n\t\tif pg.Signal(sig) == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn p.Signal(sig)\n}",
"func SendUserSignal(pname string) {\n\tif !IsPlatformWindows() {\n\t\tcmd := fmt.Sprintf(\"pidof %s | xargs kill -USR1 > /dev/null 2>&1\", pname)\n\t\tOCTSystem(cmd)\n\t}\n}",
"func (wc *workflowClient) SignalWorkflow(ctx context.Context, workflowID string, runID string, signalName string, arg interface{}) error {\n\tvar input []byte\n\tif arg != nil {\n\t\tvar err error\n\t\tif input, err = getHostEnvironment().encodeArg(arg); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\trequest := &s.SignalWorkflowExecutionRequest{\n\t\tDomain: common.StringPtr(wc.domain),\n\t\tWorkflowExecution: &s.WorkflowExecution{\n\t\t\tWorkflowId: common.StringPtr(workflowID),\n\t\t\tRunId: getRunID(runID),\n\t\t},\n\t\tSignalName: common.StringPtr(signalName),\n\t\tInput: input,\n\t\tIdentity: common.StringPtr(wc.identity),\n\t}\n\n\treturn backoff.Retry(ctx,\n\t\tfunc() error {\n\t\t\ttchCtx, cancel, opt := newChannelContext(ctx)\n\t\t\tdefer cancel()\n\t\t\treturn wc.workflowService.SignalWorkflowExecution(tchCtx, request, opt...)\n\t\t}, serviceOperationRetryPolicy, isServiceTransientError)\n}",
"func (p *Process) SendSignal(sig Signal) error {\n\treturn p.SendSignalWithContext(context.Background(), sig)\n}",
"func (b *BoatHandle) Signal(sig os.Signal) error { return b.cmd.Process.Signal(sig) }",
"func (c *gcsCore) SignalProcess(pid int, options prot.SignalProcessOptions) error {\n\tc.processCacheMutex.Lock()\n\tif _, ok := c.processCache[pid]; !ok {\n\t\tc.processCacheMutex.Unlock()\n\t\treturn gcserr.NewHresultError(gcserr.HrErrNotFound)\n\t}\n\tc.processCacheMutex.Unlock()\n\n\t// Interpret signal value 0 as SIGKILL.\n\t// TODO: Remove this special casing when we are not worried about breaking\n\t// older Windows builds which don't support sending signals.\n\tvar signal syscall.Signal\n\tif options.Signal == 0 {\n\t\tsignal = unix.SIGKILL\n\t} else {\n\t\tsignal = syscall.Signal(options.Signal)\n\t}\n\n\tif err := syscall.Kill(pid, signal); err != nil {\n\t\treturn errors.Wrapf(err, \"failed call to kill on process %d with signal %d\", pid, options.Signal)\n\t}\n\n\treturn nil\n}",
"func ToSignal(signalName string) (os.Signal, error) {\n\tif signalName == \"HUP\" {\n\t\treturn syscall.SIGHUP, nil\n\t} else if signalName == \"INT\" {\n\t\treturn syscall.SIGINT, nil\n\t} else if signalName == \"QUIT\" {\n\t\treturn syscall.SIGQUIT, nil\n\t} else if signalName == \"KILL\" {\n\t\treturn syscall.SIGKILL, nil\n\t} else if signalName == \"USR1\" {\n\t\treturn syscall.SIGUSR1, nil\n\t} else if signalName == \"USR2\" {\n\t\treturn syscall.SIGUSR2, nil\n\t} else {\n\t\treturn syscall.SIGTERM, nil\n\n\t}\n\n}",
"func X__sysv_signal(tls *TLS, signum int32, handler uintptr) {\n\tch := make(chan os.Signal)\n\tgo func() {\n\t\t<-ch\n\t\t(*(*func(*TLS, int32))(unsafe.Pointer(&handler)))(tls, signum)\n\t}()\n\tsignal.Notify(ch, syscall.Signal(signum))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ExecTask function is used by the Nomad client to execute commands inside the task execution context.
|
func (d *Driver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*drivers.ExecTaskResult, error) {
return nil, fmt.Errorf("Podman driver does not support exec")
}
|
[
"func (d *Driver) ExecTask(taskID string, cmdArgs []string, timeout time.Duration) (*drivers.ExecTaskResult, error) {\n\td.logger.Debug(\"ExecTask called and returning\")\n\t// TODO: implement driver specific logic to execute commands in a task.\n\treturn nil, fmt.Errorf(\"libvirt driver does not support execute commands\")\n}",
"func (h *DriverHandle) Exec(timeout time.Duration, cmd string, args []string) ([]byte, int, error) {\n\tcommand := append([]string{cmd}, args...)\n\tres, err := h.driver.ExecTask(h.taskID, command, timeout)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn res.Stdout, res.ExitResult.ExitCode, res.ExitResult.Err\n}",
"func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\n\t\t\terr := fmt.Errorf(\"Unhandled Error executing task '%s' : %v\", taskInst.task.Name(), r)\n\t\t\tlogger.Error(err)\n\n\t\t\t// todo: useful for debugging\n\t\t\tlogger.Errorf(\"StackTrace: %s\", debug.Stack())\n\n\t\t\tif !taskInst.flowInst.isHandlingError {\n\n\t\t\t\ttaskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), \"unhandled\", err.Error()))\n\t\t\t\tinst.HandleGlobalError(taskInst.flowInst, err)\n\t\t\t}\n\t\t\t// else what should we do?\n\t\t}\n\t}()\n\n\tvar err error\n\n\tvar evalResult model.EvalResult\n\n\tif taskInst.status == model.TaskStatusWaiting {\n\n\t\tevalResult, err = behavior.PostEval(taskInst)\n\n\t} else {\n\t\tevalResult, err = behavior.Eval(taskInst)\n\t}\n\n\tif err != nil {\n\t\ttaskInst.returnError = err\n\t\tinst.handleTaskError(behavior, taskInst, err)\n\t\treturn\n\t}\n\n\tswitch evalResult {\n\tcase model.EVAL_DONE:\n\t\ttaskInst.SetStatus(model.TaskStatusDone)\n\t\tinst.handleTaskDone(behavior, taskInst)\n\tcase model.EVAL_SKIP:\n\t\ttaskInst.SetStatus(model.TaskStatusSkipped)\n\t\tinst.handleTaskDone(behavior, taskInst)\n\tcase model.EVAL_WAIT:\n\t\ttaskInst.SetStatus(model.TaskStatusWaiting)\n\tcase model.EVAL_FAIL:\n\t\ttaskInst.SetStatus(model.TaskStatusFailed)\n\tcase model.EVAL_REPEAT:\n\t\ttaskInst.SetStatus(model.TaskStatusReady)\n\t\t//task needs to iterate or retry\n\t\tinst.scheduleEval(taskInst)\n\t}\n}",
"func (cht *clusterHeartbeatTask) TaskExec(ctx context.Context, tOps hk.TaskOps) {\n\tc := cht.c\n\tapp := c.app\n\to := tOps.Object()\n\tmsgs := util.NewMsgList(o.Messages)\n\t// establish an existential lock across the updates\n\tif app.CrudHelpers == nil {\n\t\tc.Log.Errorf(\"Task %s: not ready\", o.Meta.ID)\n\t\tmsgs.Insert(\"Not ready\")\n\t\to.Messages = msgs.ToModel()\n\t\ttOps.SetState(hk.TaskStateFailed) // does not return\n\t}\n\tapp.CrudHelpers.RLock()\n\tdefer app.CrudHelpers.RUnlock()\n\tapp.CrudHelpers.NodeLock()\n\tdefer app.CrudHelpers.NodeUnlock()\n\tapp.CrudHelpers.ClusterLock()\n\tdefer app.CrudHelpers.ClusterUnlock()\n\t// is this a cluster timeout task?\n\tisClusterTimeoutTask := false\n\tif len(o.Messages) == 1 && o.Messages[0].Message == chtTimeoutTaskMessage {\n\t\tisClusterTimeoutTask = true\n\t}\n\t// update the cluster object\n\tclID := string(o.ObjectID)\n\tclSS := o.ServiceStates[clID]\n\tclObj, err := cht.ops.updateCluster(ctx, clID, &clSS, isClusterTimeoutTask)\n\tif err != nil {\n\t\tc.Log.Errorf(\"Task %s: cluster[%s] %s\", o.Meta.ID, clID, err.Error())\n\t\tmsgs.Insert(\"Failed to update cluster\")\n\t\to.Messages = msgs.ToModel()\n\t\ttOps.SetState(hk.TaskStateFailed) // does not return\n\t}\n\tc.Log.Debugf(\"Task %s: cluster[%s] state %s\", o.Meta.ID, clID, clSS.State)\n\tmsgs.Insert(\"Updated cluster\")\n\t// organize nodes by state; save a representative node state for its temporal settings\n\tvar nodeSS *models.ServiceState\n\tnodesByState := map[string][]string{}\n\tfor nID, ss := range o.ServiceStates {\n\t\tif nID == clID {\n\t\t\tcontinue\n\t\t}\n\t\tif _, found := nodesByState[ss.State]; !found {\n\t\t\tnodesByState[ss.State] = make([]string, 0, 1)\n\t\t}\n\t\tnodesByState[ss.State] = append(nodesByState[ss.State], nID)\n\t\tif nodeSS == nil {\n\t\t\tnodeSS = &models.ServiceState{}\n\t\t\t*nodeSS = ss\n\t\t}\n\t}\n\t// update nodes in bulk\n\tnumN, upN := 0, 0\n\tfor state, nIDs := range nodesByState {\n\t\tss := *nodeSS 
// shallow copy with heartbeat pulse of nodes\n\t\tss.State = state // override the state\n\t\t// check if any of the nodes have restarted, e.g. changing state to READY, then send audit log event\n\t\tc.Log.Debugf(\"State: %s, nodes: %s\", state, strings.Join(nIDs, \" ,\"))\n\t\tif state == com.ServiceStateReady && len(nIDs) > 0 {\n\t\t\tcht.ops.reportRestartedNodesToAuditLog(ctx, clObj, nIDs)\n\t\t}\n\t\tchgN, foundN, err := cht.ops.updateNodes(ctx, nIDs, &ss)\n\t\tif err == nil {\n\t\t\tupN += chgN\n\t\t\tnumN += foundN\n\t\t}\n\t}\n\tc.Log.Debugf(\"Updated %d/%d nodes\", upN, numN)\n\tmsgs.Insert(\"Updated %d/%d nodes\", upN, numN)\n\t// timeout nodes older than the cluster\n\ttoN := 0\n\tif chgN, foundN, err := cht.ops.timeoutNodes(ctx, clObj); err == nil && foundN > 0 {\n\t\tmsgs.Insert(\"Timed out %d/%d nodes\", chgN, foundN)\n\t\ttoN = chgN\n\t\tcht.ops.reportTimedoutNodesToAuditLog(ctx, clObj)\n\t}\n\to.Messages = msgs.ToModel()\n\t// send the summary node CRUDE\n\tcht.ops.sendNodeCrudEvent(clObj, upN, toN)\n}",
"func (d *deviceCommunicationProvider) Exec(cmd ...string) (stdout, stderr []byte, err error) {\n\treturn d.sessionProvider.Exec(append([]string{prefixPath + \"dut_exec.sh\"}, cmd...)...)\n}",
"func (pge *PgEngine) ExecuteSQLTask(ctx context.Context, tx pgx.Tx, task *ChainTask, paramValues []string) (out string, err error) {\n\tvar execTx pgx.Tx\n\tvar remoteDb PgxConnIface\n\tvar executor executor\n\n\texecTx = tx\n\tif task.Autonomous {\n\t\texecutor = pge.ConfigDb\n\t} else {\n\t\texecutor = tx\n\t}\n\n\t//Connect to Remote DB\n\tif task.ConnectString.Status != pgtype.Null {\n\t\tremoteDb, execTx, err = pge.GetRemoteDBTransaction(ctx, task.ConnectString.String)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif task.Autonomous {\n\t\t\texecutor = remoteDb\n\t\t\t_ = execTx.Rollback(ctx)\n\t\t} else {\n\t\t\texecutor = execTx\n\t\t}\n\n\t\tdefer pge.FinalizeRemoteDBConnection(ctx, remoteDb)\n\t}\n\n\t// Set Role\n\tif task.RunAs.Status != pgtype.Null && !task.Autonomous {\n\t\tpge.SetRole(ctx, execTx, task.RunAs)\n\t}\n\n\tif task.IgnoreError && !task.Autonomous {\n\t\tpge.MustSavepoint(ctx, execTx, fmt.Sprintf(\"task_%d\", task.TaskID))\n\t}\n\n\tout, err = pge.ExecuteSQLCommand(ctx, executor, task.Script, paramValues)\n\n\tif err != nil && task.IgnoreError && !task.Autonomous {\n\t\tpge.MustRollbackToSavepoint(ctx, execTx, fmt.Sprintf(\"task_%d\", task.TaskID))\n\t}\n\n\t//Reset The Role\n\tif task.RunAs.Status != pgtype.Null && !task.Autonomous {\n\t\tpge.ResetRole(ctx, execTx)\n\t}\n\n\t// Commit changes on remote server\n\tif task.ConnectString.Status != pgtype.Null && !task.Autonomous {\n\t\tpge.MustCommitTransaction(ctx, execTx)\n\t}\n\n\treturn\n}",
"func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {\n\thcsContainer, err := t.getHCSContainer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger := t.ctr.client.logger.WithFields(log.Fields{\n\t\t\"container\": t.ctr.id,\n\t\t\"exec\": processID,\n\t})\n\n\t// Note we always tell HCS to\n\t// create stdout as it's required regardless of '-i' or '-t' options, so that\n\t// docker can always grab the output through logs. We also tell HCS to always\n\t// create stdin, even if it's not used - it will be closed shortly. Stderr\n\t// is only created if it we're not -t.\n\tcreateProcessParms := &hcsshim.ProcessConfig{\n\t\tCreateStdInPipe: true,\n\t\tCreateStdOutPipe: true,\n\t\tCreateStdErrPipe: !spec.Terminal,\n\t}\n\tif spec.Terminal {\n\t\tcreateProcessParms.EmulateConsole = true\n\t\tif spec.ConsoleSize != nil {\n\t\t\tcreateProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)\n\t\t\tcreateProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)\n\t\t}\n\t}\n\n\t// Take working directory from the process to add if it is defined,\n\t// otherwise take from the first process.\n\tif spec.Cwd != \"\" {\n\t\tcreateProcessParms.WorkingDirectory = spec.Cwd\n\t} else {\n\t\tcreateProcessParms.WorkingDirectory = t.ctr.ociSpec.Process.Cwd\n\t}\n\n\t// Configure the environment for the process\n\tcreateProcessParms.Environment = setupEnvironmentVariables(spec.Env)\n\n\t// Configure the CommandLine/CommandArgs\n\tsetCommandLineAndArgs(spec, createProcessParms)\n\tlogger.Debugf(\"exec commandLine: %s\", createProcessParms.CommandLine)\n\n\tcreateProcessParms.User = spec.User.Username\n\n\t// Start the command running in the container.\n\tnewProcess, err := hcsContainer.CreateProcess(createProcessParms)\n\tif err != nil {\n\t\tlogger.WithError(err).Errorf(\"exec's CreateProcess() failed\")\n\t\treturn nil, err\n\t}\n\tpid := 
newProcess.Pid()\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif err := newProcess.Kill(); err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"failed to kill process\")\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tif err := newProcess.Wait(); err != nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"failed to wait for process\")\n\t\t\t\t}\n\t\t\t\tif err := newProcess.Close(); err != nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"failed to clean process resources\")\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\tdio, err := newIOFromProcess(newProcess, spec.Terminal)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"failed to get stdio pipes\")\n\t\treturn nil, err\n\t}\n\t// Tell the engine to attach streams back to the client\n\t_, err = attachStdio(dio)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp := &process{\n\t\tid: processID,\n\t\tctr: t.ctr,\n\t\thcsProcess: newProcess,\n\t\twaitCh: make(chan struct{}),\n\t}\n\n\t// Spin up a goroutine to notify the backend and clean up resources when\n\t// the process exits. 
Defer until after the start event is sent so that\n\t// the exit event is not sent out-of-order.\n\tdefer func() { go p.reap() }()\n\n\tt.ctr.client.eventQ.Append(t.ctr.id, func() {\n\t\tei := libcontainerdtypes.EventInfo{\n\t\t\tContainerID: t.ctr.id,\n\t\t\tProcessID: p.id,\n\t\t\tPid: uint32(pid),\n\t\t}\n\t\tt.ctr.client.logger.WithFields(log.Fields{\n\t\t\t\"container\": t.ctr.id,\n\t\t\t\"event\": libcontainerdtypes.EventExecAdded,\n\t\t\t\"event-info\": ei,\n\t\t}).Info(\"sending event\")\n\t\terr := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecAdded, ei)\n\t\tif err != nil {\n\t\t\tt.ctr.client.logger.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"container\": t.ctr.id,\n\t\t\t\t\"event\": libcontainerdtypes.EventExecAdded,\n\t\t\t\t\"event-info\": ei,\n\t\t\t}).Error(\"failed to process event\")\n\t\t}\n\t\terr = t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecStarted, ei)\n\t\tif err != nil {\n\t\t\tt.ctr.client.logger.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"container\": t.ctr.id,\n\t\t\t\t\"event\": libcontainerdtypes.EventExecStarted,\n\t\t\t\t\"event-info\": ei,\n\t\t\t}).Error(\"failed to process event\")\n\t\t}\n\t})\n\n\treturn p, nil\n}",
"func (cli *MockCli) ExecuteTask(startMessage, errorMessage, successMessage string, function func() error) error {\n\terr := function()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (tx *TxMsg) Exec(ctx context.Context) (result btypes.Result, crossTxQcps *txs.TxQcp) {\n\treturn\n}",
"func runExec(serviceName string, operation string) (string, error) {\n\tbytes, err := exec.Command(Configuration.ExecutorPath, serviceName, operation).CombinedOutput()\n\treturn string(bytes), err\n}",
"func (d *dispatcher) ExecuteTask() {\n\tlogutil.BgLogger().Info(\"execute one task\", zap.Int64(\"task ID\", d.task.ID),\n\t\tzap.String(\"state\", d.task.State), zap.Uint64(\"concurrency\", d.task.Concurrency))\n\td.scheduleTask(d.task.ID)\n}",
"func (a AmbariRegistry) ExecuteRemoteCommandTask(task Task, filteredHosts map[string]bool) {\n\tif len(task.Command) > 0 {\n\t\tfmt.Println(\"Execute remote command: \" + task.Command)\n\t\ta.RunRemoteHostCommand(task.Command, filteredHosts, task.AmbariServerFilter)\n\t}\n}",
"func (t TaskFunc) Execute() { t() }",
"func execTasks(parent context.Context, c TimedActuator,\n\texecFunc func(f func()), tasks ...Task) error {\n\tsize := len(tasks)\n\tif size == 0 {\n\t\treturn nil\n\t}\n\n\tctx, cancel := context.WithCancel(parent)\n\tresChan := make(chan error, size)\n\twg := &sync.WaitGroup{}\n\twg.Add(size)\n\n\t// Make sure the tasks are completed and channel is closed\n\tgo func() {\n\t\twg.Wait()\n\t\tcancel()\n\t\tclose(resChan)\n\t}()\n\n\t// Sadly we can not kill a goroutine manually\n\t// So when an error happens, the other tasks will continue\n\t// But the good news is that main progress\n\t// will know the error immediately\n\tfor _, task := range tasks {\n\t\tchild, _ := context.WithCancel(ctx)\n\t\tf := wrapperTask(child, task, wg, resChan)\n\t\texecFunc(f)\n\t}\n\n\treturn wait(ctx, c, resChan, cancel)\n}",
"func (c *GQTPClient) Exec(cmd string, body io.Reader) (Response, error) {\n\tcommand, err := ParseCommand(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcommand.SetBody(body)\n\treturn c.Query(command)\n}",
"func (e *GCloudSSHExecutor) Exec(command string, node *v1.Node, stdin io.Reader) error {\n\tzone, ok := node.Labels[\"failure-domain.beta.kubernetes.io/zone\"]\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown zone for %q node: no failure-domain.beta.kubernetes.io/zone label\", node.Name)\n\t}\n\tcmd := exec.Command(\"gcloud\", \"compute\", \"ssh\", \"--zone\", zone, \"--command\", command, node.Name)\n\tcmd.Stdin = stdin\n\toutput, err := cmd.CombinedOutput()\n\tklog.Infof(\"ssh to %q finished with %q: %v\", node.Name, string(output), err)\n\treturn err\n}",
"func (a *Agent) Exec(floeID string, delay time.Duration) error {\n\n\tlog.Info(\"FLOW EXEC SYNC \", floeID)\n\n\tec := make(chan *par.Params)\n\n\t_, err := a.start(floeID, delay, ec)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres := <-ec\n\n\tlog.Info(\"end result\", res)\n\n\tif res.Status == 0 {\n\t\tlog.Info(\"FLOW SUCCEEDED\")\n\t} else {\n\t\tlog.Info(\"FLOW FAILED\")\n\t}\n\n\treturn nil\n}",
"func (ne *NSEnter) Exec(cmd string, args []string) exec.Cmd {\n\thostProcMountNsPath := filepath.Join(ne.hostRootFsPath, mountNsPath)\n\tfullArgs := append([]string{fmt.Sprintf(\"--mount=%s\", hostProcMountNsPath), \"--\"},\n\t\tappend([]string{ne.AbsHostPath(cmd)}, args...)...)\n\tklog.V(5).Infof(\"Running nsenter command: %v %v\", nsenterPath, fullArgs)\n\treturn ne.executor.Command(nsenterPath, fullArgs...)\n}",
"func TaskExecID(taskID, execID string) (string, error) {\n\terr := identifiers.Validate(taskID)\n\tif execID != \"\" {\n\t\terr = multierror.Append(err, identifiers.Validate(execID)).ErrorOrNil()\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// use \"/\" as a separator, which should be an illegal character for the IDs themselves\n\t// after doing the above validation\n\treturn base64.URLEncoding.EncodeToString([]byte(fmt.Sprintf(\"%s/%s\", taskID, execID))), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
expandPath returns the absolute path of dir, relative to base if dir is relative path. base is expected to be an absolute path
|
func expandPath(base, dir string) string {
if filepath.IsAbs(dir) {
return filepath.Clean(dir)
}
return filepath.Clean(filepath.Join(base, dir))
}
|
[
"func expandFilePath(filePath string, baseDir string) string {\n\tif !strings.HasPrefix(filePath, \"/\") {\n\t\tfilePath = path.Join(baseDir, filePath)\n\t}\n\treturn filePath\n}",
"func AbsBase(path string) (dir string) {\n\treturn filepath.Base(Abs(path))\n}",
"func expandPath(path string) (string, error) {\n\tif len(path) == 0 {\n\t\treturn \"\", nil\n\t}\n\tif path[0] == '~' && (len(path) == 1 || os.IsPathSeparator(path[1])) {\n\t\tusr, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"Failed to get the home directory of the user\")\n\t\t}\n\t\tpath = filepath.Join(usr.HomeDir, path[1:])\n\t}\n\n\tvar err error\n\tpath, err = filepath.Abs(path)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to generate absolute path\")\n\t}\n\treturn path, nil\n}",
"func ExpandPath(pathToExpand string) string {\n\tif !path.IsAbs(pathToExpand) {\n\t\twd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Getwd failed: %s\", err))\n\t\t}\n\t\treturn path.Clean(path.Join(wd, pathToExpand))\n\t}\n\treturn pathToExpand\n}",
"func expand(path string) (string, error) {\n\t// Ignore if path has no leading tilde.\n\tif path != \"~\" && !strings.HasPrefix(path, \"~\"+string(os.PathSeparator)) {\n\t\treturn path, nil\n\t}\n\n\t// Fetch the current user to determine the home path.\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn path, err\n\t} else if u.HomeDir == \"\" {\n\t\treturn path, fmt.Errorf(\"home directory unset\")\n\t}\n\n\tif path == \"~\" {\n\t\treturn u.HomeDir, nil\n\t}\n\treturn filepath.Join(u.HomeDir, strings.TrimPrefix(path, \"~\"+string(os.PathSeparator))), nil\n}",
"func expand(path string) (string, error) {\n\tif len(path) == 0 || path[0] != '~' {\n\t\treturn path, nil\n\t}\n\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn path, err\n\t}\n\n\treturn filepath.Join(usr.HomeDir, path[1:]), nil\n}",
"func expandPath(filepath string) (expandedPath string) {\n\tcleanedPath := path.Clean(filepath)\n\texpandedPath = cleanedPath\n\tif strings.HasPrefix(cleanedPath, \"~\") {\n\t\trest := cleanedPath[2:]\n\t\texpandedPath = path.Join(getHomeFolder(), rest)\n\t}\n\treturn\n}",
"func expandPath(path string) (string, error) {\n\tpath = os.ExpandEnv(path)\n\n\tif !filepath.IsAbs(path) {\n\t\tvar err error\n\t\tpath, err = filepath.Abs(path)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn path, nil\n}",
"func Expand(path string, dir string) string {\n\tif strings.HasPrefix(path, \"~/\") {\n\t\treturn filepath.Join(dir, strings.TrimPrefix(path, \"~/\"))\n\t}\n\n\treturn path\n}",
"func Base() string {\n\treturn filepath.Join(path, \"../..\")\n}",
"func ExpandPath(p string) string {\n\tif strings.HasPrefix(p, \"~/\") {\n\t\tuser, _ := user.Current()\n\t\thomeDir := user.HomeDir\n\t\tp = path.Join(homeDir, p[2:])\n\t} else if !strings.HasPrefix(p, \"/\") { // Parse relative paths\n\t\tcwd, _ := os.Getwd()\n\t\tp = path.Join(cwd, p)\n\t}\n\treturn resolveSymlink(p)\n}",
"func RelativePathBaseOn(basePath, filePath string) string {\n\tif filepath.IsAbs(filePath) {\n\t\treturn filePath\n\t}\n\treturn filepath.Join(basePath, filePath)\n}",
"func GetPathWithBase(path, base string) string {\n\tif base != \"\" {\n\t\tparts := strings.Split(path, \"/\")\n\t\tparts = append(parts, \"\")\n\t\tcopy(parts[3:], parts[2:])\n\t\tparts[2] = strings.TrimPrefix(base, \"/\")\n\t\treturn strings.Join(parts, \"/\")\n\t}\n\treturn path\n}",
"func Relative(base, complete string) string {\n\trel, err := filepath.Rel(base, complete)\n\tif err != nil {\n\t\tFatalf(\"Diff Path %s, %s: %s\", base, complete, err)\n\t}\n\n\t// special case\n\tif rel == \".\" {\n\t\trel = \"\"\n\t}\n\n\treturn rel\n}",
"func joinPath(dir, file string) string {\n\tif filepath.IsAbs(file) {\n\t\treturn file\n\t}\n\treturn filepath.Join(dir, file)\n}",
"func (pr *PathResolver) RelativePath(filename, baseDirectory string) string {\n\tif filepath.IsAbs(filename) && filepath.IsAbs(baseDirectory) {\n\t\toffset := baseDirectory\n\t\tif strings.HasSuffix(baseDirectory, string(pr.FileSeparator())) {\n\t\t\toffset = baseDirectory[:len(baseDirectory)-1]\n\t\t}\n\t\tfilename = filename[len(offset)+1:]\n\t}\n\treturn filename\n}",
"func ExpandPath(p string) (string, error) {\n\tif strings.HasPrefix(p, \"~/\") || strings.HasPrefix(p, \"~\\\\\") {\n\t\tif home := HomeDir(); home != \"\" {\n\t\t\tp = home + p[1:]\n\t\t}\n\t}\n\treturn filepath.Abs(path.Clean(os.ExpandEnv(p)))\n}",
"func joinSrcDirToDestBase(fi os.FileInfo, e error, srcAbs, destAbs *string) {\n\t// if user not specify the file name in dest, make it happen\n\tif e != nil {\n\t\tlog.Fatal(e)\n\t} else if fi.IsDir() {\n\t\t(*destAbs) = filepath.Join((*destAbs), filepath.Base(*srcAbs))\n\t\tdestDir = filepath.Dir(*destAbs)\n\t\tdestBase = filepath.Base(*destAbs)\n\t}\n\n}",
"func relWithin(base, target string) (string, error) {\n\trel, err := filepath.Rel(base, target)\n\tif err != nil {\n\t\t// TODO: wrap\n\t\treturn \"\", err\n\t}\n\n\tparts := strings.SplitN(rel, \"/\", 2)\n\tif len(parts) > 0 {\n\t\tif parts[0] == \"..\" {\n\t\t\treturn \"\", errors.Errorf(\"Path %q not within root %q\", target, base)\n\t\t}\n\t}\n\n\treturn rel, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
isParentPath returns true if path is a child or a descendant of parent path. Both inputs need to be absolute paths.
|
func isParentPath(parent, path string) bool {
rel, err := filepath.Rel(parent, path)
return err == nil && !strings.HasPrefix(rel, "..")
}
|
[
"func PathIsChild(parent, child string) bool {\n\tparentParts := strings.Split(parent, string(os.PathSeparator))\n\tchildParts := strings.Split(child, string(os.PathSeparator))\n\tif len(childParts) < len(parentParts) {\n\t\treturn false\n\t}\n\tfor i, part := range parentParts {\n\t\tif part != childParts[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func pathIsChild(parent, child string) bool {\n\tif !strings.HasPrefix(child, parent) {\n\t\treturn false\n\t}\n\trel := child[len(parent):]\n\trel = strings.Trim(rel, \"/\")\n\treturn !strings.Contains(rel, \"/\")\n}",
"func isChildPath(parent string, child string) bool {\n\t// The parent should at least be the common part plus a '/' and something else\n\tif len(child) < len(parent)+2 {\n\t\treturn false\n\t}\n\t// We compare the |parent| part which should be the same.\n\tif child[:len(parent)] != parent {\n\t\treturn false\n\t}\n\t// If |child| is indeed underneath, now it would have to have a separator to\n\t// represent the sub-directory. Depending on the type of path (local-system one or repo\n\t// relative, the separator could be different).t\n\treturn child[len(parent)] == '/' || child[len(parent)] == '\\\\'\n}",
"func (d *Driver) isParent(id, parent string) bool {\n\t// TODO (maybe): this function is called a lot and does lots of sub-routine calls and I/O.\n\t// One might want to cache parent but should first be confirmed via profiling that really noticable performance cost\n\tlogrus.Debugf(\"secureoverlay2: isParent called w. id: %s, parent: %s\", id, parent)\n\tlowers, err := d.getLowerDirs(id)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif parent == \"\" && len(lowers) > 0 {\n\t\treturn false\n\t}\n\n\tparentDir := d.dir(parent)\n\tvar ld string\n\tif len(lowers) > 0 {\n\t\tld = filepath.Dir(lowers[0])\n\t}\n\tif ld == \"\" && parent == \"\" {\n\t\treturn true\n\t}\n\treturn ld == parentDir\n}",
"func IsParentDir(parentDir, childPath string) bool {\n\treturn mgutil.IsParentDir(parentDir, childPath)\n}",
"func isParentPathEntity(entity1, entity2 string) bool {\n\tpath1, path2 := strings.TrimPrefix(entity1, \"path-\"), strings.TrimPrefix(entity2, \"path-\")\n\tif len(path1) == len(entity1) || len(path2) == len(entity2) {\n\t\treturn false\n\t}\n\tif !strings.HasPrefix(path2, path1) {\n\t\treturn false\n\t}\n\tif len(path1) == len(path2) {\n\t\treturn true\n\t}\n\treturn path2[len(path1)] == '/'\n}",
"func PathIsAncestor(p1, p2 string) bool {\n\treturn strings.HasPrefix(p2, p1+\"/\") || (p2 != \"\" && p1 == \"\")\n}",
"func (p path) hasValidParent() bool {\n\treturn len(p.path) >= 2 && p.parentPath().isValid()\n}",
"func isPathChild(a, b []string) bool {\n\t// If b does not have a greater path length than a, it cannot be a child. If\n\t// b has more than one element than a, it must be at least a grandchild.\n\tif len(b) <= len(a) || len(b) > len(a)+1 {\n\t\treturn false\n\t}\n\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func relativeToParent(parent, child string) (ok bool, rel string) {\n\tparent = filepath.Clean(parent)\n\tchild = filepath.Clean(child)\n\n\tif parent == child {\n\t\treturn true, \"\"\n\t}\n\n\tif !strings.HasPrefix(child, parent+string(filepath.Separator)) {\n\t\treturn false, \"\"\n\t}\n\treturn true, child[len(parent)+1:]\n}",
"func (p path) parentPath() *path {\n\treturn &path{p.FolderBranch, p.path[:len(p.path)-1]}\n}",
"func (p *path) IsSubPath(path string) bool {\n\thomedir := p.HomeDir()\n\tpath = p.Resolve(path)\n\trel, err := filepath.Rel(homedir, path)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn !strings.Contains(rel, \"..\")\n}",
"func (b *packageBuilder) isParent(id int) bool {\n\t_, ok := b.parentIDs[id]\n\treturn ok\n}",
"func parentPathIncludesNonDirectory(path string) (bool, error) {\n\tfor _, parentPath := range util.ParentDirectories(path) {\n\t\tlstat, err := os.Lstat(parentPath)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif !lstat.IsDir() {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func IsRelativePath(path string) bool {\n\treturn !(types.StartWith(path, \"./\") ||\n\t\ttypes.StartWith(path, \".\\\\\") ||\n\t\ttypes.StartWith(path, \"~/\") ||\n\t\ttypes.StartWith(path, \"~\\\\\") ||\n\t\ttypes.StartWith(path, \"/\") ||\n\t\tIsWindowsRootpath(path))\n}",
"func IsWithinPath(basepath string, path string) bool {\n\trel, err := filepath.Rel(basepath, path)\n\treturn err == nil && !strings.HasPrefix(rel, \"..\")\n}",
"func isPath(path string) bool {\n\treturn strings.HasPrefix(path, \"~\") ||\n\t\tstrings.HasPrefix(path, \".\") ||\n\t\tstrings.HasPrefix(path, \"/\")\n}",
"func isChild(child, parent string) bool {\n\treturn strings.HasPrefix(child, dirName(parent))\n}",
"func (_Token *TokenCaller) IsParentOf(opts *bind.CallOpts, _shadyChild common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"isParentOf\", _shadyChild)\n\treturn *ret0, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetOcJusticeTerminalCase invokes the dt_oc_info.GetOcJusticeTerminalCase API synchronously
|
func (client *Client) GetOcJusticeTerminalCase(request *GetOcJusticeTerminalCaseRequest) (response *GetOcJusticeTerminalCaseResponse, err error) {
response = CreateGetOcJusticeTerminalCaseResponse()
err = client.DoAction(request, response)
return
}
|
[
"func (client *Client) GetOcJusticeTerminalCaseWithCallback(request *GetOcJusticeTerminalCaseRequest, callback func(response *GetOcJusticeTerminalCaseResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOcJusticeTerminalCaseResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOcJusticeTerminalCase(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetOcJusticeTerminalCaseWithChan(request *GetOcJusticeTerminalCaseRequest) (<-chan *GetOcJusticeTerminalCaseResponse, <-chan error) {\n\tresponseChan := make(chan *GetOcJusticeTerminalCaseResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetOcJusticeTerminalCase(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func CreateGetOcJusticeTerminalCaseRequest() (request *GetOcJusticeTerminalCaseRequest) {\n\trequest = &GetOcJusticeTerminalCaseRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dt-oc-info\", \"2022-08-29\", \"GetOcJusticeTerminalCase\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateGetOcJusticeTerminalCaseResponse() (response *GetOcJusticeTerminalCaseResponse) {\n\tresponse = &GetOcJusticeTerminalCaseResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (b *OGame) GetCelestial(v any) (Celestial, error) {\n\treturn b.WithPriority(taskRunner.Normal).GetCelestial(v)\n}",
"func (client IdentityClient) getCompartment(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/compartments/{compartmentId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetCompartmentResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func (c *Client) GetCase(caseID int) (Case, error) {\n\treturnCase := Case{}\n\terr := c.sendRequest(\"GET\", fmt.Sprintf(\"get_case/%d\", caseID), nil, &returnCase)\n\treturn returnCase, err\n}",
"func (c *CustomerService) Cases(id string, params *url.Values) (*Page, *http.Response, error) {\n\trestful := Restful{}\n\tpage := new(Page)\n\tpath := NewIdentityResourcePath(id, NewCustomer()).SetNested(NewCase())\n\tresp, err := restful.\n\t\tGet(path.Path()).\n\t\tJson(page).\n\t\tParams(params).\n\t\tClient(c.client).\n\t\tDo()\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\terr = c.unravelPage(page)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn page, resp, err\n}",
"func (client IdentityClient) GetCompartment(ctx context.Context, request GetCompartmentRequest) (response GetCompartmentResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.getCompartment, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = GetCompartmentResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = GetCompartmentResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(GetCompartmentResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into GetCompartmentResponse\")\n\t}\n\treturn\n}",
"func CustomerGetoneVehicleforview(w http.ResponseWriter, r *http.Request) {\n\tvehicle := services.GetOneVehicle(r)\n\tcusttpl.ExecuteTemplate(w, \"viewvehicle.html\", vehicle)\n}",
"func GetTrafficOpsCookie(cdnUri, user, pass string) (string, error) {\n\turi := cdnUri + `/api/1.2/user/login`\n\tpostdata := `{\"u\":\"` + user + `\", \"p\":\"` + pass + `\"}`\n\treq, err := http.NewRequest(\"POST\", uri, strings.NewReader(postdata))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq.Header.Add(\"Accept\", \"application/json\")\n\n\tclient := getClient()\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tfor _, cookie := range resp.Cookies() {\n\t\tif cookie.Name == `mojolicious` {\n\t\t\treturn cookie.Value, nil\n\t\t}\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn \"\", errors.New(\"No login cookie received: \" + string(data))\n}",
"func (h *Handler) RetrieveCertificate(companyChainID string, uuid string) (*entityApi.TransactionWrapper, error) {\n apiResponse, err := h.apiClient.Get(fmt.Sprintf(certificateRoute, companyChainID, uuid), nil)\n if err != nil {\n return nil, err\n }\n var transactionWrapper entityApi.TransactionWrapper\n if err := unmarshalApiResponse(apiResponse, &transactionWrapper); err != nil {\n return nil, err\n }\n return &transactionWrapper, nil\n}",
"func ceGetActiveScenario(w http.ResponseWriter, r *http.Request) {\n\tlog.Debug(\"CEGetActiveScenario\")\n\n\t// Retrieve active scenario\n\tvar scenario Scenario\n\terr := getScenario(true, db, activeScenarioName, &scenario)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// NOTE: For now, return full scenario without status information.\n\t// Eventually, we will need to fetch latest status information from DB or k8s.\n\n\t// // Create Scenario object\n\t// var deployment Deployment\n\t// var scenario Scenario\n\t// scenario.Name = \"Edge-Enabled 5G Video\"\n\t// scenario.Deployment = &deployment\n\n\t// err := monitorActiveDeployment(&deployment)\n\t// if err != nil {\n\t// \tlog.Error(err.Error())\n\t// \thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t// \treturn\n\t// }\n\n\t// Format response\n\tjsonResponse, err := json.Marshal(scenario)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Send response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprint(w, string(jsonResponse))\n}",
"func (term *Terminology) ReadV2toSNOMEDCT(ctx context.Context, id *apiv1.Identifier, f func(*apiv1.Identifier) error) error {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tresponse, err := term.client.FromCrossMap(ctx, &snomed.TranslateFromRequest{S: id.GetValue(), RefsetId: 900000000000497000})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(response.GetTranslations()) == 0 {\n\t\tlog.Printf(\"no translations found for map from '%s:%s' to '%s'\", id.GetSystem(), id.GetValue(), identifiers.SNOMEDCT)\n\t}\n\tfor _, t := range response.GetTranslations() {\n\t\tref := t.GetReferenceSetItem().GetReferencedComponentId()\n\t\tif err := f(&apiv1.Identifier{System: identifiers.SNOMEDCT, Value: strconv.FormatInt(ref, 10)}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func GetClinicDoctors(c *gin.Context) {\n\tlog.Infof(\"Get all doctors registered with specific physical clinic\")\n\taddressID := c.Param(\"addressId\")\n\tif addressID == \"\" {\n\t\tc.AbortWithStatusJSON(\n\t\t\thttp.StatusBadRequest,\n\t\t\tgin.H{\n\t\t\t\tconstants.RESPONSE_JSON_DATA: nil,\n\t\t\t\tconstants.RESPONSDE_JSON_ERROR: \"clinic address id not provided\",\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\tctx := c.Request.Context()\n\tuserEmail, userID, gproject, err := getUserDetails(ctx, c.Request)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(\n\t\t\thttp.StatusInternalServerError,\n\t\t\tgin.H{\n\t\t\t\tconstants.RESPONSE_JSON_DATA: nil,\n\t\t\t\tconstants.RESPONSDE_JSON_ERROR: err.Error(),\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\tctx, span := trace.StartSpan(ctx, \"Get all doctors registered for a clinic\")\n\tdefer span.End()\n\tclinicMetaDB := datastoredb.NewClinicMetaHandler()\n\terr = clinicMetaDB.InitializeDataBase(ctx, gproject)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(\n\t\t\thttp.StatusInternalServerError,\n\t\t\tgin.H{\n\t\t\t\tconstants.RESPONSE_JSON_DATA: nil,\n\t\t\t\tconstants.RESPONSDE_JSON_ERROR: err.Error(),\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\tregisteredDoctors, err := clinicMetaDB.GetClinicDoctors(ctx, userEmail, userID, addressID)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(\n\t\t\thttp.StatusInternalServerError,\n\t\t\tgin.H{\n\t\t\t\tconstants.RESPONSE_JSON_DATA: nil,\n\t\t\t\tconstants.RESPONSDE_JSON_ERROR: err.Error(),\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\tconstants.RESPONSE_JSON_DATA: registeredDoctors,\n\t\tconstants.RESPONSDE_JSON_ERROR: nil,\n\t})\n\tclinicMetaDB.Close()\n}",
"func (o LookupControlResultOutput) SearchSolutionUseCase() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v LookupControlResult) []string { return v.SearchSolutionUseCase }).(pulumi.StringArrayOutput)\n}",
"func (a *KubernetesApiService) GetKubernetesAciCniProfileByMoid(ctx context.Context, moid string) ApiGetKubernetesAciCniProfileByMoidRequest {\n\treturn ApiGetKubernetesAciCniProfileByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}",
"func GetVMObject(ctx context.Context, conn *vclib.VSphereConnection, vmUUID string) (*vclib.VirtualMachine, error) {\n\t// TODO change impl below using multiple goroutines and sync.WaitGroup to make it faster\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tif err := conn.Connect(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(vmUUID) == 0 {\n\t\treturn nil, fmt.Errorf(\"virtual machine uuid is required\")\n\t}\n\n\tdatacenterObjs, err := vclib.GetAllDatacenter(ctx, conn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Lookup in each vsphere datacenter for this virtual machine\n\tfor _, dc := range datacenterObjs {\n\t\tvm, err := dc.GetVMByUUID(ctx, vmUUID)\n\t\tif err != nil {\n\t\t\tif err != vclib.ErrNoVMFound {\n\t\t\t\tlogrus.Warnf(\"failed to find vm with uuid: %s in datacenter: %s due to err: %v\", vmUUID, dc.Name(), err)\n\t\t\t\t// don't let one bad egg fail entire search. keep looking.\n\t\t\t} else {\n\t\t\t\tlogrus.Debugf(\"did not find vm with uuid: %s in datacenter: %s\", vmUUID, dc.Name())\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif vm != nil {\n\t\t\treturn vm, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"failed to find vm with uuid: %s in any datacenter for vc: %s\", vmUUID, conn.Hostname)\n}",
"func GetIncident(c *gin.Context) {\n\tvar err error\n\tvar output *incident.Incident\n\tvar incidentID int64\n\tvar taskID int64\n\tincidentID, err = strconv.ParseInt(c.Param(\"incidentId\"), 10, 64)\n\tctx, _ := authcontext.NewAuthContext(c)\n\n\tif taskID, err = strconv.ParseInt(c.Param(\"taskId\"), 10, 64); err == nil {\n\t\tif output, err = incident.GetByID(ctx, taskID, incidentID); err == nil {\n\t\t\tc.JSON(http.StatusOK, output)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tc.JSON(http.StatusPreconditionFailed, ResponseObject{\"error\": err.Error()})\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetOcJusticeTerminalCaseWithChan invokes the dt_oc_info.GetOcJusticeTerminalCase API asynchronously
|
func (client *Client) GetOcJusticeTerminalCaseWithChan(request *GetOcJusticeTerminalCaseRequest) (<-chan *GetOcJusticeTerminalCaseResponse, <-chan error) {
responseChan := make(chan *GetOcJusticeTerminalCaseResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.GetOcJusticeTerminalCase(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
|
[
"func (client *Client) GetOcJusticeTerminalCaseWithCallback(request *GetOcJusticeTerminalCaseRequest, callback func(response *GetOcJusticeTerminalCaseResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOcJusticeTerminalCaseResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOcJusticeTerminalCase(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetOcJusticeTerminalCase(request *GetOcJusticeTerminalCaseRequest) (response *GetOcJusticeTerminalCaseResponse, err error) {\n\tresponse = CreateGetOcJusticeTerminalCaseResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func CreateGetOcJusticeTerminalCaseRequest() (request *GetOcJusticeTerminalCaseRequest) {\n\trequest = &GetOcJusticeTerminalCaseRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dt-oc-info\", \"2022-08-29\", \"GetOcJusticeTerminalCase\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateGetOcJusticeTerminalCaseResponse() (response *GetOcJusticeTerminalCaseResponse) {\n\tresponse = &GetOcJusticeTerminalCaseResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *Client) GetWsCustomizedChO2OWithChan(request *GetWsCustomizedChO2ORequest) (<-chan *GetWsCustomizedChO2OResponse, <-chan error) {\n\tresponseChan := make(chan *GetWsCustomizedChO2OResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetWsCustomizedChO2O(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) RecognizeVehicleDashboardWithChan(request *RecognizeVehicleDashboardRequest) (<-chan *RecognizeVehicleDashboardResponse, <-chan error) {\n\tresponseChan := make(chan *RecognizeVehicleDashboardResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.RecognizeVehicleDashboard(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetOpenNLUWithChan(request *GetOpenNLURequest) (<-chan *GetOpenNLUResponse, <-chan error) {\n\tresponseChan := make(chan *GetOpenNLUResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetOpenNLU(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetWsCustomizedChO2OWithCallback(request *GetWsCustomizedChO2ORequest, callback func(response *GetWsCustomizedChO2OResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChO2OResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChO2O(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) DescribeCustinsKernelReleaseNotesWithChan(request *DescribeCustinsKernelReleaseNotesRequest) (<-chan *DescribeCustinsKernelReleaseNotesResponse, <-chan error) {\n\tresponseChan := make(chan *DescribeCustinsKernelReleaseNotesResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.DescribeCustinsKernelReleaseNotes(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) ListCasesWithChan(request *ListCasesRequest) (<-chan *ListCasesResponse, <-chan error) {\n\tresponseChan := make(chan *ListCasesResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ListCases(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetCurrentTermRepayInfoWithChan(request *GetCurrentTermRepayInfoRequest) (<-chan *GetCurrentTermRepayInfoResponse, <-chan error) {\n\tresponseChan := make(chan *GetCurrentTermRepayInfoResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetCurrentTermRepayInfo(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) ListCityMapAoisWithChan(request *ListCityMapAoisRequest) (<-chan *ListCityMapAoisResponse, <-chan error) {\n\tresponseChan := make(chan *ListCityMapAoisResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ListCityMapAois(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) DescribeCustinsKernelReleaseNotesWithCallback(request *DescribeCustinsKernelReleaseNotesRequest, callback func(response *DescribeCustinsKernelReleaseNotesResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeCustinsKernelReleaseNotesResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeCustinsKernelReleaseNotes(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetOpenNLUWithCallback(request *GetOpenNLURequest, callback func(response *GetOpenNLUResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOpenNLUResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOpenNLU(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) RecognizeVehicleDashboardWithCallback(request *RecognizeVehicleDashboardRequest, callback func(response *RecognizeVehicleDashboardResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *RecognizeVehicleDashboardResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.RecognizeVehicleDashboard(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetCustomerAccountInfoWithChan(request *GetCustomerAccountInfoRequest) (<-chan *GetCustomerAccountInfoResponse, <-chan error) {\nresponseChan := make(chan *GetCustomerAccountInfoResponse, 1)\nerrChan := make(chan error, 1)\nerr := client.AddAsyncTask(func() {\ndefer close(responseChan)\ndefer close(errChan)\nresponse, err := client.GetCustomerAccountInfo(request)\nif err != nil {\nerrChan <- err\n} else {\nresponseChan <- response\n}\n})\nif err != nil {\nerrChan <- err\nclose(responseChan)\nclose(errChan)\n}\nreturn responseChan, errChan\n}",
"func (client *Client) GetIndustryCommerceInfoWithChan(request *GetIndustryCommerceInfoRequest) (<-chan *GetIndustryCommerceInfoResponse, <-chan error) {\n\tresponseChan := make(chan *GetIndustryCommerceInfoResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetIndustryCommerceInfo(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetSessionInfoByTicketWithCallback(request *GetSessionInfoByTicketRequest, callback func(response *GetSessionInfoByTicketResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetSessionInfoByTicketResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetSessionInfoByTicket(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (ovscni *OvsCni) HandleCni(d *render.RenderData) error {\n\n\t//For VlanType=trunk we do not need to do anything\n\tswitch ovscni.VlanType {\n\tcase \"access\":\n\t\tif len(ovscni.L2srvResources) != 1 {\n\t\t\terr := errors.New(\"Cannot use more than one L2Services for VlanType=access case\")\n\t\t\tovscni.Log.Error(err, \"L2Services cannot contain more than one L2Services in VlanType=access case\")\n\t\t\treturn err\n\t\t}\n\t\td.Data[\"AccessVlan\"] = ovscni.L2srvResources[0].Spec.SegmentationID\n\tcase \"selectivetrunk\":\n\t\ttmpList := []string{}\n\t\tfor _, l2srvObj := range ovscni.L2srvResources {\n\t\t\ttmpStr := \"{\\\"id\\\": \" + strconv.Itoa(int(l2srvObj.Spec.SegmentationID)) + \"}\"\n\t\t\ttmpList = append(tmpList, tmpStr)\n\t\t}\n\t\td.Data[\"SelectiveVlan\"] = \"[\" + strings.Join(tmpList, \",\") + \"]\"\n\tcase \"trunk\":\n\t\tovscni.Log.Info(\"Transparent Trunk case in cluster level\")\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetOcJusticeTerminalCaseWithCallback invokes the dt_oc_info.GetOcJusticeTerminalCase API asynchronously
|
func (client *Client) GetOcJusticeTerminalCaseWithCallback(request *GetOcJusticeTerminalCaseRequest, callback func(response *GetOcJusticeTerminalCaseResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *GetOcJusticeTerminalCaseResponse
var err error
defer close(result)
response, err = client.GetOcJusticeTerminalCase(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
|
[
"func (client *Client) GetOcJusticeTerminalCaseWithChan(request *GetOcJusticeTerminalCaseRequest) (<-chan *GetOcJusticeTerminalCaseResponse, <-chan error) {\n\tresponseChan := make(chan *GetOcJusticeTerminalCaseResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetOcJusticeTerminalCase(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetOcJusticeTerminalCase(request *GetOcJusticeTerminalCaseRequest) (response *GetOcJusticeTerminalCaseResponse, err error) {\n\tresponse = CreateGetOcJusticeTerminalCaseResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func CreateGetOcJusticeTerminalCaseResponse() (response *GetOcJusticeTerminalCaseResponse) {\n\tresponse = &GetOcJusticeTerminalCaseResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetOcJusticeTerminalCaseRequest() (request *GetOcJusticeTerminalCaseRequest) {\n\trequest = &GetOcJusticeTerminalCaseRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dt-oc-info\", \"2022-08-29\", \"GetOcJusticeTerminalCase\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *Client) DescribeCustinsKernelReleaseNotesWithCallback(request *DescribeCustinsKernelReleaseNotesRequest, callback func(response *DescribeCustinsKernelReleaseNotesResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeCustinsKernelReleaseNotesResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeCustinsKernelReleaseNotes(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) ListCasesWithCallback(request *ListCasesRequest, callback func(response *ListCasesResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ListCasesResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ListCases(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetOpenNLUWithCallback(request *GetOpenNLURequest, callback func(response *GetOpenNLUResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOpenNLUResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOpenNLU(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetIndustryCommerceInfoWithCallback(request *GetIndustryCommerceInfoRequest, callback func(response *GetIndustryCommerceInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetIndustryCommerceInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetIndustryCommerceInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) RecognizeVehicleDashboardWithCallback(request *RecognizeVehicleDashboardRequest, callback func(response *RecognizeVehicleDashboardResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *RecognizeVehicleDashboardResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.RecognizeVehicleDashboard(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetCurrentTermRepayInfoWithCallback(request *GetCurrentTermRepayInfoRequest, callback func(response *GetCurrentTermRepayInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetCurrentTermRepayInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetCurrentTermRepayInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetWsCustomizedChO2OWithCallback(request *GetWsCustomizedChO2ORequest, callback func(response *GetWsCustomizedChO2OResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChO2OResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChO2O(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func Callback(c *gin.Context) {\n\tprovider := c.Param(\"provider\")\n\n\tvar logincode vo.LoginReq\n\tif err := c.ShouldBindQuery(&logincode); err != nil {\n\t\tfmt.Println(\"xxxx\", err)\n\t}\n\n\tfmt.Println(\"provider\", provider, logincode)\n\n\tuserInfo := vo.GetUserInfoFromOauth(provider, logincode.Code, logincode.State)\n\tfmt.Println(\"get user info\", userInfo)\n\n\tif userInfo == nil {\n\t\tc.JSON(http.StatusOK, sailor.HTTPAirdbResponse{\n\t\t\tCode: enum.AirdbSuccess,\n\t\t\tSuccess: true,\n\t\t\tData: vo.LoginResp{\n\t\t\t\tNickname: \"xxx\",\n\t\t\t\tHeadimgurl: \"xxx.png\",\n\t\t\t},\n\t\t})\n\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, sailor.HTTPAirdbResponse{\n\t\tCode: enum.AirdbSuccess,\n\t\tSuccess: true,\n\t\tData: vo.LoginResp{\n\t\t\tNickname: userInfo.Login,\n\t\t\tHeadimgurl: userInfo.AvatarURL,\n\t\t},\n\t})\n}",
"func (client *Client) GetCustomerAccountInfoWithCallback(request *GetCustomerAccountInfoRequest, callback func(response *GetCustomerAccountInfoResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *GetCustomerAccountInfoResponse\nvar err error\ndefer close(result)\nresponse, err = client.GetCustomerAccountInfo(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}",
"func (client *Client) GetUserNetProfileDescriptionWithCallback(request *GetUserNetProfileDescriptionRequest, callback func(response *GetUserNetProfileDescriptionResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetUserNetProfileDescriptionResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetUserNetProfileDescription(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) DescribeUserVvTopByDayWithCallback(request *DescribeUserVvTopByDayRequest, callback func(response *DescribeUserVvTopByDayResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeUserVvTopByDayResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeUserVvTopByDay(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (h *Handler) oidcCallback(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) {\n\tresult, err := h.GetConfig().Auth.ValidateOIDCAuthCallback(r.URL.Query())\n\tif err != nil {\n\t\th.Warnf(\"Error validating callback: %v.\", err)\n\t\thttp.Redirect(w, r, \"/web/msg/error/login_failed\", http.StatusFound)\n\t\treturn nil, nil\n\t}\n\th.Infof(\"Callback: %v %v %v.\", result.Username, result.Identity, result.Req.Type)\n\treturn nil, h.CallbackHandler(w, r, webapi.CallbackParams{\n\t\tUsername: result.Username,\n\t\tIdentity: result.Identity,\n\t\tSession: result.Session,\n\t\tCert: result.Cert,\n\t\tTLSCert: result.TLSCert,\n\t\tHostSigners: result.HostSigners,\n\t\tType: result.Req.Type,\n\t\tCreateWebSession: result.Req.CreateWebSession,\n\t\tCSRFToken: result.Req.CSRFToken,\n\t\tPublicKey: result.Req.PublicKey,\n\t\tClientRedirectURL: result.Req.ClientRedirectURL,\n\t})\n}",
"func (client *Client) ListCityMapAoisWithCallback(request *ListCityMapAoisRequest, callback func(response *ListCityMapAoisResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ListCityMapAoisResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ListCityMapAois(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) DescribeUserCertificateDetailWithCallback(request *DescribeUserCertificateDetailRequest, callback func(response *DescribeUserCertificateDetailResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeUserCertificateDetailResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeUserCertificateDetail(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetSessionInfoByTicketWithCallback(request *GetSessionInfoByTicketRequest, callback func(response *GetSessionInfoByTicketResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetSessionInfoByTicketResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetSessionInfoByTicket(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CreateGetOcJusticeTerminalCaseRequest creates a request to invoke GetOcJusticeTerminalCase API
|
func CreateGetOcJusticeTerminalCaseRequest() (request *GetOcJusticeTerminalCaseRequest) {
request = &GetOcJusticeTerminalCaseRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("dt-oc-info", "2022-08-29", "GetOcJusticeTerminalCase", "", "")
request.Method = requests.POST
return
}
|
[
"func (client *Client) GetOcJusticeTerminalCase(request *GetOcJusticeTerminalCaseRequest) (response *GetOcJusticeTerminalCaseResponse, err error) {\n\tresponse = CreateGetOcJusticeTerminalCaseResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func CreateGetOcJusticeTerminalCaseResponse() (response *GetOcJusticeTerminalCaseResponse) {\n\tresponse = &GetOcJusticeTerminalCaseResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *Client) GetOcJusticeTerminalCaseWithCallback(request *GetOcJusticeTerminalCaseRequest, callback func(response *GetOcJusticeTerminalCaseResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOcJusticeTerminalCaseResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOcJusticeTerminalCase(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetOcJusticeTerminalCaseWithChan(request *GetOcJusticeTerminalCaseRequest) (<-chan *GetOcJusticeTerminalCaseResponse, <-chan error) {\n\tresponseChan := make(chan *GetOcJusticeTerminalCaseResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetOcJusticeTerminalCase(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func CreateGetOpenNLURequest() (request *GetOpenNLURequest) {\n\trequest = &GetOpenNLURequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetOpenNLU\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateGetWsCustomizedChO2ORequest() (request *GetWsCustomizedChO2ORequest) {\n\trequest = &GetWsCustomizedChO2ORequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChO2O\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateDescribeCustinsKernelReleaseNotesRequest() (request *DescribeCustinsKernelReleaseNotesRequest) {\n\trequest = &DescribeCustinsKernelReleaseNotesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Rds\", \"2014-08-15\", \"DescribeCustinsKernelReleaseNotes\", \"rds\", \"openAPI\")\n\treturn\n}",
"func CreateGetIndustryCommerceInfoRequest() (request *GetIndustryCommerceInfoRequest) {\n\trequest = &GetIndustryCommerceInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2020-10-22\", \"GetIndustryCommerceInfo\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}",
"func CreateListCasesRequest() (request *ListCasesRequest) {\n\trequest = &ListCasesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CCC\", \"2020-07-01\", \"ListCases\", \"CCC\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateDescribeUserCertificateDetailRequest() (request *DescribeUserCertificateDetailRequest) {\n\trequest = &DescribeUserCertificateDetailRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cas\", \"2018-07-13\", \"DescribeUserCertificateDetail\", \"cas\", \"openAPI\")\n\treturn\n}",
"func CreateGetTrailStatusRequest() (request *GetTrailStatusRequest) {\n\trequest = &GetTrailStatusRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Actiontrail\", \"2020-07-06\", \"GetTrailStatus\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateGetCurrentTermRepayInfoRequest() (request *GetCurrentTermRepayInfoRequest) {\n\trequest = &GetCurrentTermRepayInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"finmall\", \"2018-07-23\", \"GetCurrentTermRepayInfo\", \"finmall\", \"openAPI\")\n\treturn\n}",
"func CreateGetKeywordChEcomRequest() (request *GetKeywordChEcomRequest) {\n\trequest = &GetKeywordChEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetKeywordChEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *Datetimerfc1123Client) getUTCLowercaseMaxDateTimeCreateRequest(ctx context.Context, options *Datetimerfc1123ClientGetUTCLowercaseMaxDateTimeOptions) (*policy.Request, error) {\n\turlPath := \"/datetimerfc1123/max/lowercase\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreateGetSessionInfoByTicketRequest() (request *GetSessionInfoByTicketRequest) {\n\trequest = &GetSessionInfoByTicketRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Aas\", \"2015-07-01\", \"GetSessionInfoByTicket\", \"aas\", \"openAPI\")\n\treturn\n}",
"func CreateGetTaxationInfoRequest() (request *GetTaxationInfoRequest) {\n\trequest = &GetTaxationInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2020-10-22\", \"GetTaxationInfo\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}",
"func CreateDescribeIpDdosThresholdRequest() (request *DescribeIpDdosThresholdRequest) {\n\trequest = &DescribeIpDdosThresholdRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"antiddos-public\", \"2017-05-18\", \"DescribeIpDdosThreshold\", \"ddosbasic\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateGetContactRequest() (request *GetContactRequest) {\n\trequest = &GetContactRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Subscription\", \"2021-01-15\", \"GetContact\", \"\", \"\")\n\treturn\n}",
"func CreateGetTaxInfoRequest() (request *GetTaxInfoRequest) {\n\trequest = &GetTaxInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2020-10-22\", \"GetTaxInfo\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CreateGetOcJusticeTerminalCaseResponse creates a response to parse from GetOcJusticeTerminalCase response
|
func CreateGetOcJusticeTerminalCaseResponse() (response *GetOcJusticeTerminalCaseResponse) {
response = &GetOcJusticeTerminalCaseResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
|
[
"func CreateGetOcJusticeTerminalCaseRequest() (request *GetOcJusticeTerminalCaseRequest) {\n\trequest = &GetOcJusticeTerminalCaseRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dt-oc-info\", \"2022-08-29\", \"GetOcJusticeTerminalCase\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *Client) GetOcJusticeTerminalCase(request *GetOcJusticeTerminalCaseRequest) (response *GetOcJusticeTerminalCaseResponse, err error) {\n\tresponse = CreateGetOcJusticeTerminalCaseResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func CreateGetOpenNLUResponse() (response *GetOpenNLUResponse) {\n\tresponse = &GetOpenNLUResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *Client) GetOcJusticeTerminalCaseWithChan(request *GetOcJusticeTerminalCaseRequest) (<-chan *GetOcJusticeTerminalCaseResponse, <-chan error) {\n\tresponseChan := make(chan *GetOcJusticeTerminalCaseResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetOcJusticeTerminalCase(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetOcJusticeTerminalCaseWithCallback(request *GetOcJusticeTerminalCaseRequest, callback func(response *GetOcJusticeTerminalCaseResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOcJusticeTerminalCaseResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOcJusticeTerminalCase(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func CreateGetWsCustomizedChO2OResponse() (response *GetWsCustomizedChO2OResponse) {\n\tresponse = &GetWsCustomizedChO2OResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateDescribeCustinsKernelReleaseNotesResponse() (response *DescribeCustinsKernelReleaseNotesResponse) {\n\tresponse = &DescribeCustinsKernelReleaseNotesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetIndustryCommerceInfoResponse() (response *GetIndustryCommerceInfoResponse) {\n\tresponse = &GetIndustryCommerceInfoResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateListCasesResponse() (response *ListCasesResponse) {\n\tresponse = &ListCasesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateUppateEnvHsfTrafficControlResponse() (response *UppateEnvHsfTrafficControlResponse) {\n\tresponse = &UppateEnvHsfTrafficControlResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateStartNotaryResponse() (response *StartNotaryResponse) {\n\tresponse = &StartNotaryResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetSessionInfoByTicketResponse() (response *GetSessionInfoByTicketResponse) {\n\tresponse = &GetSessionInfoByTicketResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateCreateEnvHsfTrafficControlResponse() (response *CreateEnvHsfTrafficControlResponse) {\n\tresponse = &CreateEnvHsfTrafficControlResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetCurrentTermRepayInfoResponse() (response *GetCurrentTermRepayInfoResponse) {\n\tresponse = &GetCurrentTermRepayInfoResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateOemSitingSelctionResponse() (response *OemSitingSelctionResponse) {\n\tresponse = &OemSitingSelctionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateIntelligentCompositionResponse() (response *IntelligentCompositionResponse) {\n\tresponse = &IntelligentCompositionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetContactResponse() (response *GetContactResponse) {\n\tresponse = &GetContactResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateListCityMapAoisResponse() (response *ListCityMapAoisResponse) {\n\tresponse = &ListCityMapAoisResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateDescribeUserCertificateDetailResponse() (response *DescribeUserCertificateDetailResponse) {\n\tresponse = &DescribeUserCertificateDetailResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetCmdQueryProfile queries a profile from the given address or dtag
|
func GetCmdQueryProfile(cdc *codec.Codec) *cobra.Command {
return &cobra.Command{
Use: "profile [address_or_dtag]",
Short: "Retrieve the profile having the specified user address or profile dtag, if any.",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
cliCtx := context.NewCLIContext().WithCodec(cdc)
route := fmt.Sprintf("custom/%s/%s/%s", types.QuerierRoute, types.QueryProfile, args[0])
res, _, err := cliCtx.QueryWithData(route, nil)
if err != nil {
fmt.Printf("Could not find a profile with dtag %s \n", args[0])
return nil
}
var out types.Profile
cdc.MustUnmarshalJSON(res, &out)
return cliCtx.PrintOutput(out)
},
}
}
|
[
"func queryProfile(ctx sdk.Context, path []string, _ abci.RequestQuery, keeper Keeper) ([]byte, error) {\n\tif len(strings.TrimSpace(path[0])) == 0 {\n\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, \"DTag or address cannot be empty or blank\")\n\t}\n\n\taddress, err := sdk.AccAddressFromBech32(path[0])\n\tif err != nil {\n\t\taddress = keeper.GetDtagRelatedAddress(ctx, path[0])\n\t\tif address == nil {\n\t\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, fmt.Sprintf(\"No address related to this dtag: %s\", path[0]))\n\t\t}\n\n\t}\n\n\taccount, found := keeper.GetProfile(ctx, address)\n\n\tif !found {\n\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest,\n\t\t\tfmt.Sprintf(\"Profile with address %s doesn't exists\", path[0]))\n\t}\n\n\tbz, err := codec.MarshalJSONIndent(keeper.Cdc, &account)\n\tif err != nil {\n\t\tpanic(\"could not marshal result to JSON\")\n\t}\n\n\treturn bz, nil\n}",
"func (s *SmartContract) QueryProfile(ctx contractapi.TransactionContextInterface, carNumber string) (*Profile, error) {\n\tcarAsBytes, err := ctx.GetStub().GetState(\"profile-\" + carNumber)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read from world state. %s\", err.Error())\n\t}\n\n\tif carAsBytes == nil {\n\t\treturn nil, fmt.Errorf(\"%s does not exist\", carNumber)\n\t}\n\n\tcar := new(Profile)\n\t_ = json.Unmarshal(carAsBytes, car)\n\n\treturn car, nil\n}",
"func GetCmdQueryProfileParams(cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"parameters\",\n\t\tShort: \"Retrieve all the profile module parameters\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\troute := fmt.Sprintf(\"custom/%s/%s\", types.QuerierRoute, types.QueryParams)\n\t\t\tres, _, err := cliCtx.QueryWithData(route, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Could not find profile parameters\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar out types.Params\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n}",
"func GetCmdQueryProfiles(cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"all\",\n\t\tShort: \"Retrieve all the registered profiles.\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\troute := fmt.Sprintf(\"custom/%s/%s\", types.QuerierRoute, types.QueryProfiles)\n\t\t\tres, _, err := cliCtx.QueryWithData(route, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Could not find any profile\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar out types.Profiles\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n}",
"func (u *User) QueryProfile() *ProfileQuery {\n\treturn (&UserClient{config: u.config}).QueryProfile(u)\n}",
"func (ctx *Context) ProfileQuery(query string, profileLabel string) QueryResultsChan {\n\tresCh := make(QueryResultsChan)\n\n\tgo runQuery(query, ctx, resCh, profileLabel)\n\n\treturn resCh\n}",
"func queryProfileParams(ctx sdk.Context, _ abci.RequestQuery, keeper Keeper) ([]byte, error) {\n\tprofileParams := keeper.GetParams(ctx)\n\n\tbz, err := codec.MarshalJSONIndent(keeper.Cdc, &profileParams)\n\tif err != nil {\n\t\tpanic(\"could not marshal result to JSON\")\n\t}\n\n\treturn bz, nil\n}",
"func (svc *inmemService) GetProfile(ctx context.Context, id string) (Profile, error) {\n\t// Get the Read lock from the inmemService struct\n\tsvc.mtx.RLock()\n\n\t// Immediately set up a lock release to occur when the function finishes\n\tdefer svc.mtx.RUnlock()\n\n\t// Look for the profile by the `id` function param\n\tprofile, ok := svc.profiles[id]\n\n\t// Check if the profile id was not found in the datastore\n\tif !ok {\n\n\t\t// Return an empty profile and an error informing the caller that the profile was not found\n\t\treturn Profile{}, ErrNotFound\n\n\t}\n\n\t// Return the profile to the caller and a nil error\n\treturn profile, nil\n\n}",
"func GetProfile(c *gin.Context) {\n\tvar user models.User\n\tif err := c.BindQuery(&user); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": \"Check input NUSNET_ID\"})\n\t\tfmt.Println(\"Error in reading input NUSNET_ID. \" + err.Error() + \"\\n\")\n\t\treturn\n\t}\n\n\t// get account from user input\n\taccount, exists, err := GetAccountDetailed(DB, user)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": \"Check database query\"})\n\t\tfmt.Println(\"Error in retrieving profile details from Database. \" + err.Error() + \"\\n\")\n\t\treturn\n\t}\n\tif !exists {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"success\": false, \"message\": \"Account does not exist\"})\n\t\tfmt.Println(\"Account does not exist.\")\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": account})\n\tfmt.Println(\"Successfully retrieved profile details.\")\n}",
"func (a *DeviceProfileServiceAPI) Get(ctx context.Context, req *pb.GetDeviceProfileRequest) (*pb.GetDeviceProfileResponse, error) {\n\tdpID, err := uuid.FromString(req.Id)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"uuid error: %s\", err)\n\t}\n\tdp, err := a.st.GetDeviceProfile(ctx, dpID, false)\n\tif err != nil {\n\t\treturn nil, helpers.ErrToRPCError(err)\n\t}\n\n\tcred, err := a.auth.GetCredentials(ctx, auth.NewOptions().WithOrgID(dp.OrganizationID))\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Unauthenticated, \"authentication failed: %v\", err)\n\t}\n\tif !cred.IsGlobalAdmin && !cred.IsOrgUser {\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"permission denied\")\n\t}\n\n\tnsClient, err := a.nsCli.GetNetworkServerServiceClient(dp.NetworkServerID)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, err.Error())\n\t}\n\tres, err := nsClient.GetDeviceProfile(ctx, &ns.GetDeviceProfileRequest{\n\t\tId: dpID.Bytes(),\n\t})\n\tif err != nil {\n\t\treturn nil, helpers.ErrToRPCError(err)\n\t}\n\tif res.DeviceProfile == nil {\n\t\treturn nil, helpers.ErrToRPCError(err)\n\t}\n\n\tdp.DeviceProfile = *res.DeviceProfile\n\n\tresp := pb.GetDeviceProfileResponse{\n\t\tDeviceProfile: &pb.DeviceProfile{\n\t\t\tId: dpID.String(),\n\t\t\tName: dp.Name,\n\t\t\tOrganizationId: dp.OrganizationID,\n\t\t\tNetworkServerId: dp.NetworkServerID,\n\t\t\tPayloadCodec: string(dp.PayloadCodec),\n\t\t\tPayloadEncoderScript: dp.PayloadEncoderScript,\n\t\t\tPayloadDecoderScript: dp.PayloadDecoderScript,\n\t\t\tSupportsClassB: dp.DeviceProfile.SupportsClassB,\n\t\t\tClassBTimeout: dp.DeviceProfile.ClassBTimeout,\n\t\t\tPingSlotPeriod: dp.DeviceProfile.PingSlotPeriod,\n\t\t\tPingSlotDr: dp.DeviceProfile.PingSlotDr,\n\t\t\tPingSlotFreq: dp.DeviceProfile.PingSlotFreq,\n\t\t\tSupportsClassC: dp.DeviceProfile.SupportsClassC,\n\t\t\tClassCTimeout: dp.DeviceProfile.ClassCTimeout,\n\t\t\tMacVersion: 
dp.DeviceProfile.MacVersion,\n\t\t\tRegParamsRevision: dp.DeviceProfile.RegParamsRevision,\n\t\t\tRxDelay_1: dp.DeviceProfile.RxDelay_1,\n\t\t\tRxDrOffset_1: dp.DeviceProfile.RxDrOffset_1,\n\t\t\tRxDatarate_2: dp.DeviceProfile.RxDatarate_2,\n\t\t\tRxFreq_2: dp.DeviceProfile.RxFreq_2,\n\t\t\tMaxEirp: dp.DeviceProfile.MaxEirp,\n\t\t\tMaxDutyCycle: dp.DeviceProfile.MaxDutyCycle,\n\t\t\tSupportsJoin: dp.DeviceProfile.SupportsJoin,\n\t\t\tRfRegion: dp.DeviceProfile.RfRegion,\n\t\t\tSupports_32BitFCnt: dp.DeviceProfile.Supports_32BitFCnt,\n\t\t\tFactoryPresetFreqs: dp.DeviceProfile.FactoryPresetFreqs,\n\t\t\tTags: make(map[string]string),\n\t\t\tUplinkInterval: ptypes.DurationProto(dp.UplinkInterval),\n\t\t},\n\t}\n\n\tresp.CreatedAt = timestamppb.New(dp.CreatedAt)\n\tresp.UpdatedAt = timestamppb.New(dp.UpdatedAt)\n\n\tfor k, v := range dp.Tags.Map {\n\t\tresp.DeviceProfile.Tags[k] = v.String\n\t}\n\n\treturn &resp, nil\n}",
"func (repository *Datastore)GetProfile(username string)(*user.Person,error){\n\tperson := newUser() //initialize user.Person and will used to store profile info\n\tquery := `SELECT * FROM userRepository WHERE username = ?`\n\terr := repository.Db.Get(&person, query, username) //get person profile details\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &person, nil\n}",
"func (k Keeper) GetProfile(ctx sdk.Context, address string) (profile *types.Profile, found bool, err error) {\n\tsdkAcc, err := sdk.AccAddressFromBech32(address)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tstored, ok := k.ak.GetAccount(ctx, sdkAcc).(*types.Profile)\n\tif !ok {\n\t\treturn nil, false, nil\n\t}\n\n\treturn stored, true, nil\n}",
"func GetCmdQueryTxPairStats() *cobra.Command {\n\tbech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix()\n\n\tcmd := &cobra.Command{\n\t\tUse: \"tx-pair-stats [pool]\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tShort: \"Query an tx-pair from a pool\",\n\t\tLong: strings.TrimSpace(\n\t\t\tfmt.Sprintf(`Query a tx-pair from a pool.\n\nExample:\n$ %s query %s tx-pair-stats %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj\n`,\n\t\t\t\tversion.AppName, types.ModuleName, bech32PrefixAccAddr,\n\t\t\t),\n\t\t),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tclientCtx, err := client.GetClientQueryContext(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tqueryClient := types.NewQueryClient(clientCtx)\n\n\t\t\tpoolAddr, err := sdk.AccAddressFromBech32(args[0])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpageReq, err := client.ReadPageRequest(cmd.Flags())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tparams := &types.QueryTxPairsStatsRequest{PoolAddress: poolAddr.String(), Pagination: pageReq}\n\t\t\tres, err := queryClient.TxPairsStats(context.Background(), params)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn clientCtx.PrintProto(res)\n\t\t},\n\t}\n\n\tflags.AddQueryFlagsToCmd(cmd)\n\tflags.AddPaginationFlagsToCmd(cmd, \"txPairStats\")\n\n\treturn cmd\n}",
"func GetProfile(ctx *router.Context) {\n\tuin, _ := ctx.Keys[middleware.ComerUinContextKey].(uint64)\n\tresponse, err := service.GetComerProfile(uin)\n\tif err != nil {\n\t\tctx.ERROR(\n\t\t\trouter.ErrBuisnessError,\n\t\t\t\"wrong metamask login parameter\",\n\t\t)\n\t\treturn\n\t}\n\n\tctx.OK(response)\n}",
"func (a *DeviceAPI) GetDeviceProfile(ctx context.Context, req *api.GetDSDeviceProfileRequest) (*api.GetDSDeviceProfileResponse, error) {\n\tlogInfo := \"api/appserver_serves_ui/GetDeviceProfile org=\" + strconv.FormatInt(req.OrgId, 10)\n\n\t// verify if user is global admin\n\tu, err := devmod.NewValidator(a.st).GetUser(ctx)\n\tif err != nil {\n\t\tlog.WithError(err).Error(logInfo)\n\t\treturn &api.GetDSDeviceProfileResponse{}, status.Errorf(codes.Internal, \"unable to verify user: %s\", err.Error())\n\t}\n\t// is user is not global admin, user must have accesss to this organization\n\tif !u.IsGlobalAdmin {\n\t\tif valid, err := organization.NewValidator(a.st).ValidateOrganizationAccess(ctx, authcus.Read, req.OrgId); !valid || err != nil {\n\t\t\treturn &api.GetDSDeviceProfileResponse{}, status.Errorf(codes.Unauthenticated, \"authentication failed: %s\", err)\n\t\t}\n\t}\n\n\tdevClient := mxpcli.Global.GetM2MDeviceServiceClient()\n\n\tresp, err := devClient.GetDeviceProfile(ctx, &pb.GetDSDeviceProfileRequest{\n\t\tOrgId: req.OrgId,\n\t\tDevId: req.DevId,\n\t})\n\tif err != nil {\n\t\tlog.WithError(err).Error(logInfo)\n\t\treturn &api.GetDSDeviceProfileResponse{}, status.Errorf(codes.Unavailable, err.Error())\n\t}\n\n\treturn &api.GetDSDeviceProfileResponse{\n\t\tDevProfile: &api.DSDeviceProfile{\n\t\t\tId: resp.DevProfile.Id,\n\t\t\tDevEui: resp.DevProfile.DevEui,\n\t\t\tFkWallet: resp.DevProfile.FkWallet,\n\t\t\tMode: api.DeviceMode(resp.DevProfile.Mode),\n\t\t\tCreatedAt: resp.DevProfile.CreatedAt,\n\t\t\tLastSeenAt: resp.DevProfile.LastSeenAt,\n\t\t\tApplicationId: resp.DevProfile.ApplicationId,\n\t\t\tName: resp.DevProfile.Name,\n\t\t},\n\t}, status.Error(codes.OK, \"\")\n}",
"func (dpc *deviceProfileRestClient) requestDeviceProfile(\n\tctx context.Context,\n\turlSuffix string) (models.DeviceProfile, error) {\n\n\tdata, err := clients.GetRequest(ctx, urlSuffix, dpc.urlClient)\n\tif err != nil {\n\t\treturn models.DeviceProfile{}, err\n\t}\n\n\tdp := models.DeviceProfile{}\n\terr = json.Unmarshal(data, &dp)\n\treturn dp, err\n}",
"func (pu *ProfileUKM) QueryOwnerProfile() *ProfileQuery {\n\treturn (&ProfileUKMClient{config: pu.config}).QueryOwnerProfile(pu)\n}",
"func (nq *N1qlQuery) Profile(profileMode QueryProfileType) *N1qlQuery {\n\tnq.options[\"profile\"] = profileMode\n\treturn nq\n}",
"func (mc MongoClient) getDeviceProfiles(q bson.M) ([]contract.DeviceProfile, error) {\n\ts := mc.session.Copy()\n\tdefer s.Close()\n\n\tvar dps []models.DeviceProfile\n\terr := s.DB(mc.database.Name).C(db.DeviceProfile).Find(q).Sort(\"queryts\").All(&dps)\n\tif err != nil {\n\t\treturn []contract.DeviceProfile{}, errorMap(err)\n\t}\n\n\tcdps := make([]contract.DeviceProfile, 0)\n\tfor _, dp := range dps {\n\t\tc, err := dp.ToContract()\n\t\tif err != nil {\n\t\t\treturn []contract.DeviceProfile{}, err\n\t\t}\n\t\tcdps = append(cdps, c)\n\t}\n\treturn cdps, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetCmdQueryProfiles queries all the profiles
|
func GetCmdQueryProfiles(cdc *codec.Codec) *cobra.Command {
return &cobra.Command{
Use: "all",
Short: "Retrieve all the registered profiles.",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
cliCtx := context.NewCLIContext().WithCodec(cdc)
route := fmt.Sprintf("custom/%s/%s", types.QuerierRoute, types.QueryProfiles)
res, _, err := cliCtx.QueryWithData(route, nil)
if err != nil {
fmt.Printf("Could not find any profile")
return nil
}
var out types.Profiles
cdc.MustUnmarshalJSON(res, &out)
return cliCtx.PrintOutput(out)
},
}
}
|
[
"func (s *SmartContract) QueryAllProfiles(ctx contractapi.TransactionContextInterface) ([]QueryProfileResult, error) {\n\tstartKey := \"\"\n\tendKey := \"\"\n\n\tresultsIterator, err := ctx.GetStub().GetStateByRange(startKey, endKey)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tresults := []QueryProfileResult{}\n\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err := resultsIterator.Next()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif strings.Contains(queryResponse.Key, \"profile-\"){\n\t\t\tprofile := new(UserProfile)\n\t\t\t_ = json.Unmarshal(queryResponse.Value, profile)\n\n\t\t\tqueryResult := QueryProfileResult{Key: queryResponse.Key, Record: profile}\n\t\t\tresults = append(results, queryResult)\n\t\t}\n\t\t\n\t}\n\n\treturn results, nil\n}",
"func GetCmdQueryProfileParams(cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"parameters\",\n\t\tShort: \"Retrieve all the profile module parameters\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\troute := fmt.Sprintf(\"custom/%s/%s\", types.QuerierRoute, types.QueryParams)\n\t\t\tres, _, err := cliCtx.QueryWithData(route, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Could not find profile parameters\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar out types.Params\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n}",
"func getProfiles(ctx context.Context, stmt *sql.Stmt, args ...any) ([]Profile, error) {\n\tobjects := make([]Profile, 0)\n\n\tdest := func(scan func(dest ...any) error) error {\n\t\tp := Profile{}\n\t\terr := scan(&p.ID, &p.ProjectID, &p.Project, &p.Name, &p.Description)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobjects = append(objects, p)\n\n\t\treturn nil\n\t}\n\n\terr := query.SelectObjects(ctx, stmt, dest, args...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to fetch from \\\"profiles\\\" table: %w\", err)\n\t}\n\n\treturn objects, nil\n}",
"func GetCmdQueryProfile(cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"profile [address_or_dtag]\",\n\t\tShort: \"Retrieve the profile having the specified user address or profile dtag, if any.\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\troute := fmt.Sprintf(\"custom/%s/%s/%s\", types.QuerierRoute, types.QueryProfile, args[0])\n\t\t\tres, _, err := cliCtx.QueryWithData(route, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Could not find a profile with dtag %s \\n\", args[0])\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar out types.Profile\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n}",
"func FetchProfiles() map[string]string {\n\ttoken := auth.NewToken()\n\tquery := queryPayload{\"SELECT Id, ProfileName FROM CommunicationProfile\"}\n\tpayload, err := json.Marshal(query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treq, err := http.NewRequest(\"POST\", viper.GetString(\"baseurl\")+\"/v1/action/query\", bytes.NewBuffer(payload))\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \"+token.Val)\n\n\tresponse, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != 200 {\n\t\tbody, err := ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Fatal(string(body))\n\t}\n\n\tdec := json.NewDecoder(response.Body)\n\tvar body profilesQueryResponse\n\tif err = dec.Decode(&body); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif !body.Done {\n\t\tlog.Fatalln(\"there are more communication profile to query\")\n\t}\n\n\tresult := make(map[string]string)\n\tfor _, p := range body.Records {\n\t\tresult[p.Name] = p.ID\n\t}\n\n\treturn result\n}",
"func (c *OVClient) GetProfiles(start string, count string, filter string, sort string, scopeUris string) (ServerProfileList, error) {\n\tvar (\n\t\turi = \"/rest/server-profiles\"\n\t\tq map[string]interface{}\n\t\tprofiles ServerProfileList\n\t)\n\tq = make(map[string]interface{})\n\tif len(filter) > 0 {\n\t\tq[\"filter\"] = filter\n\t}\n\n\tif sort != \"\" {\n\t\tq[\"sort\"] = sort\n\t}\n\n\tif start != \"\" {\n\t\tq[\"start\"] = start\n\t}\n\n\tif count != \"\" {\n\t\tq[\"count\"] = count\n\t}\n\n\tif scopeUris != \"\" {\n\t\tq[\"scopeUris\"] = scopeUris\n\t}\n\n\t// refresh login\n\tc.RefreshLogin()\n\tc.SetAuthHeaderOptions(c.GetAuthHeaderMap())\n\t// Setup query\n\tif len(q) > 0 {\n\t\tc.SetQueryString(q)\n\t}\n\tdata, err := c.RestAPICall(rest.GET, uri, nil)\n\tif err != nil {\n\t\treturn profiles, err\n\t}\n\n\tlog.Debugf(\"GetProfiles %s\", data)\n\tif err := json.Unmarshal([]byte(data), &profiles); err != nil {\n\t\treturn profiles, err\n\t}\n\treturn profiles, nil\n}",
"func GetProfiles() []Profile {\n\t// Get a session.\n\tsession := getSession()\n\tdefer session.Close()\n\n\tc := session.DB(\"ProfileService\").C(\"Profiles\")\n\n\tvar profiles []Profile\n\t// NOTE: Indent your code\n\t// TODO: Do you really think not checking your error is appropriate here, if am running this\n\t// on production server, my whole app will have a fault because i dont even know\n\t// if i found a valid record.\n\t// Those that make sense to you? O_O\n\terr = c.Find(bson.M{}).All(&profiles)\n\n\treturn profiles\n}",
"func listProfiles(ctx context.Context, _ []string) error {\n\tm, err := cmdutils.LoadManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%15s %s\\n\", \"ID\", \"NAME\")\n\tfor _, p := range m.Profiles() {\n\t\tfmt.Printf(\"%15s %s\\n\", p.Id, p.Name)\n\t}\n\n\treturn nil\n}",
"func (mc MongoClient) getDeviceProfiles(q bson.M) ([]contract.DeviceProfile, error) {\n\ts := mc.session.Copy()\n\tdefer s.Close()\n\n\tvar dps []models.DeviceProfile\n\terr := s.DB(mc.database.Name).C(db.DeviceProfile).Find(q).Sort(\"queryts\").All(&dps)\n\tif err != nil {\n\t\treturn []contract.DeviceProfile{}, errorMap(err)\n\t}\n\n\tcdps := make([]contract.DeviceProfile, 0)\n\tfor _, dp := range dps {\n\t\tc, err := dp.ToContract()\n\t\tif err != nil {\n\t\t\treturn []contract.DeviceProfile{}, err\n\t\t}\n\t\tcdps = append(cdps, c)\n\t}\n\treturn cdps, nil\n}",
"func (s *Server) GetProfiles(ctx context.Context, req *pb.Request) (*pb.Result, error) {\n\t// session, err := mgo.Dial(\"mongodb-profile\")\n\t// if err != nil {\n\t// \tpanic(err)\n\t// }\n\t// defer session.Close()\n\n\tlog.Trace().Msgf(\"In GetProfiles\")\n\n\tres := new(pb.Result)\n\thotels := make([]*pb.Hotel, 0)\n\tvar wg sync.WaitGroup\n\tvar mutex sync.Mutex\n\n\t// one hotel should only have one profile\n\thotelIds := make([]string, 0)\n\tprofileMap := make(map[string]struct{})\n\tfor _, hotelId := range req.HotelIds {\n\t\thotelIds = append(hotelIds, hotelId)\n\t\tprofileMap[hotelId] = struct{}{}\n\t}\n\tmemSpan, _ := opentracing.StartSpanFromContext(ctx, \"memcached_get_profile\")\n\tmemSpan.SetTag(\"span.kind\", \"client\")\n\tresMap, err := s.MemcClient.GetMulti(hotelIds)\n\tmemSpan.Finish()\n\tif err != nil && err != memcache.ErrCacheMiss {\n\t\tlog.Panic().Msgf(\"Tried to get hotelIds [%v], but got memmcached error = %s\", hotelIds, err)\n\t} else {\n\t\tfor hotelId, item := range resMap {\n\t\t\tprofileStr := string(item.Value)\n\t\t\tlog.Trace().Msgf(\"memc hit with %v\", profileStr)\n\n\t\t\thotelProf := new(pb.Hotel)\n\t\t\tjson.Unmarshal(item.Value, hotelProf)\n\t\t\thotels = append(hotels, hotelProf)\n\t\t\tdelete(profileMap, hotelId)\n\t\t}\n\n\t\twg.Add(len(profileMap))\n\t\tfor hotelId := range profileMap {\n\t\t\tgo func(hotelId string) {\n\t\t\t\tsession := s.MongoSession.Copy()\n\t\t\t\tdefer session.Close()\n\t\t\t\tc := session.DB(\"profile-db\").C(\"hotels\")\n\n\t\t\t\thotelProf := new(pb.Hotel)\n\t\t\t\tmongoSpan, _ := opentracing.StartSpanFromContext(ctx, \"mongo_profile\")\n\t\t\t\tmongoSpan.SetTag(\"span.kind\", \"client\")\n\t\t\t\terr := c.Find(bson.M{\"id\": hotelId}).One(&hotelProf)\n\t\t\t\tmongoSpan.Finish()\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Msgf(\"Failed get hotels data: \", err)\n\t\t\t\t}\n\n\t\t\t\tmutex.Lock()\n\t\t\t\thotels = append(hotels, hotelProf)\n\t\t\t\tmutex.Unlock()\n\n\t\t\t\tprofJson, err 
:= json.Marshal(hotelProf)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Msgf(\"Failed to marshal hotel [id: %v] with err:\", hotelProf.Id, err)\n\t\t\t\t}\n\t\t\t\tmemcStr := string(profJson)\n\n\t\t\t\t// write to memcached\n\t\t\t\tgo s.MemcClient.Set(&memcache.Item{Key: hotelId, Value: []byte(memcStr)})\n\t\t\t\tdefer wg.Done()\n\t\t\t}(hotelId)\n\t\t}\n\t}\n\twg.Wait()\n\n\tres.Hotels = hotels\n\tlog.Trace().Msgf(\"In GetProfiles after getting resp\")\n\treturn res, nil\n}",
"func GetProfiles(params preferences.GetProfilesParams) middleware.Responder {\n\tfmt.Printf(\"Starting --> GetProfiles id:%s and %d\\n\", params.ID, len(params.ID))\n\n\tif len(params.ID) > 0 {\n\t\tfmt.Printf(\"\\tIn GetPRofiles by ID\\n\")\n\n\t\tids := params.ID\n\n\t\tprofiles := make([]*models.Profile, 0, len(ids))\n\t\tfor _, id := range ids {\n\t\t\tprofile, err := store.ReadLatestProfile(id)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"\\tError: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprofiles = append(profiles, profile)\n\t\t}\n\n\t\tresponse := preferences.NewGetProfilesOK()\n\t\tresponse.SetPayload(profiles)\n\t\treturn response\n\t} else if params.Ownerid != nil {\n\n\t\tfmt.Printf(\"Starting --> GetProfiles Owner:%s\\n\", *params.Ownerid)\n\t\towner := *params.Ownerid\n\t\tprofiles, err := store.ReadLatestProfilesForOwner(owner)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"\\tError: %s\\n\", err.Error())\n\t\t\treturn preferences.NewGetProfilesInternalServerError()\n\t\t}\n\n\t\tresponse := preferences.NewGetProfilesOK()\n\t\tresponse.SetPayload(profiles)\n\t\treturn response\n\t}\n\treturn preferences.NewGetProfilesBadRequest()\n\n}",
"func (k Keeper) GetAllProfile(ctx sdk.Context) (list []types.Profile) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.ProfileKey))\n\titerator := sdk.KVStorePrefixIterator(store, []byte{})\n\n\tdefer iterator.Close()\n\n\tfor ; iterator.Valid(); iterator.Next() {\n\t\tvar val types.Profile\n\t\tk.cdc.MustUnmarshalBinaryBare(iterator.Value(), &val)\n\t\tlist = append(list, val)\n\t}\n\n\treturn\n}",
"func GetProfiles() *[]Profile {\n\t// Get a session.\n\tsession := getSession()\n\tdefer session.Close()\n\n\tc := session.DB(\"ProfileService\").C(\"Profiles\")\n\n\tvar profiles []Profile\n\n\terr := c.Find(bson.M{}).All(&profiles)\n\tif err != nil {\n\t\tlog.Println(\"Error getting profiles: \", err.Error())\n\t\treturn &profiles\n\t}\n\n\treturn &profiles\n}",
"func getProfilesRaw(ctx context.Context, tx *sql.Tx, sql string, args ...any) ([]Profile, error) {\n\tobjects := make([]Profile, 0)\n\n\tdest := func(scan func(dest ...any) error) error {\n\t\tp := Profile{}\n\t\terr := scan(&p.ID, &p.ProjectID, &p.Project, &p.Name, &p.Description)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobjects = append(objects, p)\n\n\t\treturn nil\n\t}\n\n\terr := query.Scan(ctx, tx, sql, dest, args...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to fetch from \\\"profiles\\\" table: %w\", err)\n\t}\n\n\treturn objects, nil\n}",
"func (a *App) GetAllProfiles(w http.ResponseWriter, r *http.Request) {\n\thandler.GetAllProfiles(a.DB, w, r)\n}",
"func (s *HighAvailabilityService) GetAvailabilityProfilesCommand(input *GetAvailabilityProfilesCommandInput) (output *models.AvailabilityProfilesView, resp *http.Response, err error) {\n\tpath := \"/highAvailability/availabilityProfiles\"\n\top := &request.Operation{\n\t\tName: \"GetAvailabilityProfilesCommand\",\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: path,\n\t\tQueryParams: map[string]string{\n\t\t\t\"page\": input.Page,\n\t\t\t\"numberPerPage\": input.NumberPerPage,\n\t\t\t\"filter\": input.Filter,\n\t\t\t\"name\": input.Name,\n\t\t\t\"sortKey\": input.SortKey,\n\t\t\t\"order\": input.Order,\n\t\t},\n\t}\n\toutput = &models.AvailabilityProfilesView{}\n\treq := s.newRequest(op, nil, output)\n\n\tif req.Send() == nil {\n\t\treturn output, req.HTTPResponse, nil\n\t}\n\treturn nil, req.HTTPResponse, req.Error\n}",
"func queryProfileParams(ctx sdk.Context, _ abci.RequestQuery, keeper Keeper) ([]byte, error) {\n\tprofileParams := keeper.GetParams(ctx)\n\n\tbz, err := codec.MarshalJSONIndent(keeper.Cdc, &profileParams)\n\tif err != nil {\n\t\tpanic(\"could not marshal result to JSON\")\n\t}\n\n\treturn bz, nil\n}",
"func (s *Scraper) SearchProfiles(ctx context.Context, query string, maxProfilesNbr int) <-chan *ProfileResult {\n\treturn getUserTimeline(ctx, query, maxProfilesNbr, s.FetchSearchProfiles)\n}",
"func (s *Service) ProfilesGet(userID string) *ProfilesGetOp {\n\treturn &ProfilesGetOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"GET\",\n\t\tPath: strings.Join([]string{\"users\", userID, \"profile\"}, \"/\"),\n\t\tAccept: \"application/json\",\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.APIv21,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetCmdQueryProfileParams queries all the profiles' module params
|
func GetCmdQueryProfileParams(cdc *codec.Codec) *cobra.Command {
return &cobra.Command{
Use: "parameters",
Short: "Retrieve all the profile module parameters",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
cliCtx := context.NewCLIContext().WithCodec(cdc)
route := fmt.Sprintf("custom/%s/%s", types.QuerierRoute, types.QueryParams)
res, _, err := cliCtx.QueryWithData(route, nil)
if err != nil {
fmt.Printf("Could not find profile parameters")
return nil
}
var out types.Params
cdc.MustUnmarshalJSON(res, &out)
return cliCtx.PrintOutput(out)
},
}
}
|
[
"func queryProfileParams(ctx sdk.Context, _ abci.RequestQuery, keeper Keeper) ([]byte, error) {\n\tprofileParams := keeper.GetParams(ctx)\n\n\tbz, err := codec.MarshalJSONIndent(keeper.Cdc, &profileParams)\n\tif err != nil {\n\t\tpanic(\"could not marshal result to JSON\")\n\t}\n\n\treturn bz, nil\n}",
"func GetCmdQueryProfiles(cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"all\",\n\t\tShort: \"Retrieve all the registered profiles.\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\troute := fmt.Sprintf(\"custom/%s/%s\", types.QuerierRoute, types.QueryProfiles)\n\t\t\tres, _, err := cliCtx.QueryWithData(route, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Could not find any profile\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar out types.Profiles\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n}",
"func GetCmdQueryProfile(cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"profile [address_or_dtag]\",\n\t\tShort: \"Retrieve the profile having the specified user address or profile dtag, if any.\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\troute := fmt.Sprintf(\"custom/%s/%s/%s\", types.QuerierRoute, types.QueryProfile, args[0])\n\t\t\tres, _, err := cliCtx.QueryWithData(route, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Could not find a profile with dtag %s \\n\", args[0])\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar out types.Profile\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n}",
"func GetProfileParameters() (ProfileParameters, Alerts) {\n\treturn ProfileParameters{}, Alerts{}\n}",
"func GetCmdQueryParams() *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"params\",\n\t\tShort: fmt.Sprintf(\"get the %s module parameters\", types.ModuleName),\n\t\tLong: \"Get the current issuance module parameters.\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tclientCtx, err := client.GetClientQueryContext(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tqueryClient := types.NewQueryClient(clientCtx)\n\n\t\t\tres, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn clientCtx.PrintProto(&res.Params)\n\t\t},\n\t}\n}",
"func GetCmdQueryParams(cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"params\",\n\t\tArgs: cobra.NoArgs,\n\t\tShort: \"Query the current staking parameters information\",\n\t\tLong: strings.TrimSpace(\n\t\t\tfmt.Sprintf(`Query values set as staking parameters.\n\nExample:\n$ %s query staking params\n`,\n\t\t\t\tversion.ClientName,\n\t\t\t),\n\t\t),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\troute := fmt.Sprintf(\"custom/%s/%s\", types.StoreKey, staking.QueryParameters)\n\t\t\tbz, _, err := cliCtx.QueryWithData(route, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar mergedParams types.MergedParams\n\t\t\tcdc.MustUnmarshalJSON(bz, &mergedParams)\n\t\t\treturn cliCtx.PrintOutput(mergedParams)\n\t\t},\n\t}\n}",
"func (s *SmartContract) QueryAllProfiles(ctx contractapi.TransactionContextInterface) ([]QueryProfileResult, error) {\n\tstartKey := \"\"\n\tendKey := \"\"\n\n\tresultsIterator, err := ctx.GetStub().GetStateByRange(startKey, endKey)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tresults := []QueryProfileResult{}\n\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err := resultsIterator.Next()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif strings.Contains(queryResponse.Key, \"profile-\"){\n\t\t\tprofile := new(UserProfile)\n\t\t\t_ = json.Unmarshal(queryResponse.Value, profile)\n\n\t\t\tqueryResult := QueryProfileResult{Key: queryResponse.Key, Record: profile}\n\t\t\tresults = append(results, queryResult)\n\t\t}\n\t\t\n\t}\n\n\treturn results, nil\n}",
"func NewGetPbxDeviceProfileitemsParams() *GetPbxDeviceProfileitemsParams {\n\tvar ()\n\treturn &GetPbxDeviceProfileitemsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func GetCmdQueryParams() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"params\",\n\t\tShort: \"Query the current slashing parameters\",\n\t\tArgs: cobra.NoArgs,\n\t\tLong: strings.TrimSpace(`Query genesis parameters for the slashing module:\n\n$ <appcli> query slashing params\n`),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tclientCtx := client.GetClientContextFromCmd(cmd)\n\t\t\tclientCtx, err := client.ReadQueryCommandFlags(clientCtx, cmd.Flags())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\troute := fmt.Sprintf(\"custom/%s/parameters\", types.StoreKey)\n\t\t\tres, _, err := clientCtx.QueryWithData(route, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar params types.Params\n\t\t\tif err := clientCtx.JSONMarshaler.UnmarshalJSON(res, ¶ms); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn clientCtx.PrintOutput(params)\n\t\t},\n\t}\n\n\tflags.AddQueryFlagsToCmd(cmd)\n\n\treturn cmd\n}",
"func QueryParams() *cobra.Command {\n\treturn qflags(&cobra.Command{\n\t\tUse: \"params\",\n\t\tShort: \"Query the current ecocredit module parameters\",\n\t\tLong: strings.TrimSpace(\n\t\t\tfmt.Sprintf(`Query the current ecocredit module parameters\n\t\t\t\nExamples:\n$%s query %s params\n$%s q %s params\n\t\t\t`, version.AppName, ecocredit.ModuleName, version.AppName, ecocredit.ModuleName),\n\t\t),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tc, ctx, err := mkQueryClient(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres, err := c.Params(cmd.Context(), &ecocredit.QueryParamsRequest{})\n\t\t\treturn print(ctx, res, err)\n\t\t},\n\t})\n}",
"func GetQueryCmdParams(route string, cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"params\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\t\t\tbz, _, err := cliCtx.Query(fmt.Sprintf(\"custom/%s/%s\", route, types.QueryParams))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn printOutput(cliCtx, cdc, bz, &types.Params{})\n\t\t},\n\t}\n}",
"func FetchProfiles() map[string]string {\n\ttoken := auth.NewToken()\n\tquery := queryPayload{\"SELECT Id, ProfileName FROM CommunicationProfile\"}\n\tpayload, err := json.Marshal(query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treq, err := http.NewRequest(\"POST\", viper.GetString(\"baseurl\")+\"/v1/action/query\", bytes.NewBuffer(payload))\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \"+token.Val)\n\n\tresponse, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != 200 {\n\t\tbody, err := ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Fatal(string(body))\n\t}\n\n\tdec := json.NewDecoder(response.Body)\n\tvar body profilesQueryResponse\n\tif err = dec.Decode(&body); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif !body.Done {\n\t\tlog.Fatalln(\"there are more communication profile to query\")\n\t}\n\n\tresult := make(map[string]string)\n\tfor _, p := range body.Records {\n\t\tresult[p.Name] = p.ID\n\t}\n\n\treturn result\n}",
"func GetCmdParams() *cobra.Command {\r\n\tcmd := &cobra.Command{\r\n\t\tUse: \"params\",\r\n\t\tShort: \"Query the current ibc-transfer parameters\",\r\n\t\tLong: \"Query the current ibc-transfer parameters\",\r\n\t\tArgs: cobra.NoArgs,\r\n\t\tExample: fmt.Sprintf(\"%s query ibc-transfer params\", version.AppName),\r\n\t\tRunE: func(cmd *cobra.Command, _ []string) error {\r\n\t\t\tclientCtx, err := client.GetClientQueryContext(cmd)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn err\r\n\t\t\t}\r\n\t\t\tqueryClient := types.NewQueryClient(clientCtx)\r\n\r\n\t\t\tres, _ := queryClient.Params(context.Background(), &types.QueryParamsRequest{})\r\n\t\t\treturn clientCtx.PrintProto(res.Params)\r\n\t\t},\r\n\t}\r\n\r\n\tflags.AddQueryFlagsToCmd(cmd)\r\n\r\n\treturn cmd\r\n}",
"func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) {\n\tdefer telemetry.MeasureSince(time.Now(), types.ModuleName, \"query\", \"Params\")\n\tctx := sdk.UnwrapSDKContext(c)\n\tvar params types.Params\n\tk.paramSpace.GetParamSet(ctx, ¶ms)\n\n\treturn &types.QueryParamsResponse{Params: params, Request: req}, nil\n}",
"func NewGetPbxDeviceProfileitemsRequest(server string, params *GetPbxDeviceProfileitemsParams) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/pbxdeviceprofiles\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryValues := queryUrl.Query()\n\n\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"name\", params.Name); err != nil {\n\t\treturn nil, err\n\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tfor k, v := range parsed {\n\t\t\tfor _, v2 := range v {\n\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t}\n\t\t}\n\t}\n\n\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"config_id\", params.ConfigId); err != nil {\n\t\treturn nil, err\n\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tfor k, v := range parsed {\n\t\t\tfor _, v2 := range v {\n\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t}\n\t\t}\n\t}\n\n\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"order_by\", params.OrderBy); err != nil {\n\t\treturn nil, err\n\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tfor k, v := range parsed {\n\t\t\tfor _, v2 := range v {\n\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t}\n\t\t}\n\t}\n\n\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"order_by_direction\", params.OrderByDirection); err != nil {\n\t\treturn nil, err\n\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tfor k, v := range parsed {\n\t\t\tfor _, v2 := range v {\n\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t}\n\t\t}\n\t}\n\n\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"page\", params.Page); err != nil {\n\t\treturn nil, err\n\t} else if parsed, err := 
url.ParseQuery(queryFrag); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tfor k, v := range parsed {\n\t\t\tfor _, v2 := range v {\n\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t}\n\t\t}\n\t}\n\n\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"rows\", params.Rows); err != nil {\n\t\treturn nil, err\n\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tfor k, v := range parsed {\n\t\t\tfor _, v2 := range v {\n\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t}\n\t\t}\n\t}\n\n\tqueryUrl.RawQuery = queryValues.Encode()\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}",
"func GetCmdQueryParams() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"params\",\n\t\tShort: \"Query the current minting parameters\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tclientCtx, err := client.GetClientQueryContext(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tqueryClient := types.NewQueryClient(clientCtx)\n\n\t\t\tparams := &types.QueryParamsRequest{}\n\t\t\tres, err := queryClient.Params(cmd.Context(), params)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn clientCtx.PrintProto(&res.Params)\n\t\t},\n\t}\n\n\tflags.AddQueryFlagsToCmd(cmd)\n\n\treturn cmd\n}",
"func GetCmdQueryParams() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"params\",\n\t\tShort: \"Query the current htlc parameter values\",\n\t\tLong: \"Query values set as htlc parameters.\",\n\t\tExample: fmt.Sprintf(\"$ %s query htlc params\", version.AppName),\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tclientCtx, err := client.GetClientQueryContext(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tqueryClient := types.NewQueryClient(clientCtx)\n\n\t\t\tres, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn clientCtx.PrintProto(&res.Params)\n\t\t},\n\t}\n\tflags.AddQueryFlagsToCmd(cmd)\n\treturn cmd\n}",
"func getParameters(c *cli.Context) error {\n\tif !isSystemRunning() {\n\t\treturn nil\n\t}\n\t_, _, _, controllers := getIPAddresses()\n\n\tparams := sendCommandToControllers(controllers, \"GetParams\", \"\")\n\tfmt.Println(params)\n\n\treturn nil\n}",
"func QueryParameters(f *cli.Fixtures, flags ...string) types.Params {\n\tcmd := fmt.Sprintf(\"%s query distribution params %v\", f.SimcliBinary, f.Flags())\n\tout, errStr := tests.ExecuteT(f.T, cli.AddFlags(cmd, flags), \"\")\n\trequire.Empty(f.T, errStr)\n\n\tvar params types.Params\n\trequire.NoError(f.T, f.Cdc.UnmarshalJSON([]byte(out), ¶ms))\n\treturn params\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
stringView returns a view of the []byte as a string. In unsafe mode, it doesn't incur allocation and copying caused by conversion. In regular safe mode, it is an allocation and copy. Usage: Always maintain a reference to v while result of this call is in use, and call keepAlive4BytesView(v) at point where done with view.
|
func stringView(v []byte) string {
return string(v)
}
|
[
"func bytesView(v string) []byte {\n\treturn []byte(v)\n}",
"func (v Version) String() string {\n\treturn bytes2String(v[:])\n}",
"func UnsafeString(b []byte) (s string) {\n\tsrc := (*sliceHeader)(unsafe.Pointer(&b))\n\tdst := (*stringHeader)(unsafe.Pointer(&s))\n\tdst.Data = src.Data\n\tdst.Len = src.Len\n\treturn s\n}",
"func (v *Value) AsStringUnsafe() (string, error) {\n\treturn unsafeBytesToString(v.Bytes), nil\n}",
"func String(b []byte) string {\n\tvar s string\n\tif len(b) == 0 {\n\t\treturn s\n\t}\n\n\t// NB(r): We need to declare a real string so internally the compiler\n\t// knows to use an unsafe.Pointer to keep track of the underlying memory so that\n\t// once the strings's array pointer is updated with the pointer to the byte slices's\n\t// underlying bytes, the compiler won't prematurely GC the memory when the byte slice\n\t// goes out of scope.\n\tstringHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\n\t// NB(r): This makes sure that even if GC relocates the byte slices's underlying\n\t// memory after this assignment, the corresponding unsafe.Pointer in the internal\n\t// string struct will be updated accordingly to reflect the memory relocation.\n\tstringHeader.Data = (*reflect.SliceHeader)(unsafe.Pointer(&b)).Data\n\n\t// NB(r): It is important that we access b after we assign the Data\n\t// pointer of the byte slice header to the Data pointer of the string header to\n\t// make sure the bytes don't get GC'ed before the assignment happens.\n\tl := len(b)\n\tstringHeader.Len = l\n\n\treturn s\n}",
"func (b *SafeBuffer) String() string {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.String()\n}",
"func rawstring(size int) (s string, b []byte) {\n\tp := mallocgc(uintptr(size), nil, false)\n\n\tstringStructOf(&s).str = p\n\tstringStructOf(&s).len = size\n\n\t*(*slice)(unsafe.Pointer(&b)) = slice{p, size, size}\n\n\treturn\n}",
"func (s *Struct) Stringv(path ...string) (v string) {\n\tv, _ = s.Value(path...).(string)\n\treturn\n}",
"func (v String) String() string {\n\treturn v.v\n}",
"func StringUnsafe(b []byte) string {\n\tvar s string\n\tpb := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\tps := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\tps.Data = pb.Data\n\tps.Len = pb.Len\n\treturn s\n}",
"func (b *Buf) UnsafeString() string {\n\treturn *(*string)(unsafe.Pointer(&b.s))\n}",
"func bytesToIPv4String(src []byte) string {\n\treturn net.IPv4(src[0], src[1], src[2], src[3]).String()\n}",
"func ToUnsafeString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}",
"func UnsafeString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}",
"func StringBytes(b []byte) string { return *(*string)(Pointer(&b)) }",
"func (v *Value) AsStringSafe() (string, error) {\n\treturn string(v.Bytes), nil\n}",
"func String(b []byte) string {\n\treturn string(b)\n}",
"func StringFromImmutableBytes(bs []byte) string {\n\tif len(bs) == 0 {\n\t\treturn \"\"\n\t}\n\treturn unsafe.String(&bs[0], len(bs))\n}",
"func (s *Buffer) String() string {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.buffer.String()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
bytesView returns a view of the string as a []byte. In unsafe mode, it doesn't incur allocation and copying caused by conversion. In regular safe mode, it is an allocation and copy. Usage: Always maintain a reference to v while result of this call is in use, and call keepAlive4BytesView(v) at point where done with view.
|
// bytesView returns the contents of v as a []byte.
//
// This is the safe variant: the conversion allocates and copies, so the
// returned slice is independent of v and no keepAlive bookkeeping is
// required by callers.
func bytesView(v string) []byte {
	b := []byte(v)
	return b
}
|
[
"func stringView(v []byte) string {\n\treturn string(v)\n}",
"func BytesView(s interface{}) []byte {\n\tv := reflect.ValueOf(s)\n\tfirst := v.Index(0)\n\tsz := int(first.Type().Size())\n\treturn *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(first.UnsafeAddr())))),\n\t\tLen: v.Len() * sz,\n\t\tCap: v.Cap() * sz,\n\t}))\n}",
"func SafeBytes(v any) []byte {\n\tbs, _ := ToBytesWithFunc(v, func(v any) ([]byte, error) {\n\t\treturn []byte(fmt.Sprint(v)), nil\n\t})\n\treturn bs\n}",
"func NewView(b []byte) *View { return &View{b} }",
"func (v ByteView) ByteSlice() []byte {\n\tif v.b != nil {\n\t\treturn cloneBytes(v.b)\n\t}\n\treturn []byte(v.s)\n}",
"func (v *Value) AsBytesUnsafe() ([]byte, error) {\n\treturn v.Bytes, nil\n}",
"func UnsafeBytes(s string) (b []byte) {\n\tsrc := (*stringHeader)(unsafe.Pointer(&s))\n\tdst := (*sliceHeader)(unsafe.Pointer(&b))\n\tdst.Data = src.Data\n\tdst.Len = src.Len\n\tdst.Cap = src.Len\n\treturn b\n}",
"func Bytes(v []byte) *[]byte { return &v }",
"func (s *Str) Unsafe() *ByteString {\n\treturn UnsafeString(s.Value)\n}",
"func GetBytesBuffer4K() *bytes.Buffer {\n\tif b := getb4K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get4K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 4096))\n}",
"func GetBytesBuffer4M() *bytes.Buffer {\n\tif b := getb4M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get4M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 4194304))\n}",
"func V4AddressFromBytes(buf []byte) Address {\n\treturn Address{netip.AddrFrom4(*(*[4]byte)(buf))}\n}",
"func (p *Parser) UnsafeBytes() []byte {\n\treturn p.buf[p.ctx.beg:p.ctx.end]\n}",
"func (v *Value) AsBytesSafe() ([]byte, error) {\n\treturn append([]byte(nil), v.Bytes...), nil\n}",
"func (str *PdfObjectString) Bytes() []byte {\n\treturn []byte(str.val)\n}",
"func (v ByteView) EqualString(s string) bool {\n\tif v.b == nil {\n\t\treturn v.s == s\n\t}\n\tl := v.Len()\n\tif len(s) != l {\n\t\treturn false\n\t}\n\tfor i, bi := range v.b {\n\t\tif bi != s[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func ImmutableBytesFromString(s string) []byte {\n\tb := unsafe.StringData(s)\n\treturn unsafe.Slice(b, len(s))\n}",
"func rawstring(size int) (s string, b []byte) {\n\tp := mallocgc(uintptr(size), nil, false)\n\n\tstringStructOf(&s).str = p\n\tstringStructOf(&s).len = size\n\n\t*(*slice)(unsafe.Pointer(&b)) = slice{p, size, size}\n\n\treturn\n}",
"func NewBytesViewer(from io.ReadCloser) (in io.ReadCloser) {\n\treturn &bytesViewer{from}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subscribe will create a channel that is published to each time ONE chosen partition of the topic changes
|
// Subscribe creates a consumer channel that receives messages from a
// single partition of topic. Among all partitions for the topic, the one
// selected by smallestOf is used — presumably the least-loaded partition,
// TODO confirm smallestOf's ordering criterion.
func (n *Node) Subscribe(topic string) <-chan Message {
	out := make(chan Message)

	var chosen partition
	for _, candidate := range n.getPartitionsFor(topic) {
		chosen = smallestOf(chosen, candidate)
	}

	log.Debug("Created subscriber for %s:%v", topic, chosen)
	chosen.Subscribe(out)
	return out
}
|
[
"func (c Conference) Subscribe(id uuid.UUID, topic string, out chan Notification) {\n\ttree, ok := c.Room[topic]\n\tif !ok {\n\t\ttree = &bst.BinarySearchTree{}\n\t\tc.Room[topic] = tree\n\t}\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\ttree.Add(NewSubscriber(id, out))\n}",
"func (h *Hub) Subscribe(t string, c chan []byte) {\n\th.Lock()\n\tdefer h.Unlock()\n\tchans, ok := h.topicChans[t]\n\tif !ok {\n\t\tchans = make(map[chan []byte]struct{})\n\t\th.topicChans[t] = chans\n\t}\n\tchans[c] = struct{}{}\n\ttopics, ok := h.chanTopics[c]\n\tif !ok {\n\t\ttopics = make(map[string]struct{})\n\t\th.chanTopics[c] = topics\n\t}\n\ttopics[t] = struct{}{}\n}",
"func (b *Broker) Subscribe(\n\tconn *websocket.Conn,\n\ttopic string,\n\toffset int64) (*Subscription, error) {\n\n\t// create new subscription\n\tsubscription, err := NewSubscription(b, conn, topic, offset)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\tvar subscriptions SubscriptionSet\n\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\t// save subscription\n\tsubscriptions, exists := b.subscriptions[topic]\n\tif !exists {\n\t\tsubscriptions = make(map[*Subscription]bool)\n\t\tb.subscriptions[topic] = subscriptions\n\t}\n\tsubscriptions[subscription] = true\n\n\treturn subscription, nil\n\n}",
"func (ps *Pubsub) Subscribe(topic string) <-chan string {\n\tps.Lock()\n\tdefer ps.Unlock()\n\n\tch := make(chan string, 10)\n\tps.subs[topic] = append(ps.subs[topic], ch)\n\n\treturn ch\n}",
"func (t *Topic) Subscribe(ctx context.Context) <-chan interface{} {\n\tch := make(chan interface{})\n\tt.subs[ch] = ctx\n\treturn ch\n}",
"func (r *reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.Trigger, brokerTrigger, brokerIngress *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) {\n\turi := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: names.ServiceHostName(svc.Name, svc.Namespace),\n\t\tPath: path.Generate(t),\n\t}\n\texpected := resources.NewSubscription(t, brokerTrigger, brokerIngress, uri)\n\n\tsub, err := r.getSubscription(ctx, t)\n\t// If the resource doesn't exist, we'll create it\n\tif k8serrors.IsNotFound(err) {\n\t\tsub = expected\n\t\terr = r.client.Create(ctx, sub)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn sub, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Update Subscription if it has changed. Ignore the generation.\n\texpected.Spec.DeprecatedGeneration = sub.Spec.DeprecatedGeneration\n\tif !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) {\n\t\t// Given that spec.channel is immutable, we cannot just update the Subscription. We delete\n\t\t// it and re-create it instead.\n\t\terr = r.client.Delete(ctx, sub)\n\t\tif err != nil {\n\t\t\tlogging.FromContext(ctx).Info(\"Cannot delete subscription\", zap.Error(err))\n\t\t\tr.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionDeleteFailed, \"Delete Trigger's subscription failed: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tsub = expected\n\t\terr = r.client.Create(ctx, sub)\n\t\tif err != nil {\n\t\t\tlogging.FromContext(ctx).Info(\"Cannot create subscription\", zap.Error(err))\n\t\t\tr.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, \"Create Trigger's subscription failed: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn sub, nil\n}",
"func (channel Channel) subscribe(observers ...Observer) {\n\tchannel.checkChannelMap()\n\tfor _, observer := range observers {\n\t\tchannel.observers[observer.id] = observer\n\t\tfmt.Printf(\"New observer %s subscribed in channel %s \\n\", observer.id, channel.id)\n\t}\n}",
"func (hc *Conn) Subscribe(topic string) {\n\thc.parent.subscribe <- subscription{topic, hc}\n}",
"func (bps *BasePubSub) Subscribe(pattern string) <-chan Message {\n\tbps.mu.Lock()\n\tdefer bps.mu.Unlock()\n\n\tsubscription := subscription{\n\t\tch: make(chan Message),\n\t\tpattern: pattern,\n\t}\n\n\tvar matched bool\n\n\tfor topic, producer := range bps.producers {\n\t\t// TODO: Use of a modified radix trie would provide significant improvement\n\t\t// if the number of producers is extremely large.\n\t\tif MatchTopic(topic, pattern) {\n\t\t\tmatched = true\n\n\t\t\t// Add the subscription to the producer in a goroutine as to not have the\n\t\t\t// Subscribe call hang for longer than necessary.\n\t\t\tgo func(producer *BaseProducer) {\n\t\t\t\tproducer.addSubscription(subscription)\n\t\t\t}(producer)\n\t\t}\n\t}\n\n\tif !matched {\n\t\tbps.idleSubscriptions[pattern] = subscription\n\t}\n\n\treturn subscription.ch\n}",
"func (s *Cluster) NotifySubscribe(conn security.ID, ssid subscription.Ssid) {\n\tevent := SubscriptionEvent{\n\t\tPeer: s.name,\n\t\tConn: conn,\n\t\tSsid: ssid,\n\t}\n\n\t// Add to our global state\n\ts.state.Add(event.Encode())\n\t// Create a delta for broadcasting just this operation\n\top := newSubscriptionState()\n\top.Add(event.Encode())\n\ts.gossip.GossipBroadcast(op)\n}",
"func SubscribeChannel(socket *websocket.Conn) {\n\tfmt.Printf(\"Subscribed channel by: %#v\\n\", socket)\n\n\t// TODO: query rethinkDB with the feature: changefeed,\n\t// it'll look up initial channels, then keep\n\t// blocking and waiting for channel changes such as ADD, REMOVE, or EDIT\n\tfor {\n\t\ttime.Sleep(time.Second * 1)\n\n\t\tmsg := models.Message{\n\t\t\t\"channel add\",\n\t\t\tmodels.Channel{\"1\", \"Software Support\"}}\n\t\tsocket.WriteJSON(msg)\n\t\tfmt.Println(\"sent newly added channel.\")\n\t}\n}",
"func (input *Input) ChannelSubscribe(samples chan *Samples) (err error) {\n\tif input == nil {\n\t\terr = errors.New(\"Input is null\")\n\t\treturn err\n\t}\n\t//input.mu.Lock()\n\t//defer input.mu.Unlock()\n\tgo func() {\n\t\tfor {\n\t\t\tinput.connector.Wait(-1)\n\t\t\tinput.Take()\n\t\t\tsamples <- input.Samples\n\t\t}\n\t}()\n\treturn nil\n}",
"func (k *KafkaBroker) Subscribe(topic string, output chan<- Message, offset int64) (err error) {\n\tconsumer, err := sarama.NewConsumerFromClient(k.Client)\n\n\tif err != nil {\n\t\tpanic(\"Got an error while trying to create a consumer: \" + err.Error())\n\t}\n\n\tconn, err := consumer.ConsumePartition(\n\t\ttopic,\n\t\t0,\n\t\toffset, // Start from the next unread message\n\t)\n\n\tif err != nil {\n\t\tpanic(\"Got an error while trying to consume a partition: \" + err.Error())\n\t}\n\n\tgo func() {\n\t\tfor msg := range conn.Messages() {\n\t\t\toutput <- Message{\n\t\t\t\tKey: msg.Key,\n\t\t\t\tValue: msg.Value,\n\t\t\t\tOffset: msg.Offset,\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn err\n}",
"func (c *Conn) Subscribe(feed cipher.PubKey) (err error) {\n\n\t// add the feed to node\n\n\tif err = c.n.Share(feed); err != nil {\n\t\treturn\n\t}\n\n\tvar reply msg.Msg\n\n\tif reply, err = c.sendRequest(&msg.Sub{Feed: feed}); err != nil {\n\t\treturn\n\t}\n\n\tswitch x := reply.(type) {\n\n\tcase *msg.Ok:\n\t// success\n\n\tcase *msg.Err:\n\t\terr = errors.New(x.Err)\n\n\tdefault:\n\t\terr = fmt.Errorf(\"invalid response type %T\", reply)\n\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tc.n.fs.addConnFeed(c, feed)\n\tc.sendLastRoot(feed)\n\treturn\n}",
"func (d StaticAgentDiscovery) Subscribe(c chan<- []string) { go func() { c <- d }() }",
"func (n *notifier) Subscribe(ch chan<- []Update) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tn.subs = append(n.subs, ch)\n}",
"func (s *SyncStorage) SubscribeChannel(ns string, cb func(string, ...string), channels ...string) error {\n\tnsPrefix := getNsPrefix(ns)\n\treturn s.getDbBackend(ns).SubscribeChannelDB(cb, s.setNamespaceToChannels(nsPrefix, channels...)...)\n}",
"func (s *Subscription) Subscribe(channels ...string) {\n\tif len(channels) < 1 {\n\t\treturn\n\t}\n\n\ts.broker.dataChan <- &envData{false, &envSubscription{true, s, channels}, nil}\n}",
"func (p *pahoClient) Subscribe(c chan error, topic string, qos uint8, callback CallbackHandler) {\n\thandler := func(i paho.Client, message paho.Message) {\n\t\tlog.Printf(\"RECEIVED - Topic: %s, Message Length: %d bytes\", message.Topic(), len(message.Payload()))\n\t\tif callback != nil {\n\t\t\tcallback(context.Background(), topic, p.clientID, message.Payload())\n\t\t}\n\t}\n\ttoken := p.client.Subscribe(topic, qos, handler)\n\tc <- p.waitForToken(token)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SubscribeToAll creates a channel that is registered to ALL of the different partitions
|
// SubscribeToAll creates a consumer channel registered with every
// partition of topic, so it receives messages written to any of them.
func (n *Node) SubscribeToAll(topic string) <-chan Message {
	out := make(chan Message)
	for _, part := range n.getPartitionsFor(topic) {
		part.Subscribe(out)
	}
	return out
}
|
[
"func (x *Pep) SubscribeToAll(ctx context.Context, host string, jid *jid.JID) {\n\tx.runQueue.Run(func() {\n\t\tif err := x.subscribeToAll(ctx, host, jid); err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t})\n}",
"func (c *Channel) PublishToAllSubscribers(message []byte) error {\n\tfor _, bucket := range c.buckets {\n\t\tbucket.queue <- message\n\t}\n\treturn nil\n}",
"func (cp *CandlesProvider) SubscribeAll(d time.Duration) chan schemas.ResultChannel {\n\tch := make(chan schemas.ResultChannel)\n\n\tfor _, orderBook := range cp.groups {\n\t\tgo orderBook.Subscribe(ch, d)\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn ch\n}",
"func (n *Node) Subscribe(topic string) <-chan Message {\n\tconsumer := make(chan Message)\n\n\tparts := n.getPartitionsFor(topic)\n\tvar part partition\n\tfor _, p := range parts {\n\t\tpart = smallestOf(part, p)\n\t}\n\n\tlog.Debug(\"Created subscriber for %s:%v\", topic, part)\n\tpart.Subscribe(consumer)\n\treturn consumer\n}",
"func (cp *CandlesProvider) SubscribeAll(d time.Duration) chan schemas.ResultChannel {\n\treturn nil\n}",
"func (subService *SubscriberService) SubscribeToTopics(topics []string) (chan *pubsub.Message, error) {\n\t// Temp disabled RA service\n\tif subService.ra != nil {\n\n\t\t// 1. Register on the blockchain\n\t\t// 2. Subscribe to pubsub\n\n\t\thost := subService.GetHost()\n\n\t\tfor _, topic := range topics {\n\t\t\terr := subService.ra.Subscribe(host.ID(), string(topic))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Transaction on the blockchain won't be reflect immediately.\n\t// TODO: So, wait before sending a sub message.\n\n\tsubRouter := subService.GetPubSub()\n\tif subRouter == nil {\n\t\treturn nil, errors.New(\"subscriber router is nil\")\n\t}\n\n\tmsg := make(chan *pubsub.Message, 100)\n\n\tfor _, topic := range topics {\n\n\t\tsub, err := subRouter.Subscribe(topic)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsubService.topicTracker[topic] = &TopicWrapper{\n\t\t\tsubscription: sub,\n\t\t}\n\n\t\tgo func(subs *pubsub.Subscription) {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-subService.ctx.Done():\n\t\t\t\t\tclose(msg)\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\tm, err := subs.Next(subService.ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmsg <- m\n\t\t\t}\n\n\t\t}(sub)\n\t}\n\n\treturn msg, nil\n}",
"func (app *App) RegisterAll(to string) {\n\tm := &message.Register{\n\t\tHeader: &message.Header{},\n\t}\n\tm.Header.From = &app.ID\n\tm.Name = &app.Name\n\tm.Header.To = &to\n\tm.Devices = app.DeviceList()\n\n\tapp.Publish(queue.Inventory, m)\n}",
"func (opcuaExport *OpcuaExport) Subscribe() {\n\tglog.Infof(\"-- Initializing message bus context\")\n\tdefer opcuaExport.configMgr.Destroy()\n\n\tnumOfSubscriber, _ := opcuaExport.configMgr.GetNumSubscribers()\n\tfor i := 0; i < numOfSubscriber; i++ {\n\t\tsubctx, err := opcuaExport.configMgr.GetSubscriberByIndex(i)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to get subscriber context : %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tsubTopics, err := subctx.GetTopics()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to fetch topics : %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tconfig, err := subctx.GetMsgbusConfig()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to fetch msgbus config : %v\", err)\n\t\t\treturn\n\t\t}\n\t\tgo worker(opcuaExport, config, subTopics[0])\n\t\tsubctx.Destroy()\n\t}\n\t\n}",
"func (lh *LocationsAPIHandler) subscriptionFanout() {\n\n\tsub := lh.client.Subscribe(\n\t\tlh.client.Context(), \"currentLocationsPS\",\n\t)\n\n\tdefer func() {\n\t\tlog.Info(\"Exit from PUB/SUB Channel\")\n\t\tsub.Unsubscribe(lh.client.Context(), \"currentLocationsPS\")\n\t}()\n\n\t// Open Redis PUB/SUB Channel...\n\tchannel := sub.Channel()\n\tlog.Info(\"Reading from PUB/SUB Channel\")\n\n\tfor msg := range channel {\n\t\t// cast msg -> msgB and then send to all listening connections...\n\t\tmsgB := []byte(msg.Payload)\n\n\t\tfor i, sub := range lh.conns {\n\t\t\tif sub != nil {\n\t\t\t\t// Never Block!!\n\t\t\t\tselect {\n\t\t\t\tcase sub.cH <- msgB:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t} else if i < lh.openIdx {\n\t\t\t\tlh.openIdx = i\n\t\t\t}\n\t\t}\n\t}\n\tlog.Info(\"Exit from PUB/SUB Channel\")\n}",
"func Subscribe() {\n\tfor d := range Devices { // Loop over all devices we know about\n\t\tif d.DeviceType != RF { // Obviously the RF switch isn't WiFi, so it has no MAC address, and therefore can't be subscribed to.\n\t\t\t// We send a message to each socket. reverseMAC takes a MAC address and reverses each pair (e.g. AC CF 23 becomes CA FC 32)\n\t\t\tsendMessage(\"636c\", reverseMAC(Devices[d].MACAddress)+macPadding, Devices[d])\n\t\t\tpassMessage(\"subscribe\", &d)\n\t\t}\n\t}\n\n\treturn\n}",
"func subscribeAllTopics() {\n\tfor key, value := range topicMap {\n\t\thelper.TokenClient = helper.Client.Subscribe(key, 0, value)\n\t\tif helper.TokenClient.Wait() && helper.TokenClient.Error() != nil {\n\t\t\tklog.Errorf(\"subscribe() Error in topic: %s is: %s\", key, helper.TokenClient.Error())\n\t\t}\n\t}\n}",
"func (cg *CandlesGroup) subscribe() {\n\tfor _, symb := range cg.symbols {\n\t\tmessage := candlesSubsMessage{\n\t\t\tEvent: eventSubscribe,\n\t\t\tChannel: \"candles\",\n\t\t\tKey: \"trade:1m:t\" + strings.ToUpper(symb.OriginalName),\n\t\t}\n\n\t\tif err := cg.wsClient.Write(message); err != nil {\n\t\t\tlog.Printf(\"[BITFINEX] Error subsciring to %v candles\", symb.Name)\n\t\t\tcg.restart()\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Println(\"[BITFINEX] Subscription ok\")\n}",
"func (h *Hub) SendToAll(data []byte) {\n\th.mux.Lock()\n\tdefer h.mux.Unlock()\n\tfor conn := range h.conns {\n\t\th.Send(conn, data, websocket.TextMessage)\n\t}\n}",
"func (bps *BasePubSub) Subscribe(pattern string) <-chan Message {\n\tbps.mu.Lock()\n\tdefer bps.mu.Unlock()\n\n\tsubscription := subscription{\n\t\tch: make(chan Message),\n\t\tpattern: pattern,\n\t}\n\n\tvar matched bool\n\n\tfor topic, producer := range bps.producers {\n\t\t// TODO: Use of a modified radix trie would provide significant improvement\n\t\t// if the number of producers is extremely large.\n\t\tif MatchTopic(topic, pattern) {\n\t\t\tmatched = true\n\n\t\t\t// Add the subscription to the producer in a goroutine as to not have the\n\t\t\t// Subscribe call hang for longer than necessary.\n\t\t\tgo func(producer *BaseProducer) {\n\t\t\t\tproducer.addSubscription(subscription)\n\t\t\t}(producer)\n\t\t}\n\t}\n\n\tif !matched {\n\t\tbps.idleSubscriptions[pattern] = subscription\n\t}\n\n\treturn subscription.ch\n}",
"func (h *Hub) UnsubscribeAll(ch chan []byte) {\n\th.Lock()\n\tdefer h.Unlock()\n\ttopics, ok := h.chanTopics[ch]\n\tif !ok {\n\t\treturn\n\t}\n\tfor t := range topics {\n\t\tchans, ok := h.topicChans[t]\n\t\tif ok {\n\t\t\tdelete(chans, ch)\n\t\t}\n\t}\n\tdelete(h.chanTopics, ch)\n}",
"func (s *StanServer) initSubscriptions() error {\n\n\t// Do not create internal subscriptions in clustered mode,\n\t// the leader will when it gets elected.\n\tif !s.isClustered {\n\t\tcreateSubOnClientPublish := true\n\n\t\tif s.partitions != nil {\n\t\t\t// Receive published messages from clients, but only on the list\n\t\t\t// of static channels.\n\t\t\tif err := s.partitions.initSubscriptions(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Since we create a subscription per channel, do not create\n\t\t\t// the internal subscription on the > wildcard\n\t\t\tcreateSubOnClientPublish = false\n\t\t}\n\n\t\tif err := s.initInternalSubs(createSubOnClientPublish); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.log.Debugf(\"Discover subject: %s\", s.info.Discovery)\n\t// For partitions, we actually print the list of channels\n\t// in the startup banner, so we don't need to repeat them here.\n\tif s.partitions != nil {\n\t\ts.log.Debugf(\"Publish subjects root: %s\", s.info.Publish)\n\t} else {\n\t\ts.log.Debugf(\"Publish subject: %s.>\", s.info.Publish)\n\t}\n\ts.log.Debugf(\"Subscribe subject: %s\", s.info.Subscribe)\n\ts.log.Debugf(\"Subscription Close subject: %s\", s.info.SubClose)\n\ts.log.Debugf(\"Unsubscribe subject: %s\", s.info.Unsubscribe)\n\ts.log.Debugf(\"Close subject: %s\", s.info.Close)\n\treturn nil\n}",
"func (pr *PieceRegistry) SubscribeAllPartsDownloaded() chan PieceRange {\n\treturn pr.plansCompletedCh\n}",
"func Register(ch chan ExecutionEvent, topics ...Topic) {\n\tfor _, t := range topics {\n\t\tsubscriberRegistry[t] = append(subscriberRegistry[t], ch)\n\t}\n}",
"func (q channelQuery) All(exec boil.Executor) (ChannelSlice, error) {\n\tvar o []*Channel\n\n\terr := q.Bind(nil, exec, &o)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"model: failed to assign all query results to Channel slice\")\n\t}\n\n\treturn o, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RegisterStrategy allows a different RoutingStrategy to be specified for a given topic
|
// RegisterStrategy allows a different RoutingStrategy to be specified for
// a given topic. Subsequent Writes to topic route payloads using strategy
// instead of the node's default.
//
// NOTE(review): strategyMap is written without synchronization here —
// confirm registration only happens before concurrent Writes begin, and
// that strategyMap is initialized before first use.
func (n *Node) RegisterStrategy(topic string, strategy RoutingStrategy) {
	n.strategyMap[topic] = strategy
}
|
[
"func RegisterStrategy(info *StrategyInfo) {\n\tstrategyRegistry = append(strategyRegistry, info)\n}",
"func RoutingStrategy(r RoutingStrategyType) metaOp {\n\treturn func(m *PluginMeta) {\n\t\tm.RoutingStrategy = r\n\t}\n}",
"func (t Twitter) registerRoutes(r *mux.Router) {\n\n}",
"func NewStrategy(allocator HostnameGenerator, sarClient SubjectAccessReviewInterface, allowExternalCertificates bool) routeStrategy {\n\treturn routeStrategy{\n\t\tObjectTyper: legacyscheme.Scheme,\n\t\tNameGenerator: names.SimpleNameGenerator,\n\t\thostnameGenerator: allocator,\n\t\tsarClient: sarClient,\n\t\tallowExternalCertificates: allowExternalCertificates,\n\t}\n}",
"func NewTopicRouter(br fiber.Router, conn *config.DBConn) {\n\trouter := br.Group(\"/topics\")\n\ttopicRepo := repository.NewTopicRepository(conn)\n\tpostRepo := repository.NewPostRepository(conn)\n\tuserRepo := repository.NewUserRepository(conn)\n\ttopicService := services.NewTopicService(topicRepo, postRepo, userRepo)\n\ttopicController := controllers.NewTopicController(topicService)\n\n\trouter.Get(\"/:id\", topicController.GetOne)\n\trouter.Get(\"/\", topicController.GetAll)\n\trouter.Post(\"/\", topicController.Create)\n}",
"func (group *RouterGroup) register(method string, subpattern string, handler HandlerFunc) {\n\tpattern := path.Join(group.prefix + subpattern)\n\tgroup.engine.router.addRoute(method, pattern, handler)\n}",
"func (r *LazyRouter) SetStrategy(strategy RoutingStrategy) {\n\tr.strategy = &baseRoutingStrategy{RoutingStrategy: strategy}\n}",
"func (s Site) AddTopic(t string) {\n\ts.Handler.HandleFunc(\"/\"+t+\"/\", s.TopicHandler)\n\ts.Handler.HandleFunc(\"/nominate/\"+t+\"/\", s.NominateHandler)\n}",
"func RegisterPlugin(key string, strategy ACLStrategy) {\n\tstrategiesMutex.Lock()\n\tdefer strategiesMutex.Unlock()\n\n\tif strategies == nil {\n\t\tstrategies = make(map[string]ACLStrategy)\n\t}\n\n\tstrategies[key] = strategy\n}",
"func NewStrategy(typer runtime.ObjectTyper) MicroservicesStrategy {\n\treturn MicroservicesStrategy{typer, names.SimpleNameGenerator}\n}",
"func RegisterTransport(name string, transport TransportFactory) {\n\ttransportFactories[name] = transport\n}",
"func Register(strategyName string, strategy Strategy) {\n\n\tif strategy == nil {\n\t\tpanic(\"sql: Register strategy is nil\")\n\t}\n\tif _, dup := strategyMaps[strategyName]; dup {\n\t\tpanic(\"sql: Register called twice for strategy \" + strategyName)\n\t}\n\tstrategyMaps[strategyName] = strategy\n\tlogger.Infoln(\"注εηη₯οΌ\" + strategyName)\n}",
"func (h *BasicHost) RegisterProtocol(\n\tpid common.Pid,\n\thandler ProtocolHandler,\n\tadapters ...ProtocolAdapter,\n) {\n\th.host.SetStreamHandler(pid.ProtocolID(), func(stream net.Stream) {\n\t\tdefer stream.Reset()\n\t\tmsg, err := common.ReadMessage(stream)\n\t\tif err != nil {\n\t\t\tlog.Println(\"failed to read message from stream :\", err)\n\t\t\treturn\n\t\t}\n\t\tgo handler.Handle(adapters...)(msg)\n\t})\n}",
"func (rabbit *RabbitMq) RegisterTopic(topic string) (err error) {\n\n\tchannel, err := rabbit.MqConnection.Channel()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\terr := channel.Close()\n\n\t\tif err != nil {\n\t\t\tlog.ErrorfNoContext(rabbit.AppID, component, \"Error closing channel while registering topic, %s\", err)\n\t\t}\n\t}()\n\n\t// topic exchange\n\terr = channel.ExchangeDeclare(\n\t\ttopic,\n\t\t\"fanout\",\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trabbit.registeredTopic[topic] = true\n\n\tlog.PrintfNoContext(rabbit.AppID, component, \"Registered topic %s for app %s\", topic, rabbit.AppID)\n\n\treturn nil\n}",
"func addRoutes(p *nats.Conn) {\n\tr := mux.NewRouter()\n\tr.Methods(\"POST\").Path(\"/topics/{topic}\").Handler(\n\t\thandlers.LoggingHandler(os.Stdout, handler(p, topic)))\n\tr.Methods(\"POST\").Path(\"/requests/{topic}\").Handler(\n\t\thandlers.LoggingHandler(os.Stdout, handler(p, request)))\n\thttp.Handle(\"/\", r)\n}",
"func Register(name string, r routerFactory) {\n\trouters[name] = r\n}",
"func RegisterTransportOfDifferentAgent(factory func(microserviceLabel string) TransportAdapter) error {\n\taccess.Lock()\n\tdefer access.Unlock()\n\n\tfactoryOfDifferentAgent = factory\n\n\treturn nil\n}",
"func NewStrategy(cloudProfiles rest.StandardStorage) Strategy {\n\treturn Strategy{api.Scheme, names.SimpleNameGenerator, cloudProfiles}\n}",
"func RegisterTopologyAPI(r *shttp.Server, g *graph.Graph, parser *traversal.GremlinTraversalParser, authBackend shttp.AuthenticationBackend, extraMarshallers map[string]TopologyMarshaller) {\n\tt := &TopologyAPI{\n\t\tgremlinParser: parser,\n\t\tgraph: g,\n\t\textraMarshallers: extraMarshallers,\n\t}\n\n\tt.registerEndpoints(r, authBackend)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write writes the payload to a partition in the topic based on the RoutingStrategy specified
|
func (n *Node) Write(topic, payload string) error {
rs := n.getStrategyFor(topic)
partID := rs.WhichPartition(payload)
parts := n.getPartitionsFor(topic)
return parts[partID].Write(payload)
}
|
[
"func (route_p *Router) publish(pkt_p *defs.UDPMsg, addr_p *net.UDPAddr) {\n\tif pkt_p.Flag&defs.UDPMsg_NewNode == defs.UDPMsg_NewNode {\n\t\troute_p.nmap_p.AddNbr(&pkt_p.DstHash, addr_p)\n\t} else {\n\t\troute_p.omap_p.Insert(&(pkt_p.DstHash), &(pkt_p.Payload.Msg), pkt_p.Payload.IsAddr)\n\t}\n\n\tpkt_p.Hops--\n\tif pkt_p.Hops > 0 {\n\t\tnbrs, err := route_p.nmap_p.GetNbr(&pkt_p.DstHash)\n\t\tif err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\tcmds, err := proto.Marshal(pkt_p)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, nbr_p := range nbrs {\n\t\t\t\tif _, err := route_p.ucon_p.WriteToUDP(cmds, nbr_p.Addr_p); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn\n\t}\n}",
"func (d *Kafka) PublishTo(topic string) error { return nil }",
"func (d *RMQ) PublishTo(topic string) error { return nil }",
"func SendPayloadToTopic(topic, payload string) error {\n\tif topic == \"\" {\n\t\treturn errors.New(\"topic is missing\")\n\t}\n\n\tl, err := standalone.List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(l) == 0 {\n\t\treturn errors.New(\"couldn't find a running Dapr instance\")\n\t}\n\n\tapp := l[0]\n\tb := []byte{}\n\n\tif payload != \"\" {\n\t\tb = []byte(payload)\n\t}\n\n\turl := fmt.Sprintf(\"http://localhost:%s/v%s/publish/%s\", fmt.Sprintf(\"%v\", app.HTTPPort), api.RuntimeAPIVersion, topic)\n\t// nolint: gosec\n\tr, err := http.Post(url, \"application/json\", bytes.NewBuffer(b))\n\n\tif r != nil {\n\t\tdefer r.Body.Close()\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (conn *extHost) sendMessageToReplicas(pr *inPutMessage, extSendTimer *common.Timer, watermark *int64) (int64, error) {\n\tvar err error\n\n\twatermarkOnly := pr == nil\n\tif watermarkOnly && conn.destType != shared.DestinationType_LOG {\n\t\tlog.Fatal(\"WatermarkOnly message requested for non LOG destination\")\n\t}\n\tmsg := store.NewAppendMessage()\n\tvar appendMsgAckCh chan *store.AppendMessageAck\n\tif watermarkOnly {\n\t\tif watermark == nil {\n\t\t\tlog.Fatal(\"nil watermark and pr\")\n\t\t}\n\t\tif conn.lastSentWatermark == *watermark {\n\t\t\treturn -1, nil\n\t\t}\n\t}\n\n\tconn.extMetrics.Increment(load.ExtentMetricMsgsIn)\n\tconn.dstMetrics.Increment(load.DstMetricMsgsIn)\n\tconn.hostMetrics.Increment(load.HostMetricMsgsIn)\n\n\t// increment seq-num; do atomically, since this could\n\t// be concurrently queried by the reporter\n\tsequenceNumber := atomic.AddInt64(&conn.seqNo, 1)\n\tmsg.SequenceNumber = common.Int64Ptr(sequenceNumber)\n\tmsg.EnqueueTimeUtc = common.Int64Ptr(conn.getEnqueueTime())\n\tif !watermarkOnly {\n\t\tmsg.Payload = pr.putMsg\n\t\tappendMsgAckCh = make(chan *store.AppendMessageAck, 5)\n\t}\n\tif watermark != nil && conn.lastSentWatermark < *watermark {\n\t\tmsg.FullyReplicatedWatermark = watermark\n\t}\n\n\t// we write the above same message to all the replicas\n\t// even if one of the replicas fail, we consider the message failed\n\t// no need to lock the conn.streams here because the replica set\n\t// for an extent will not change at all\n\terrCh := make(chan error)\n\tfor _, stream := range conn.streams {\n\t\tgo func(replInfo *replicaInfo, aMsg *store.AppendMessage, aMsgAckCh chan *store.AppendMessageAck) {\n\t\t\tpMsg := &replicaPutMsg{\n\t\t\t\tappendMsg: aMsg,\n\t\t\t\tappendMsgAckCh: aMsgAckCh,\n\t\t\t}\n\n\t\t\t// log disabled due to CPU cost\n\t\t\t// conn.logger.WithFields(logger.Fields{`replica`: replica, common.TagSeq: conn.seqNo, `Payload`: msg.Payload,}).Debug(`inputhost: sending data to store: ; seqno: , 
data`)\n\n\t\t\treplInfo.sendTimer.Reset(replicaSendTimeout)\n\t\t\tselect {\n\t\t\tcase replInfo.conn.putMessagesCh <- pMsg:\n\t\t\tcase <-replInfo.sendTimer.C:\n\t\t\t\terrCh <- ErrTimeout\n\t\t\t\treturn\n\t\t\t}\n\t\t\terrCh <- nil\n\t\t\treturn\n\t\t}(stream, msg, appendMsgAckCh)\n\t}\n\n\t// Wait for all the go routines above; we wait on the errCh to get the response from all replicas\n\tfor replica, stream := range conn.streams {\n\t\terr = <-errCh\n\t\tif err != nil {\n\t\t\tif watermarkOnly {\n\t\t\t\tconn.logger.WithFields(bark.Fields{`replica`: replica, common.TagErr: err, `putMessagesChLength`: len(stream.conn.putMessagesCh)}).Warn(`inputhost: sending fully replicated watermark to replica: , failed with error: ; length of putMsgCh: ;`)\n\t\t\t} else {\n\t\t\t\tconn.logger.WithFields(bark.Fields{`replica`: replica, common.TagErr: err, `putMessagesChLength`: len(stream.conn.putMessagesCh), `replyChLength`: len(stream.conn.replyCh)}).Error(`inputhost: sending msg to replica: , failed with error: ; length of putMsgCh: ; length of replyCh: ;`)\n\t\t\t}\n\t\t\treturn sequenceNumber, err\n\t\t}\n\t}\n\n\tif !watermarkOnly {\n\t\textSendTimer.Reset(replicaSendTimeout)\n\t\t// this is for the extHost's inflight messages for a successful message\n\t\tselect {\n\t\tcase conn.replyClientCh <- writeResponse{pr.putMsg.GetID(), sequenceNumber, appendMsgAckCh, pr.putMsgAckCh, pr.putMsgRecvTime, pr.putMsg.GetUserContext()}:\n\t\tcase <-extSendTimer.C:\n\t\t\tconn.logger.WithField(`lenReplyClientCh`, len(conn.replyClientCh)).Error(`inputhost: exthost: sending msg to the replyClientCh on exthost timed out`)\n\t\t\terr = ErrTimeout\n\t\t}\n\t}\n\tif err == nil && watermark != nil {\n\t\tconn.lastSentWatermark = *watermark\n\t}\n\treturn sequenceNumber, err\n}",
"func (ss *SparkServ) PostToRoom(text string, room rooms.Room, fileURL string) {\n\tvar jsonString string\n\tif fileURL != \"\" {\n\t\tjsonString = fmt.Sprintf(\"{ \\\"roomId\\\" : \\\"%s\\\" , \\\"file\\\" : \\\"%s\\\" , \\\"text\\\" : \\\"%s\\\" }\", room.ID, fileURL, text)\n\t} else {\n\t\tjsonString = fmt.Sprintf(\"{ \\\"roomId\\\" : \\\"%s\\\" , \\\"text\\\" : \\\"%s\\\" }\", room.ID, text)\n\t}\n\t//fmt.Println(jsonString)\n\tvar jsonStr = []byte(jsonString)\n\treq, err := http.NewRequest(\"POST\", sparkURL+\"/messages\", bytes.NewBuffer(jsonStr))\n\tif err != nil {\n\t\tlog.Println(\"creating request failed:\", err)\n\t}\n\n\t// ss.processRequest(req)\n\tresp := ss.processRequest(req)\n\tfmt.Printf(\"%s\", resp)\n}",
"func WritePayload(w io.Writer, session uint16, body []byte, meta uint32) (err error) {\n\thead := BuildHeader(session, body, meta)\n\tif _, err := w.Write(head); err != nil {\n\t\treturn err\n\t}\n\tif _, err := w.Write(body); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (t TopicCache) write(project, service string, topic *aiven.KafkaListTopic) (err error) {\n\tvar cachedService map[string]aiven.KafkaTopic\n\tvar ok bool\n\tif cachedService, ok = topics[project+service]; !ok {\n\t\tcachedService = make(map[string]aiven.KafkaTopic)\n\t}\n\n\ttopicForCache := aiven.KafkaTopic{\n\t\tMinimumInSyncReplicas: topic.MinimumInSyncReplicas,\n\t\tPartitions: partitions(topic.Partitions),\n\t\tReplication: topic.Replication,\n\t\tRetentionBytes: topic.RetentionBytes,\n\t\tRetentionHours: topic.RetentionHours,\n\t\tState: topic.State,\n\t\tTopicName: topic.TopicName,\n\t\tCleanupPolicy: topic.CleanupPolicy}\n\n\tcachedService[topic.TopicName] = topicForCache\n\ttopics[project+service] = cachedService\n\treturn\n}",
"func (c *Client) write(mt int, payload []byte) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\treturn c.ws.WriteMessage(mt, payload)\n}",
"func (m *MQTT) Write(msg *message.Batch) error {\n\tm.connMut.RLock()\n\tclient := m.client\n\tm.connMut.RUnlock()\n\n\tif client == nil {\n\t\treturn component.ErrNotConnected\n\t}\n\n\treturn IterateBatchedSend(msg, func(i int, p *message.Part) error {\n\t\tretained := m.conf.Retained\n\t\tif m.retained != nil {\n\t\t\tvar parseErr error\n\t\t\tretained, parseErr = strconv.ParseBool(m.retained.String(i, msg))\n\t\t\tif parseErr != nil {\n\t\t\t\tm.log.Errorf(\"Error parsing boolean value from retained flag: %v \\n\", parseErr)\n\t\t\t}\n\t\t}\n\t\tmtok := client.Publish(m.topic.String(i, msg), m.conf.QoS, retained, p.Get())\n\t\tmtok.Wait()\n\t\tsendErr := mtok.Error()\n\t\tif sendErr == mqtt.ErrNotConnected {\n\t\t\tm.connMut.RLock()\n\t\t\tm.client = nil\n\t\t\tm.connMut.RUnlock()\n\t\t\tsendErr = component.ErrNotConnected\n\t\t}\n\t\treturn sendErr\n\t})\n}",
"func (opcuaExport *OpcuaExport) Publish(data interface{}) {\n\tpubTopics := opcuaExport.opcuaBus.pubTopics\n\ttopicConfigs := make([]map[string]string, len(pubTopics))\n\tfor i, pubTopic := range pubTopics {\n\t\ttopicConfigs[i] = map[string]string{\"ns\": \"StreamManager\", \"name\": pubTopic, \"dType\": \"string\"}\n\t}\n\tfor _, topicConfig := range topicConfigs {\n\t\topcuaData := fmt.Sprintf(\"%s %v\", topicConfig[\"name\"], data)\n\t\topcuaExport.opcuaBus.opcuaDatab.Publish(topicConfig, opcuaData)\n\t\tglog.Infof(\"Published data: %v on topic: %s\\n\", opcuaData, topicConfig)\n\t}\n}",
"func (c *Connection) write(mt int, payload []byte) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\treturn c.ws.WriteMessage(mt, payload)\n}",
"func (c *connection) write(opCode int, payload []byte) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\treturn c.ws.WriteMessage(opCode, payload)\n}",
"func (n *Node) RegisterStrategy(topic string, strategy RoutingStrategy) {\n\tn.strategyMap[topic] = strategy\n}",
"func (p producer) producerHandler(w http.ResponseWriter, r *http.Request) {\n\tp.topic = mux.Vars(r)[\"topic\"]\n\tvar err error\n\tp.body, err = ioutil.ReadAll(r.Body)\n\t//log.Println(\"body of request \",string(p.body))\n\tif err != nil {\n\t\tlog.Println(\"error while reading message body \", err)\n\t}\n\terr = kafkaprod.Produce(&kafka.Message{\n\t\tTopicPartition: kafka.TopicPartition{Topic: &p.topic, Partition: kafka.PartitionAny},\n\t\tValue: p.body,\n\t}, nil)\n\tif err != nil {\n\t\tlog.Println(\"error producing\", err)\n\t\twriteToFile(p.topic, p.body)\n\t}\n}",
"func (p *Publisher) Write(b []byte) (int, error) {\n\tpub := p.tmpl\n\tpub.Body = b\n\treturn len(b), p.Publish(pub)\n}",
"func (c client) Publish(route loadbalancer.Route) (loadbalancer.Result, error) {\n\t_, l4Type := c.name.GetLookupAndType()\n\treq := PublishRequest{Type: l4Type, Route: route}\n\tresp := PublishResponse{}\n\n\tif err := c.client.Call(\"L4.Publish\", req, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn clientResult(resp.Result), nil\n}",
"func (h *Handler) Publish(c *session.Client, topic *string, payload *[]byte) {\n\th.logger.Info(fmt.Sprintf(\"Publish() - username: %s, clientID: %s, topic: %s, payload: %s\", c.Username, c.ID, *topic, string(*payload)))\n}",
"func writeNotification(msg string) error {\n\tif err := kafkaWrite(\"inbound-twitch\", msg)\n\terr != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Restore creates a topic with an existing wal
|
func Restore(name string, walDir string) (*WALTopic, error) {
w, err := walFactory.Open(nil, nil, walDir)
if err != nil {
return nil, errors.Wrapf(err, "Could no open wal for topic %s", name)
}
m := make(map[int]uint64)
return &WALTopic{name: name, walDir: walDir, wal: w, firstIndexInSegment: m}, nil
}
|
[
"func (c *Cluster) RestoreTopic(toml []byte) error {\n\treturn c.createTopic(toml, true)\n}",
"func (ctl Controller) Restore(name string) *pitr.Error {\n\terr := ctl.cluster.Stop()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ctl.cluster.Clear()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstdout, stderr, runErr := ctl.runner.Run(\"sudo --login --user postgres wal-g backup-fetch %s %s\", ctl.cluster.DataDirectory(), name)\n\n\tif runErr != nil {\n\t\treturn &pitr.Error{\n\t\t\tMessage: runErr.Error(),\n\t\t\tStdout: stdout,\n\t\t\tStderr: stderr,\n\t\t}\n\t}\n\n\tctl.createRecoveryConf(`restore_command = 'bash --login -c \\\"wal-g wal-fetch %f %p\\\"'`)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ctl.cluster.Start()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (store *Store) CreateTopic(name string) error {\n\treturn nil\n}",
"func (m *MonLeaderDetector) Restore(id int) {\n\t// TODO(student): Implement\n\t_, ok := m.suspected[id]\n\tif ok == true {\n\t\tdelete(m.suspected, id)\n\t\tm.alive[id] = true\n\t}\n\n\t//Publish to subscribers\n\tvar j int\n\tnewLeader := m.Leader()\n\tif m.LeaderChange || m.Allsuspected {\n\t\tfor j < len(m.Channels) {\n\t\t\tm.Channels[j] <- newLeader\n\t\t\tj++\n\t\t}\n\t}\n}",
"func TestRestoreFromBackup(t *testing.T) {\n\tt.Run(\"Restore from PV backup of old cluster\", func(t *testing.T) {\n\t\ttestRestoreWithBackupPolicy(t, e2eutil.NewPVBackupPolicy(false, \"\"))\n\t})\n\tt.Run(\"Restore from S3 backup of old cluster\", func(t *testing.T) {\n\t\tt.Run(\"per cluster s3 policy\", func(t *testing.T) {\n\t\t\ttestRestoreWithBackupPolicy(t, e2eutil.NewS3BackupPolicy(false))\n\t\t})\n\t})\n}",
"func (env *Env) createTopic(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\ttopicName := ps.ByName(\"topicName\")\n\ttopic := pkg.NewTopic(topicName)\n\terr := env.broker.RegisterTopic(topic)\n\tif err == broker.ErrTopicAlreadyExists {\n\t\thttputil.SendErr(w, httputil.BadRequest)\n\t\treturn\n\t}\n\tif err != nil {\n\t\thttputil.SendErr(w, httputil.InternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Created new topic: %s\\n\", topicName)\n\thttputil.SendOK(w)\n}",
"func (ctl Controller) RestoreToTransactionID(txID int64) *pitr.Error {\n\terr := ctl.cluster.Stop()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ctl.cluster.Clear()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstdout, stderr, runErr := ctl.runner.Run(\"sudo --login --user postgres wal-g backup-fetch %s %s\", ctl.cluster.DataDirectory(), \"LATEST\")\n\n\tif runErr != nil {\n\t\treturn &pitr.Error{\n\t\t\tMessage: runErr.Error(),\n\t\t\tStdout: stdout,\n\t\t\tStderr: stderr,\n\t\t}\n\t}\n\n\tctl.createRecoveryConf(\n\t\t`restore_command = 'bash --login -c \\\"wal-g wal-fetch %f %p\\\"'`,\n\t\tfmt.Sprintf(\"recovery_target_xid = %d\", txID),\n\t\t\"recovery_target_action=promote\",\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif runErr != nil {\n\t\treturn &pitr.Error{\n\t\t\tMessage: runErr.Error(),\n\t\t\tStdout: stdout,\n\t\t\tStderr: stderr,\n\t\t}\n\t}\n\n\terr = ctl.cluster.Start()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (r *Reconciler) createTopic(ctx context.Context, topicName string, partitions int32, replicationFactor int16, retentionMillis int64) error {\n\n\t// Get The Logger From The Context\n\tlogger := logging.FromContext(ctx)\n\n\t// Create The TopicDefinition\n\tretentionMillisString := strconv.FormatInt(retentionMillis, 10)\n\ttopicDetail := &sarama.TopicDetail{\n\t\tNumPartitions: partitions,\n\t\tReplicationFactor: replicationFactor,\n\t\tReplicaAssignment: nil, // Currently Not Assigning Partitions To Replicas\n\t\tConfigEntries: map[string]*string{\n\t\t\tcommonconstants.KafkaTopicConfigRetentionMs: &retentionMillisString,\n\t\t},\n\t}\n\n\t// Attempt To Create The Topic & Process TopicError Results (Including Success ;)\n\terr := r.adminClient.CreateTopic(ctx, topicName, topicDetail)\n\tif err != nil {\n\t\tlogger := logger.With(zap.Int16(\"KError\", int16(err.Err)))\n\t\tswitch err.Err {\n\t\tcase sarama.ErrNoError:\n\t\t\tlogger.Info(\"Successfully Created New Kafka Topic (ErrNoError)\")\n\t\t\treturn nil\n\t\tcase sarama.ErrTopicAlreadyExists:\n\t\t\tlogger.Info(\"Kafka Topic Already Exists - No Creation Required\")\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tlogger.Error(\"Failed To Create Topic\")\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlogger.Info(\"Successfully Created New Kafka Topic (Nil TopicError)\")\n\t\treturn nil\n\t}\n}",
"func TestRestoreTransactions(t *testing.T) {\n\twt, err := newWALTester(t.Name(), prodDependencies{})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer wt.Close()\n\n\t// Create 10 transactions with 1 update each\n\ttxns := []Transaction{}\n\ttotalPages := []page{}\n\ttotalUpdates := []Update{}\n\tfor i := 0; i < 2; i++ {\n\t\tupdates := []Update{}\n\t\tupdates = append(updates, Update{\n\t\t\tName: \"test\",\n\t\t\tVersion: \"1.0\",\n\t\t\tInstructions: fastrand.Bytes(5000), // ensures that 2 pages will be created\n\t\t})\n\t\ttotalUpdates = append(totalUpdates, updates...)\n\n\t\t// Create a new transaction\n\t\ttxn, err := wt.wal.NewTransaction(updates)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\twait := txn.SignalSetupComplete()\n\t\tif err := <-wait; err != nil {\n\t\t\tt.Errorf(\"SignalSetupComplete failed %v\", err)\n\t\t}\n\n\t\t// Check that 2 pages were created\n\t\tpages := transactionPages(txn)\n\t\tif len(pages) != 2 {\n\t\t\tt.Errorf(\"Txn has wrong size. Expected %v but was %v\", 2, len(pages))\n\t\t}\n\t\ttotalPages = append(totalPages, pages...)\n\t\ttxns = append(txns, *txn)\n\t}\n\n\t// restore the transactions\n\trecoveredTxns := []Transaction{}\n\tlogData, err := ioutil.ReadFile(wt.path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, txn := range txns {\n\t\tvar restoredTxn Transaction\n\t\terr := unmarshalTransaction(&restoredTxn, txn.firstPage, txn.firstPage.nextPage.offset, logData)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\trecoveredTxns = append(recoveredTxns, restoredTxn)\n\t}\n\n\t// check if the recovered transactions have the same length as before\n\tif len(recoveredTxns) != len(txns) {\n\t\tt.Errorf(\"Recovered txns don't have same length as before. 
Expected %v but was %v\", len(txns),\n\t\t\tlen(recoveredTxns))\n\t}\n\n\t// check that all txns point to valid pages\n\tfor i, txn := range recoveredTxns {\n\t\tif txn.firstPage == nil {\n\t\t\tt.Errorf(\"%v: The firstPage of the txn is nil\", i)\n\t\t}\n\t\tif txn.firstPage.pageStatus != txns[i].firstPage.pageStatus {\n\t\t\tt.Errorf(\"%v: The pageStatus of the txn is %v but should be\",\n\t\t\t\ttxn.firstPage.pageStatus, txns[i].firstPage.pageStatus)\n\t\t}\n\t}\n\n\t// Decode the updates\n\trecoveredUpdates := []Update{}\n\tfor _, txn := range recoveredTxns {\n\t\t// loop over all the pages of the transaction, retrieve the payloads and decode them\n\t\tpage := txn.firstPage\n\t\tvar updateBytes []byte\n\t\tfor page != nil {\n\t\t\tupdateBytes = append(updateBytes, page.payload...)\n\t\t\tpage = page.nextPage\n\t\t}\n\t\t// Unmarshal the updates of the current transaction\n\t\tvar currentUpdates []Update\n\t\tcurrentUpdates, err := unmarshalUpdates(updateBytes)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unmarshal of updates failed %v\", err)\n\t\t}\n\t\trecoveredUpdates = append(recoveredUpdates, currentUpdates...)\n\t}\n\n\t// Check if the number of recovered updates matches the total number of original updates\n\tif len(totalUpdates) != len(recoveredUpdates) {\n\t\tt.Errorf(\"The number of recovered updates doesn't match the number of original updates.\"+\n\t\t\t\" expected %v but was %v\", len(totalUpdates), len(recoveredUpdates))\n\t}\n\n\t// Check if the recovered updates match the original updates\n\toriginalData, err1 := json.Marshal(totalUpdates)\n\trecoveredData, err2 := json.Marshal(recoveredUpdates)\n\tif err1 != nil || err2 != nil {\n\t\tt.Errorf(\"Failed to marshall data for comparison\")\n\t}\n\tif bytes.Compare(originalData, recoveredData) != 0 {\n\t\tt.Errorf(\"The recovered data doesn't match the original data\")\n\t}\n}",
"func Restore(s string) error { return hometrash.Restore(s) }",
"func restore(ccmd *cobra.Command, args []string) error {\n\tkeyBytes, err := getPrivateKey()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar mdk types.MasterDerivationKey\n\tcopy(mdk[:], keyBytes)\n\tcwResponse, err := kmdClient.CreateWallet(WalletName, WalletPassword, kmd.DefaultWalletDriver, mdk)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating wallet - %s\", err)\n\t}\n\n\tfmt.Printf(\"Created wallet '%s' with ID: %s\\n\", cwResponse.Wallet.Name, cwResponse.Wallet.ID)\n\tif os.Getenv(\"GOTEST\") == \"true\" {\n\t\tccmd.Print(\"Created wallet successfully.\")\n\t}\n\n\treturn nil\n}",
"func (r Recovery) topicMessagesRecovery(from, to int64) error {\n\tmessages, err := r.mirrorClient.GetMessagesForTopicBetween(r.topicID, from, to)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(messages) == 0 {\n\t\tr.logger.Infof(\"No Messages found to recover for Topic [%s]\", r.topicID)\n\t\treturn nil\n\t}\n\n\tr.logger.Debugf(\"Found [%d] unprocessed messages for Topic [%s]\", len(messages), r.topicID)\n\tfor _, msg := range messages {\n\t\tm, err := message.FromString(msg.Contents, msg.ConsensusTimestamp)\n\t\tif err != nil {\n\t\t\tr.logger.Errorf(\"Skipping recovery of Topic Message with timestamp [%s]. Could not decode message. Error: [%s]\", msg.ConsensusTimestamp, err)\n\t\t\tcontinue\n\t\t}\n\n\t\terr = r.messages.ProcessSignature(*m)\n\t\tif err != nil {\n\t\t\tr.logger.Errorf(\"Error - could not handle recovery payload: [%s]\", err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tr.logger.Infof(\"Successfully recovered [%d] Messages for Topic [%s]\", len(messages), r.topicID)\n\treturn nil\n}",
"func RestoreFromBackup(apiClient apiextensionsclientset.Interface, kubeClient kubernetes.Interface, namespace string, backupName string) error {\n\tif backupName == \"\" {\n\t\treturn errors.Errorf(\"\")\n\t}\n\tlog.Logger().Infof(\"Using backup '%s'\", backupName)\n\n\targs := []string{\"create\", \"restore\", \"--from-backup\", backupName, \"--namespace\", namespace}\n\tcmd := util.Command{\n\t\tName: \"velero\",\n\t\tArgs: args,\n\t}\n\n\toutput, err := cmd.RunWithoutRetry()\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"executing '%s %v' command\", cmd.Name, cmd.Args))\n\t}\n\n\tlog.Logger().Infof(output)\n\n\treturn nil\n}",
"func RestoreTo(s, p string) error { return hometrash.RestoreTo(s, p) }",
"func TestRestoreInvalidLearner(t *testing.T) {\n\ts := pb.Snapshot{\n\t\tMetadata: pb.SnapshotMetadata{\n\t\t\tIndex: 11, // magic number\n\t\t\tTerm: 11, // magic number\n\t\t\tConfState: pb.ConfState{Nodes: []uint64{1, 2}, Learners: []uint64{3}},\n\t\t},\n\t}\n\n\tstorage := NewMemoryStorage()\n\tsm := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, storage)\n\tdefer closeAndFreeRaft(sm)\n\n\tif sm.isLearner {\n\t\tt.Errorf(\"%x is learner, want not\", sm.id)\n\t}\n\tif ok := sm.restore(s); ok {\n\t\tt.Error(\"restore succeed, want fail\")\n\t}\n}",
"func TestRestoreLearnerPromotion(t *testing.T) {\n\ts := pb.Snapshot{\n\t\tMetadata: pb.SnapshotMetadata{\n\t\t\tIndex: 11, // magic number\n\t\t\tTerm: 11, // magic number\n\t\t\tConfState: pb.ConfState{Nodes: []uint64{1, 2, 3}},\n\t\t},\n\t}\n\n\tstorage := NewMemoryStorage()\n\tsm := newTestLearnerRaft(3, []uint64{1, 2}, []uint64{3}, 10, 1, storage)\n\tdefer closeAndFreeRaft(sm)\n\n\tif !sm.isLearner {\n\t\tt.Errorf(\"%x is not learner, want yes\", sm.id)\n\t}\n\n\tif ok := sm.restore(s); !ok {\n\t\tt.Error(\"restore fail, want succeed\")\n\t}\n\n\tif sm.isLearner {\n\t\tt.Errorf(\"%x is learner, want not\", sm.id)\n\t}\n}",
"func TestRestoreWithLearner(t *testing.T) {\n\ts := pb.Snapshot{\n\t\tMetadata: pb.SnapshotMetadata{\n\t\t\tIndex: 11, // magic number\n\t\t\tTerm: 11, // magic number\n\t\t\tConfState: pb.ConfState{Nodes: []uint64{1, 2}, Learners: []uint64{3}},\n\t\t},\n\t}\n\n\tstorage := NewMemoryStorage()\n\tsm := newTestLearnerRaft(3, []uint64{1, 2}, []uint64{3}, 10, 1, storage)\n\tdefer closeAndFreeRaft(sm)\n\tif ok := sm.restore(s); !ok {\n\t\tt.Error(\"restore fail, want succeed\")\n\t}\n\n\tif sm.raftLog.lastIndex() != s.Metadata.Index {\n\t\tt.Errorf(\"log.lastIndex = %d, want %d\", sm.raftLog.lastIndex(), s.Metadata.Index)\n\t}\n\tif mustTerm(sm.raftLog.term(s.Metadata.Index)) != s.Metadata.Term {\n\t\tt.Errorf(\"log.lastTerm = %d, want %d\", mustTerm(sm.raftLog.term(s.Metadata.Index)), s.Metadata.Term)\n\t}\n\tif !sm.isLearner {\n\t\tt.Errorf(\"%x is not learner, want yes\", sm.id)\n\t}\n\tsg := sm.nodes()\n\tif len(sg)+len(sm.learnerNodes()) != len(s.Metadata.ConfState.Nodes)+len(s.Metadata.ConfState.Learners) {\n\t\tt.Errorf(\"sm.Nodes = %+v, length not equal with %+v\", sg, s.Metadata.ConfState)\n\t}\n\tfor _, n := range s.Metadata.ConfState.Nodes {\n\t\tif sm.prs[n].IsLearner {\n\t\t\tt.Errorf(\"sm.Node %x isLearner = %s, want %t\", n, sm.prs[n], false)\n\t\t}\n\t}\n\tif len(s.Metadata.ConfState.Nodes) != len(sm.prs) {\n\t\tt.Errorf(\"sm.Nodes = %+v, length not equal with %+v\", sm.prs, s.Metadata.ConfState.Nodes)\n\t}\n\tfor _, n := range s.Metadata.ConfState.Learners {\n\t\tif !sm.learnerPrs[n].IsLearner {\n\t\t\tt.Errorf(\"sm.Node %x isLearner = %s, want %t\", n, sm.prs[n], true)\n\t\t}\n\t}\n\tif len(s.Metadata.ConfState.Learners) != len(sm.learnerPrs) {\n\t\tt.Errorf(\"sm.Nodes = %+v, length not equal with %+v\", sm.learnerPrs, s.Metadata.ConfState.Learners)\n\t}\n\n\tif ok := sm.restore(s); ok {\n\t\tt.Error(\"restore succeed, want fail\")\n\t}\n}",
"func (a *Agent) Restore(r pbm.RestoreCmd, opid pbm.OPID, ep pbm.Epoch) {\n\tl := a.log.NewEvent(string(pbm.CmdRestore), r.BackupName, opid.String(), ep.TS())\n\n\tnodeInfo, err := a.node.GetInfo()\n\tif err != nil {\n\t\tl.Error(\"get node info: %v\", err)\n\t\treturn\n\t}\n\tif !nodeInfo.IsPrimary {\n\t\tl.Info(\"node is not primary so it unsuitable to do restore\")\n\t\treturn\n\t}\n\n\tepts := ep.TS()\n\tlock := a.pbm.NewLock(pbm.LockHeader{\n\t\tType: pbm.CmdRestore,\n\t\tReplset: nodeInfo.SetName,\n\t\tNode: nodeInfo.Me,\n\t\tOPID: opid.String(),\n\t\tEpoch: &epts,\n\t})\n\n\tgot, err := a.aquireLock(lock, l, nil)\n\tif err != nil {\n\t\tl.Error(\"acquiring lock: %v\", err)\n\t\treturn\n\t}\n\tif !got {\n\t\tl.Debug(\"skip: lock not acquired\")\n\t\tl.Error(\"unbale to run the restore while another operation running\")\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tl.Debug(\"releasing lock\")\n\t\terr := lock.Release()\n\t\tif err != nil {\n\t\t\tl.Error(\"release lock: %v\", err)\n\t\t}\n\t}()\n\n\tl.Info(\"restore started\")\n\terr = restore.New(a.pbm, a.node).Snapshot(r, opid, l)\n\tif err != nil {\n\t\tif errors.Is(err, restore.ErrNoDatForShard) {\n\t\t\tl.Info(\"no data for the shard in backup, skipping\")\n\t\t} else {\n\t\t\tl.Error(\"restore: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\tl.Info(\"restore finished successfully\")\n\n\tif nodeInfo.IsLeader() {\n\t\tepch, err := a.pbm.ResetEpoch()\n\t\tif err != nil {\n\t\t\tl.Error(\"reset epoch\")\n\t\t\treturn\n\t\t}\n\n\t\tl.Debug(\"epoch set to %v\", epch)\n\t}\n}",
"func (p *PubsubValueStore) createTopicHandler(topic string) (*topicInfo, error) {\n\tt, err := p.ps.Join(topic)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsub, err := t.Subscribe()\n\tif err != nil {\n\t\t_ = t.Close()\n\t\treturn nil, err\n\t}\n\n\tevts, err := t.EventHandler()\n\tif err != nil {\n\t\tsub.Cancel()\n\t\t_ = t.Close()\n\t}\n\n\tti := &topicInfo{\n\t\ttopic: t,\n\t\tevts: evts,\n\t\tsub: sub,\n\t\tfinished: make(chan struct{}, 1),\n\t}\n\n\treturn ti, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Append appends to the wal of this topic
|
func (t *WALTopic) Append(data Data) error {
var buf bytes.Buffer
_ = gob.NewEncoder(&buf).Encode(data)
if err := t.wal.Log(buf.Bytes()); err != nil {
return errors.Wrapf(err, "Error appending to wal for topic: %s", t.name)
}
for _, a := range t.readerAlerts {
if a.waiting {
a.appendC <- true
}
}
return nil
}
|
[
"func (t *Topic) Append(msg schema.Message) {\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\tt.Depth++\n\tmsg.Offset = t.Depth\n\tt.Channel <- msg\n\tt.buffer.Put(msg)\n}",
"func (l *Log) Append(ctx context.Context, msg Message) error {\n\tdata, err := json.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar b pgx.Batch\n\tb.Queue(\"begin\")\n\tb.Queue(\"lock table switchover_log in exclusive mode\")\n\tb.Queue(\"insert into switchover_log (id, timestamp, data) values (coalesce((select max(id)+1 from switchover_log), 1), now(), $1)\", data)\n\tb.Queue(\"commit\")\n\tb.Queue(\"rollback\")\n\n\tconn, err := stdlib.AcquireConn(l.db)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer releaseConn(l.db, conn)\n\n\terr = conn.SendBatch(ctx, &b).Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (w *Wal) Append(elem mvcc.Element) {\n\tw.receiverChan <- elem\n}",
"func (env *Env) appendMessage(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tmsgBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttputil.SendErr(w, httputil.BadRequest)\n\t\treturn\n\t}\n\tmsg := schema.NewMessage(ps.ByName(\"topicName\"), msgBody)\n\terr = env.broker.HandleMessage(msg)\n\tif err == broker.ErrNoSuchTopic {\n\t\thttputil.SendErr(w, httputil.BadRequest)\n\t\treturn\n\t}\n\tif err != nil {\n\t\thttputil.SendErr(w, httputil.InternalServerError)\n\t\treturn\n\t}\n\thttputil.SendOK(w)\n}",
"func (w *Writer) Append(msg *sej.Message) error {\n\twriter, err := w.ws[int(w.shard(msg)&w.shardMask)].getOrOpen()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn writer.Append(msg)\n}",
"func newStorageAppendMsg(r *raft, rd Ready) pb.Message {\n\tm := pb.Message{\n\t\tType: pb.MsgStorageAppend,\n\t\tTo: LocalAppendThread,\n\t\tFrom: r.id,\n\t\tEntries: rd.Entries,\n\t}\n\tif !IsEmptyHardState(rd.HardState) {\n\t\t// If the Ready includes a HardState update, assign each of its fields\n\t\t// to the corresponding fields in the Message. This allows clients to\n\t\t// reconstruct the HardState and save it to stable storage.\n\t\t//\n\t\t// If the Ready does not include a HardState update, make sure to not\n\t\t// assign a value to any of the fields so that a HardState reconstructed\n\t\t// from them will be empty (return true from raft.IsEmptyHardState).\n\t\tm.Term = rd.Term\n\t\tm.Vote = rd.Vote\n\t\tm.Commit = rd.Commit\n\t}\n\tif !IsEmptySnap(rd.Snapshot) {\n\t\tsnap := rd.Snapshot\n\t\tm.Snapshot = &snap\n\t}\n\t// Attach all messages in msgsAfterAppend as responses to be delivered after\n\t// the message is processed, along with a self-directed MsgStorageAppendResp\n\t// to acknowledge the entry stability.\n\t//\n\t// NB: it is important for performance that MsgStorageAppendResp message be\n\t// handled after self-directed MsgAppResp messages on the leader (which will\n\t// be contained in msgsAfterAppend). This ordering allows the MsgAppResp\n\t// handling to use a fast-path in r.raftLog.term() before the newly appended\n\t// entries are removed from the unstable log.\n\tm.Responses = r.msgsAfterAppend\n\tif needStorageAppendRespMsg(r, rd) {\n\t\tm.Responses = append(m.Responses, newStorageAppendRespMsg(r, rd))\n\t}\n\treturn m\n}",
"func (ck *Clerk) Append(key string, value string) {\n ck.PutAppend(key, value, \"Append\")\n}",
"func (l *TimestampedLog) Append(ts int64, data []byte) error {\n\tlatest, err := l.latest()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ts < latest {\n\t\treturn errors.New(\"TimestampedLog.append: wrong timestamp\")\n\t}\n\n\tidx, err := l.addToSize(1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar buf bytes.Buffer\n\tbuf.Write(util.Uint64To8Bytes(uint64(ts)))\n\tbuf.Write(data)\n\tl.kvw.Set(l.getElemKey(idx), buf.Bytes())\n\treturn nil\n}",
"func (m *Medium) Add(t Topic) {\n\tm.mx.Lock()\n\tm.topics[t.Name()] = t\n\tm.mx.Unlock()\n}",
"func (wal *seriesWAL) Append(metricID uint32, tagsHash uint64, seriesID uint32) (err error) {\n\tif err := wal.base.checkPage(seriesEntryLength); err != nil {\n\t\treturn err\n\t}\n\twal.base.putUint32(metricID)\n\twal.base.putUint64(tagsHash)\n\twal.base.putUint32(seriesID)\n\n\treturn nil\n}",
"func (ck *Clerk) PutAppend(key string, value string, op string) {// {{{\n ck.debug(\"Start Op-%v: %v(key=%v, value=%v)\\n\", ck.opId, op, key, value)\n args := PutAppendArgs {\n Key: key,\n Value: value,\n Op: op,\n ClientId: ck.me,\n OpId: ck.opId}\n ck.opId++\n for {\n for i := range (ck.servers) {\n serverId := (ck.leaderId + i) % len(ck.servers)\n ck.debug(\"Op-%v sent to server-port-%v\\n\", args.OpId, serverId)\n var reply PutAppendReply //has to be inside the loop\n ok := ck.servers[serverId].Call(\"RaftKV.PutAppend\", &args, &reply)\n\n //no reponse or wrongLeader\n if !ok || reply.WrongLeader { continue } \n\n //success\n if reply.Err == OK {\n ck.debug(\"Finished Op-%v %v(key=%v, value=%v)\\n\\n\\n\", args.OpId, op, key, value)\n ck.leaderId = serverId\n return\n }\n }\n ck.debug(\"No Leader: waiting for election of Raft\\n\\n\")\n <-time.After(REQ_INTERVAL)\n }\n}",
"func (c *Client) Append(stream string, record []byte) error {\n\treturn c.sendMessage(encodeAppendByName(stream, record))\n}",
"func (c *Cache) appendEntries(topic, key string, entries Entries, new bool) error {\n\tt, ok := c.topics.Load(topic)\n\tif !ok {\n\t\treturn errors.New(\"Topic does not exist\")\n\t}\n\ttop := t.(*Topic)\n\n\tp, ok := top.partitions.Load(key)\n\tif !ok {\n\t\tnewPart, err := c.newPartition(topic, key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttop.partitions.Store(key, newPart)\n\t\tp = newPart\n\t\tc.router.Update(topic, key, AddPartition)\n\t}\n\tpartition := p.(*Partition)\n\n\tpartition.mu.Lock()\n\tdefer partition.mu.Unlock()\n\tfpos := partition.clog.Tell()\n\n\tvar (\n\t\tfirstAppend *int\n\t\tlastEntry *Entry\n\t\tlastTime time.Time\n\t)\n\tif len(partition.entries) > 0 {\n\t\tlastEntry = partition.entries[len(partition.entries)-1]\n\t\tlastTime = lastEntry.Timestamp\n\t}\n\n\tfor i, entry := range entries {\n\t\tif entry.Timestamp.IsZero() {\n\t\t\t// maybe we want to error out in some cases in the future.\n\t\t\tentry.Timestamp = time.Now()\n\t\t\tif entry.Timestamp.Equal(lastTime) {\n\t\t\t\t// make sure it is unique (in some platform like play.golang.org,\n\t\t\t\t// time.Now() is second-precision)\n\t\t\t\tentry.Timestamp = entry.Timestamp.Add(time.Duration(1))\n\t\t\t}\n\t\t\tlastTime = entry.Timestamp\n\t\t}\n\n\t\tif i > 0 {\n\t\t\tlastEntry = entries[i-1]\n\t\t}\n\t\t// the behavior is to discard the entries that are before the latest\n\t\t// entry in the partition. 
if other entries in the request are after\n\t\t// though, they are still appended.\n\t\tif lastEntry != nil && entry.Timestamp.Before(lastEntry.Timestamp) {\n\t\t\tcontinue\n\t\t}\n\t\tif firstAppend == nil {\n\t\t\ttmp := i\n\t\t\tfirstAppend = &tmp\n\t\t}\n\t\tif new {\n\t\t\tif err := partition.clog.Append(&commitlog.Entry{\n\t\t\t\tTimestamp: entry.Timestamp,\n\t\t\t\tData: entry.Data}); err != nil {\n\t\t\t\tlog.Error(\"Failed to persist %v: %v\", entry, err)\n\t\t\t\tpartition.clog.Truncate(fpos)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif firstAppend != nil {\n\t\tpartition.entries = append(partition.entries, entries[*firstAppend:]...)\n\t} else {\n\t\treturn errors.New(\"Nothing new to append\")\n\t}\n\n\tc.LastCommit = CacheCommit{\n\t\tKey: fmt.Sprintf(\"%v_%v\", topic, key),\n\t\tTimestamp: entries[entries.Len()-1].Timestamp,\n\t}\n\treturn nil\n}",
"func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tterm := request.term\n\tcmd := request.entries\n\tindex := request.prevLogIndex + 1\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field\n\n\tif len(r.myLog) == index {\n\t\tr.myLog = append(r.myLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.myLog[index] = logVal //overwriting in case of log repair\n\t\t//fmt.Println(\"Overwiriting!!\")\n\t}\n\t//fmt.Println(r.myId(), \"Append to log\", string(cmd))\n\t//modify metadata after appending\n\t//r.myMetaData.lastLogIndex = r.myMetaData.lastLogIndex + 1\n\t//r.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\t//\tif len(r.myLog) == 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1\n\t//\t} else if len(r.myLog) > 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t//\t}\n\n\t//Changed on 4th april, above is wrong in case of overwriting of log\n\tr.myMetaData.lastLogIndex = index\n\tr.myMetaData.prevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.myMetaData.prevLogTerm = r.myLog[index-1].Term\n\t}\n\n\t//Update commit index\n\tleaderCI := float64(request.leaderCommitIndex)\n\tmyLI := float64(r.myMetaData.lastLogIndex)\n\tif request.leaderCommitIndex > r.myMetaData.commitIndex {\n\t\tif myLI == -1 { //REDUNDANT since Append to log will make sure it is never -1,also must not copy higher CI if self LI is -1\n\t\t\tr.myMetaData.commitIndex = int(leaderCI)\n\t\t} else {\n\t\t\tr.myMetaData.commitIndex = int(math.Min(leaderCI, myLI))\n\t\t}\n\t}\n\t//fmt.Println(r.myId(), \"My CI is:\", r.myMetaData.commitIndex)\n\tr.WriteLogToDisk()\n}",
"func (m *MemoryLogger) Append(newEntry LogEntry) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tm.Entries[m.index] = newEntry\n\tm.index = (m.index + 1) % maxLogItems\n}",
"func (h *HistoricalRecords) Append(tr *TransferRecord) {\n\th.mutex.Lock()\n\th.records = append(h.records, tr)\n\th.mutex.Unlock()\n}",
"func (list *WhoWasList) Append(whowas WhoWas) {\n\tlist.accessMutex.Lock()\n\tdefer list.accessMutex.Unlock()\n\n\tif len(list.buffer) == 0 {\n\t\treturn\n\t}\n\n\tvar pos int\n\tif list.start == -1 { // empty\n\t\tpos = 0\n\t\tlist.start = 0\n\t\tlist.end = 1\n\t} else if list.start != list.end { // partially full\n\t\tpos = list.end\n\t\tlist.end = (list.end + 1) % len(list.buffer)\n\t} else if list.start == list.end { // full\n\t\tpos = list.end\n\t\tlist.end = (list.end + 1) % len(list.buffer)\n\t\tlist.start = list.end // advance start as well, overwriting first entry\n\t}\n\n\tlist.buffer[pos] = whowas\n}",
"func (ck *Clerk) PutAppend(key string, value string, op string) {\n\t// You will have to modify this function.\n\n\treqID := atomic.AddInt32(&ck.nextReqID, 1)\n\n\targs := PutAppendArgs{Key: key, Value: value, Op: op, RequestID: reqID, ClientID: ck.id, PrevIndex: -1}\n\treply := PutAppendReply{Index: -1}\n\n\tck.logger.Debugf(\"Put Append calls start, key %s, value %s\", args.Key, args.Value)\n\n\tfor {\n\t\tfor _, server := range ck.servers {\n\n\t\t\t//ck.logger.Debugf(\"Put Append calls start: server %d, key %s, value %s\", index, args.Key, args.Value)\n\t\t\tok := server.Call(\"KVServer.PutAppend\", &args, &reply)\n\t\t\tif ok {\n\t\t\t\tif reply.Err == OK {\n\t\t\t\t\tck.logger.Debugf(\"Put Append End Successfully : %s\", value)\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t//ck.logger.Debugf(\"Put Append Failed : Wrong leader %d\", index)\n\t\t\t\t\targs.PrevIndex = reply.Index\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t//ck.logger.Debugf(\"PutAppend RPC failed: %d\", index)\n\t\t\t}\n\t\t}\n\n\t}\n\n\n\n\n}",
"func Append(which string, what string) int {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\ttopics[which] = append(topics[which], what)\n\treturn len(topics[which]) - 1\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Next gets the next AcceptedProposal or blocks
|
func (r *walTopicReader) Next(ctx context.Context) (*types.AcceptedProposal, error) {
return r.next(ctx, false)
}
|
[
"func (b *BlockProcessorQueue) ConfirmedNext() (uint64, bool) {\n\n\tresp := make(chan struct {\n\t\tStatus bool\n\t\tNumber uint64\n\t})\n\treq := Next{ResponseChan: resp}\n\n\tb.ConfirmedNextChan <- req\n\n\tv := <-resp\n\treturn v.Number, v.Status\n\n}",
"func (req *Request) Next() ([]blocks.Block, error) {\n\tselect {\n\tcase bs := <-req.bs:\n\t\treturn bs, nil\n\tcase <-req.Done():\n\t\tselect {\n\t\tcase bs := <-req.bs:\n\t\t\treturn bs, nil\n\t\tdefault:\n\t\t\tif req.err != nil {\n\t\t\t\treturn nil, req.err\n\t\t\t}\n\n\t\t\treturn nil, io.EOF\n\t\t}\n\t}\n}",
"func (pn *paxosNode) GetNextProposalNumber(args *paxosrpc.ProposalNumberArgs, reply *paxosrpc.ProposalNumberReply) error {\n\t// Will just give the Max([Nh/k]*k + id , )\n\tkey := args.Key\n\tpxi := pn.getInstance(key)\n\n\tpxi.mu.RLock()\n\tdefer pxi.mu.RUnlock()\n\n\treplyN := (pxi.Nh/pn.numNodes+1)*pn.numNodes + pn.id\n\treply.N = replyN\n\n\treturn nil\n}",
"func (p *Player) Next() { p.Player.Call(INTERFACE+\".Player.Next\", 0) }",
"func nextRequest(resp *swf.PollForDecisionTaskResponse) swf.RespondDecisionTaskCompletedRequest {\n\n\tcomp, total := 0, 0\n\tfor _, he := range resp.Events {\n\t\tfmt.Printf(\"Decider: EventType - %v, EventId - %v\\n\", he.EventType, he.EventID)\n\t\tif he.EventType == swf.EventTypeActivityTaskCompleted {\n\t\t\tcomp++\n\t\t}\n\t\tif isActivityType(he.EventType) {\n\t\t\ttotal++\n\t\t}\n\t}\n\tfmt.Printf(\"... completedCount = %v\\n\", comp)\n\n\tdecisions := []swf.Decision{}\n\tif total == 0 { // beggining of the workflow\n\t\tdecisions = scheduleActivity(\"HelloTask3\", decisions)\n\t\tdecisions = scheduleActivity(\"WorldTask3\", decisions)\n\t} else if comp == 2 {\n\t\t// complete workflow\n\t\tdecisions = append(decisions, swf.Decision{\n\t\t\tDecisionType: swf.DecisionTypeCompleteWorkflowExecution,\n\t\t\tCompleteWorkflowExecutionDecisionAttributes: &swf.CompleteWorkflowExecutionDecisionAttributes{\n\t\t\t\tResult: `{ \"Result\": \"WF Complete!\" }`,\n\t\t\t},\n\t\t})\n\n\t\tfmt.Println(\"Decider: WORKFLOW COMPLETE!!!!!!!!!!!!!!!!!!\")\n\t}\n\n\treturn swf.RespondDecisionTaskCompletedRequest{\n\t\tDecisions: decisions,\n\t\tTaskToken: resp.TaskToken,\n\t}\n}",
"func (alg *Algorand) blockProposal(resolveFork bool) *Block {\n\tlog.Trace(\"[algorand:blockProposal] a**\", \"node\", alg.id)\n\tround := alg.round() + 1\n\n\tvrf, proof, subusers := alg.sortition(alg.sortitionSeed(round), role(proposer, round, PROPOSE), expectedBlockProposers, alg.tokenOwn())\n\t// have been selected.\n\tlog.Trace(\"[algorand:blockProposal] b**\", \"node\", alg.id)\n\toverwrite := false\n\tisRRLeader := alg.id == 7 //&& alg.round() != 1\n\t//isRRLeader := alg.id%3 == 1\n\t//isRRLeader := true\n\tif isRRLeader && alg.isPoA {\n\t\toverwrite = true\n\t}\n\n\tlog.Trace(\"[algorand:blockProposal] c**\", \"node\", alg.id, \"isPOA\", alg.isPoA, \"overwrite?\", overwrite)\n\tif subusers > 0 && !alg.isPoA {\n\t\tlog.Info(fmt.Sprintf(\"** Node %d has %d sub-users in block proposal [algorand:blockProposal]\", alg.id, subusers))\n\t} else if overwrite {\n\t\tlog.Info(fmt.Sprintf(\"** Node %d is POA proposer [v,p,j,r] = %x, %x, %v, %v [algorand:blockProposal]\", alg.id, vrf, proof, subusers, round))\n\t} else if subusers > 0 {\n\t\tlog.Info(fmt.Sprintf(\"** Node %d has %d sub-users in block proposal but is not in POA [algorand:blockProposal]\", alg.id, subusers))\n\t}\n\n\tif (subusers > 0 && !alg.isPoA) || overwrite {\n\t\tvar (\n\t\t\t// use Block\n\t\t\tnewBlk *Block\n\t\t\tforked bool\n\t\t\t//proposalType int\n\t\t)\n\n\t\tif !resolveFork {\n\t\t\tnewBlk = alg.proposeBlock()\n\t\t\tforked = false\n\t\t\t//proposalType = BLOCK_PROPOSAL\n\t\t} else {\n\t\t\tnewBlk = alg.proposeFork()\n\t\t\tforked = true\n\t\t\t//proposalType = FORK_PROPOSAL\n\t\t}\n\n\t\tproposal := &Proposal{\n\t\t\tBlockNumber: newBlk.Round(),\n\t\t\tBlockHash: newBlk.Hash(),\n\t\t\tPrior: maxPriority(vrf, subusers),\n\t\t\tVRF: vrf,\n\t\t\tProof: proof,\n\t\t\tPubkey: alg.pubkey.Bytes(),\n\t\t}\n\n\t\t//log.Info(\"[Node%d] blockProposal\", \"node\", alg.id, \"proposal\", proposal, \"proposalHash\", proposal.Hash().Hex())\n\t\tlog.Info(fmt.Sprintf(\"[Node%d] blockProposal 
[algorand:blockProposal]\", alg.id), \"proposal hash\", proposal.Hash().Hex(), \"proposal\", proposal)\n\t\terr := alg.chain.WriteBlock(newBlk)\n\t\tif err != nil {\n\t\t\tlog.Info(\"[algorand:blockProposal] block proposal writeblock\", \"writeblock err\", err)\n\t\t}\n\n\t\talg.chain.protocolManager.setMaxProposal(round, proposal)\n\t\talg.chain.protocolManager.addBlock(newBlk.Hash(), newBlk)\n\n\t\tif alg.maliciousType == EvilBlockProposal && !resolveFork {\n\t\t\t//go alg.chain.protocolManager.Gossip(BLOCK, blkMsg, 0)\n\t\t\t//go alg.chain.protocolManager.halfGossip(proposalType, proposalMsg, 0)\n\n\t\t\t// gossip another version of block to the remaining half peers.\n\t\t\t// newBlk = alg.proposeBlock()\n\t\t\t// proposal = &Proposal{\n\t\t\t// \tRound: newBlk.Round(),\n\t\t\t// \tHash: newBlk.Hash(),\n\t\t\t// \tPrior: maxPriority(vrf, subusers),\n\t\t\t// \tVRF: vrf,\n\t\t\t// \tProof: proof,\n\t\t\t// \tPubkey: alg.pubkey.Bytes(),\n\t\t\t// }\n\t\t\t// blkMsg, _ = newBlk.Serialize()\n\t\t\t// proposalMsg, _ = proposal.Serialize()\n\t\t\t// go alg.chain.protocolManager.halfGossip(BLOCK, blkMsg, 1)\n\t\t\t// go alg.chain.protocolManager.halfGossip(proposalType, proposalMsg, 1)\n\t\t} else {\n\t\t\t//gossip block, proposal\n\t\t\tgo alg.chain.protocolManager.BroadcastBlock(newBlk)\n\t\t\talg.chain.protocolManager.proposal_Ch <- ProposalPreEvent{Proposal: proposal, Forked: forked}\n\t\t\t//go alg.chain.protocolManager.BroadcastProposal(proposal.Hash(), &ProposalWithType{Proposal: proposal, Forked: forked})\n\t\t\t//TODO: add gossip back\n\t\t}\n\t}\n\n\t// wait for Ξ»stepvar + Ξ»priority time to identify the highest priority.\n\ttimeoutForPriority := time.NewTimer(lamdaStepvar + lamdaPriority)\n\t<-timeoutForPriority.C\n\n\t// timeout for block gossiping.\n\ttimeoutForBlockFlying := time.NewTimer(lamdaBlock)\n\tticker := time.NewTicker(2000 * time.Millisecond)\n\tfor {\n\t\tselect {\n\t\tcase <-timeoutForBlockFlying.C:\n\t\t\t// empty 
block\n\t\t\tlog.Warn(fmt.Sprintf(\"** Node %d get timeoutForBlockFlying [algorand:blockProposal] \", alg.id))\n\t\t\tnewblk := alg.emptyBlock(round, alg.lastBlock().Hash(), alg.lastBlock().Seed)\n\t\t\talg.chain.protocolManager.addBlock(newblk.Hash(), newblk)\n\t\t\terr := alg.chain.WriteBlock(newblk)\n\t\t\tif err != nil {\n\t\t\t\tlog.Info(\"[algorand:blockProposal] block proposal writeblock\", \"writeblock err\", err)\n\t\t\t}\n\t\t\treturn newblk\n\t\t\t//return alg.emptyBlock(round, alg.lastBlock().Hash(), alg.lastBlock().Seed)\n\t\tcase <-ticker.C:\n\t\t\t// get the block with the highest priority\n\t\t\tpp := alg.chain.protocolManager.getMaxProposal(round)\n\n\t\t\tif pp == nil {\n\t\t\t\t//log.Trace(\"[algorand:blockProposal] proposal is nil, continue\", \"node\", alg.id)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tblk := alg.chain.protocolManager.getBlock(pp.BlockHash)\n\t\t\tif blk != nil {\n\t\t\t\treturn blk\n\t\t\t}\n\t\t\tlog.Info(\"[algorand:blockProposal] ** getBlock err: block is nil\", \"node\", alg.id, \"proposal\", pp.BlockHash.Hex())\n\t\t\t//log.Info(\"non-empty max proposal\", \"node\", alg.id, \"proposal\", pp.Hash.Hex())\n\t\t}\n\t}\n}",
"func (src *BundleSource) GetNextBundleToConfirm() *FirstBundleData {\n\tret, ok := <-src.theSource\n\tif ok {\n\t\treturn ret\n\t}\n\treturn nil\n}",
"func (pn *paxosNode) GetNextProposalNumber(args *paxosrpc.ProposalNumberArgs, reply *paxosrpc.ProposalNumberReply) error {\n\tpn.maxRoundNumber[args.Key]++\n\treply.N = mergeNumbers(pn.maxRoundNumber[args.Key], pn.myID)\n\treturn nil\n}",
"func (s *Server) GetNextProposalNumber(key string, nodeID int) (*paxosrpc.ProposalNumberReply, error) {\n\tfmt.Println(\"in app: get next proposal\")\n\n\targs := &paxosrpc.ProposalNumberArgs{Key: key}\n\tfmt.Println(\"in app: \", args)\n\tvar reply paxosrpc.ProposalNumberReply\n\terr := s.cliMap[nodeID].Call(\"PaxosNode.GetNextProposalNumber\", args, &reply)\n\tfmt.Println(\"in app: \", err)\n\treturn &reply, err\n}",
"func (chain *TestChain) NextBlock() {\n\t// set the last header to the current header\n\t// use nil trusted fields\n\tchain.LastHeader = chain.CurrentOCClientHeader()\n\n\t// increment the current header\n\tchain.CurrentHeader = tmproto.Header{\n\t\tChainID: chain.ChainID,\n\t\tHeight: chain.App.LastBlockHeight() + 1,\n\t\tAppHash: chain.App.LastCommitID().Hash,\n\t\t// NOTE: the time is increased by the coordinator to maintain time synchrony amongst\n\t\t// chains.\n\t\tTime: chain.CurrentHeader.Time,\n\t\tValidatorsHash: chain.Vals.Hash(),\n\t\tNextValidatorsHash: chain.Vals.Hash(),\n\t}\n\n\tchain.App.BeginBlock(ocabci.RequestBeginBlock{Header: chain.CurrentHeader})\n}",
"func NewAcceptedProposal(data []byte) (*AcceptedProposal, error) {\n\tvar ap AcceptedProposal\n\tdec := gob.NewDecoder(bytes.NewBuffer(data))\n\tif err := dec.Decode(&ap); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ap, nil\n}",
"func (ps *RandomPeerSelector) Next() *peers.Peer {\n\tselectablePeers := ps.selectablePeers\n\n\tif len(selectablePeers) == 0 {\n\t\treturn nil\n\t}\n\n\tif len(selectablePeers) > 1 {\n\t\t_, selectablePeers = peers.ExcludePeer(selectablePeers, ps.last)\n\t}\n\n\ti := rand.Intn(len(selectablePeers))\n\n\tpeer := selectablePeers[i]\n\n\treturn peer\n}",
"func (b *BlockProcessorQueue) UnconfirmedNext() (uint64, bool) {\n\n\tresp := make(chan struct {\n\t\tStatus bool\n\t\tNumber uint64\n\t})\n\treq := Next{ResponseChan: resp}\n\n\tb.UnconfirmedNextChan <- req\n\n\tv := <-resp\n\treturn v.Number, v.Status\n\n}",
"func (s *Subscriber) Next() (*Message, bool) {\n\tselect {\n\tcase m := <-s.msgs:\n\t\treturn m, false\n\tcase <-s.done:\n\t\treturn nil, true\n\t}\n}",
"func (c *Client) next() (rsp *Response, err error) {\n\traw, err := c.r.Next()\n\tif err == nil {\n\t\trsp, err = raw.Parse()\n\t}\n\treturn\n}",
"func (message *Message) Next() {\n\tif message == nil {\n\t\treturn\n\t}\n\n\tmessage.resolve(nil)\n}",
"func (cu *cursor) Next() (*cb.Block, cb.Status) {\n\t// This only loops once, as signal reading indicates the new block has been written\n\tfor {\n\t\tblock, found := cu.fl.readBlock(cu.blockNumber)\n\t\tif found {\n\t\t\tif block == nil {\n\t\t\t\treturn nil, cb.Status_SERVICE_UNAVAILABLE\n\t\t\t}\n\t\t\tcu.blockNumber++\n\t\t\treturn block, cb.Status_SUCCESS\n\t\t}\n\t\t<-cu.fl.signal\n\t}\n}",
"func (bce *BaseCompositeEndpoint) GetNext() CompositeEndpoint {\n\treturn bce.next\n}",
"func (cm *ConsensusManager) shareProposal(rs *RoundState) {\n\tprivValidator := cm.PrivValidator()\n\tproposal := rs.Proposal\n\tif privValidator == nil || proposal == nil {\n\t\treturn\n\t}\n\tprivValidator.SignProposal(rs.Round, proposal)\n\tblockParts := proposal.BlockParts()\n\tpeers := cm.sw.Peers().List()\n\tif len(peers) == 0 {\n\t\tlog.Warning(\"Could not propose: no peers\")\n\t\treturn\n\t}\n\tnumBlockParts := uint16(len(blockParts))\n\tkbpMsg := cm.makeKnownBlockPartsMessage(rs)\n\tfor i, peer := range peers {\n\t\tpeerState := cm.getPeerState(peer)\n\t\tif !peerState.IsConnected() {\n\t\t\tcontinue // Peer was disconnected.\n\t\t}\n\t\tstartIndex := uint16((i * len(blockParts)) / len(peers))\n\t\t// Create a function that when called,\n\t\t// starts sending block parts to peer.\n\t\tcb := func(peer *p2p.Peer, startIndex uint16) func() {\n\t\t\treturn func() {\n\t\t\t\t// TODO: if the clocks are off a bit,\n\t\t\t\t// peer may receive this before the round flips.\n\t\t\t\tpeer.Send(KnownPartsCh, kbpMsg)\n\t\t\t\tfor i := uint16(0); i < numBlockParts; i++ {\n\t\t\t\t\tpart := blockParts[(startIndex+i)%numBlockParts]\n\t\t\t\t\t// Ensure round hasn't expired on our end.\n\t\t\t\t\tcurrentRS := cm.cs.RoundState()\n\t\t\t\t\tif currentRS != rs {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// If peer wants the block:\n\t\t\t\t\tif peerState.WantsBlockPart(part) {\n\t\t\t\t\t\tpartMsg := &BlockPartMessage{BlockPart: part}\n\t\t\t\t\t\tpeer.Send(ProposalCh, partMsg)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(peer, startIndex)\n\t\t// Call immediately or schedule cb for when peer is ready.\n\t\tpeerState.SetRoundCallback(rs.Height, rs.Round, cb)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
User will inject the databaseTx in the `User` schema
|
func (gtx *GuardTx) User(user *schema.User) *schema.User {
if user == nil {
user = &schema.User{
Entity: schema.Entity{DBContract: gtx.dbTx},
}
} else {
user.DBContract = gtx.dbTx
}
user.SetValidator(gtx.validator.User)
return user
}
|
[
"func (env *Env) PostUserTransaction(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tlogger := logging.FromContext(ctx)\n\n\tvalidate := validator.New()\n\ttxPayload := new(TxPayload)\n\tif err := json.NewDecoder(r.Body).Decode(txPayload); err != nil {\n\t\tlogger.WithError(err).Error(\"read input failed\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tif err := validate.Struct(txPayload); err != nil {\n\t\tlogger.WithError(err).Error(\"validate input failed\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\ttxAmount, err := decimal.NewFromString(txPayload.Amount)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"bad amount\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttxState := \"\"\n\tswitch txPayload.State {\n\tcase \"win\":\n\t\ttxState = db.Win\n\tcase \"lost\":\n\t\ttxState = db.Lost\n\t}\n\tif txState == \"\" {\n\t\tlogger.WithField(\"state\", txPayload.State).Error(\"unknown state\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tdbUser := middleware.UserFromContext(ctx)\n\tdbConn := env.DbConn\n\ttx, err := dbConn.BeginTxx(ctx, &sql.TxOptions{\n\t\tIsolation: sql.LevelSerializable,\n\t\tReadOnly: false,\n\t})\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"start transaction failed\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer func() {\n\t\ttxErr := tx.Rollback()\n\t\tif txErr != nil && txErr != sql.ErrTxDone {\n\t\t\tlogger.WithError(err).Error(\"rollback transaction failed\")\n\t\t}\n\t}()\n\n\t// we can not rely on the dbUser.Amount value, because it could be changed\n\t// before the transaction started\n\tactualBalance, err := db.ActualUserBalance(ctx, tx, dbUser.ID)\n\tif err != nil {\n\t\tlogger.\n\t\t\tWithError(err).\n\t\t\tWithField(\"user_id\", dbUser.ID).\n\t\t\tError(\"get actual balance failed\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdbTxState, err := db.TxStateByName(ctx, tx, 
txState)\n\tif err != nil {\n\t\tlogger.\n\t\t\tWithError(err).\n\t\t\tWithField(\"state\", txState).\n\t\t\tError(\"get tx state failed\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ttxID := uuid.New()\n\treceivedAt := time.Now().UTC()\n\ttxExtID := chi.URLParam(r, \"tx_id\")\n\tnewDbTransaction := &db.Transaction{\n\t\tID: txID,\n\t\tExtID: txExtID,\n\t\tUserID: dbUser.ID,\n\t\tTxStateID: dbTxState.ID,\n\t\tAmount: txAmount,\n\t\tReceivedAt: receivedAt,\n\t}\n\terr = db.CreateTransaction(ctx, tx, newDbTransaction)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"create transaction failed\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tswitch txState {\n\tcase db.Win:\n\t\terr = db.IncreaseUserBalance(ctx, tx, dbUser.ID, txAmount)\n\tcase db.Lost:\n\t\tif actualBalance.Sub(txAmount).IsNegative() {\n\t\t\tlogger.Error(\"balance can not be negative, can not decrease it\")\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\terr = db.DecreaseUserBalance(ctx, tx, dbUser.ID, txAmount)\n\tdefault:\n\t\terr = fmt.Errorf(\"unknown tx state '%s'\", txState)\n\t}\n\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"update balance failed\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"commit failed\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}",
"func (r *Resolver) User() exec.UserResolver { return &userResolver{r} }",
"func (r *Resolver) User() generated.UserResolver { return &userResolver{r} }",
"func TestCreateTablePutUser(t *testing.T) {\n\n\tdbsql, err := sql.Open(\"postgres\", \"user=postgres dbname=gorm password=simsim sslmode=disable\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdb, err := InitDB(dbsql)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = db.PutUser(12312)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func (r *Resolver) User() UserResolver { return &userResolver{r} }",
"func (_UserCrud *UserCrudTransactor) InsertUser(opts *bind.TransactOpts, userAddress common.Address, userEmail string, userAge *big.Int) (*types.Transaction, error) {\n\treturn _UserCrud.contract.Transact(opts, \"insertUser\", userAddress, userEmail, userAge)\n}",
"func Create(user User) error {\n\t\n}",
"func registerUser() {\n\tgoes.Register(\n\t\t&User{},\n\t\tFirstNameUpdatedV1{},\n\t\tCreatedV1{},\n\t)\n}",
"func SignUpUser(c *gin.Context) {\n\tvar db = models.InitDB()\n\tvar userData models.User\n\terr := c.Bind(&userData)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Println(userData)\n\tif err := db.Create(&userData).Error; err != nil {\n\t\tc.JSON(200, gin.H{\n\t\t\t\"creation\": \"false\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"creation\": \"true\",\n\t})\n}",
"func (persist *Persister) createUserTable() {\n\n\tvar dbaser = persist.databaser\n\n\tdb, err := sql.Open(dbaser.Driver(), dbaser.Name())\n\tif err != nil {\n\t\tfmt.Println(\"Error on open of database\", err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tvar query = fmt.Sprintf(\n\t\tcreateUserTable,\n\t\tdbaser.IncrementPrimaryKey(),\n\t\tdbaser.DateField())\n\n\t_, err = db.Exec(query)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating Users table, driver \\\"%s\\\", dbname \\\"%s\\\", query = \\\"%s\\\"\\n\",\n\t\t\tdbaser.Driver(), dbaser.Name(), query)\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n}",
"func (ua *UserAuth) User(ctx context.Context, db XODB) (*User, error) {\n\treturn UserByUserID(ctx, db, ua.UserID)\n}",
"func (ds *SQLDepositStore) RegisterUser(pubkey *koblitz.PublicKey, address string) (err error) {\n\t// ACID\n\tvar tx *sql.Tx\n\tif tx, err = ds.DBHandler.Begin(); err != nil {\n\t\terr = fmt.Errorf(\"Error when beginning transaction for RegisterUser: %s\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\terr = fmt.Errorf(\"Error for RegisterUser: \\n%s\", err)\n\t\t\treturn\n\t\t}\n\t\terr = tx.Commit()\n\t}()\n\n\t// First use the deposit address schema\n\tif _, err = tx.Exec(\"USE \" + ds.depositAddrSchemaName + \";\"); err != nil {\n\t\terr = fmt.Errorf(\"Error using puzzle schema for RegisterUser: %s\", err)\n\t\treturn\n\t}\n\n\tinsertUserQuery := fmt.Sprintf(\"INSERT INTO %s VALUES ('%x', '%s');\", ds.coin.Name, pubkey.SerializeCompressed(), address)\n\tif _, err = tx.Exec(insertUserQuery); err != nil {\n\t\terr = fmt.Errorf(\"Error adding user and address for RegisterUser: %s\", err)\n\t\treturn\n\t}\n\treturn\n}",
"func (s *UserRepository) User(id string) (*akwad.Account, error) {\n\n\treturn nil, nil\n}",
"func (userRepo *mockUserRepo) Initialize(ctx context.Context, db *sql.DB) {}",
"func (u *User) addUser(db *storage.DB) error {\n\t// Acquire write lock on the database.\n\tdb.Mu.Lock()\n\tdefer db.Mu.Unlock()\n\n\t// Start the transaction\n\ttx, err := db.Conn.Begin()\n\tdefer tx.Rollback()\n\tif err != nil {\n\t\tlog.Printf(\"account/adduser.go: %s\\n\",\n\t\t\t\"failed to begin transaction\")\n\t\treturn err\n\t}\n\n\tstmt, err := db.Conn.Prepare(`\nINSERT INTO accounts(id, username, hash, regTime) values(?, ?, ?, ?)`)\n\tif err != nil {\n\t\tlog.Printf(\"account/adduser.go: %s\\n\",\n\t\t\t\"failed to prepare statement\")\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\t_, err = stmt.Exec(u.ID, u.Username, u.Hash, time.Now().UTC())\n\tif err != nil {\n\t\tlog.Printf(\"account/adduser.go: %s\\n\",\n\t\t\t\"failed to execute statement\")\n\t\treturn err\n\t}\n\n\ttx.Commit()\n\treturn err\n}",
"func (db *UserDatabase) Init() error {\n\tvar err error\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, stmt := range schemaV1 {\n\t\tlog.Println(stmt)\n\t\ttx.MustExec(stmt)\n\t}\n\tdefaultPassword := getDefaultPassword()\n\t_, err = tx.CreateUser(claudia.ApplicationAdminUsername, defaultPassword)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsessionAuthKey := securecookie.GenerateRandomKey(32)\n\tsessionCryptKey := securecookie.GenerateRandomKey(32)\n\tcrt, key := util.GenerateSelfSignedCert()\n\ttx.MustExec(\"INSERT INTO configuration (schema_version, session_auth_key, session_crypt_key, private_key, public_certificate) VALUES ($1, $2, $3, $4, $5)\",\n\t\tSchemaVersion, sessionAuthKey, sessionCryptKey, key, crt)\n\ttx.Commit()\n\ttx, err = db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tconf, err := tx.GetConfiguration()\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\ttx.Commit()\n\tlog.Printf(\"Successfully initialized database (schema: %d)\", conf.SchemaVersion)\n\treturn nil\n}",
"func init() {\n\tuserFields := schema.User{}.Fields()\n\t_ = userFields\n\t// userDescUserName is the schema descriptor for user_name field.\n\tuserDescUserName := userFields[0].Descriptor()\n\t// user.DefaultUserName holds the default value on creation for the user_name field.\n\tuser.DefaultUserName = userDescUserName.Default.(string)\n\t// userDescEmail is the schema descriptor for email field.\n\tuserDescEmail := userFields[1].Descriptor()\n\t// user.DefaultEmail holds the default value on creation for the email field.\n\tuser.DefaultEmail = userDescEmail.Default.(string)\n\t// userDescTelNum is the schema descriptor for tel_num field.\n\tuserDescTelNum := userFields[2].Descriptor()\n\t// user.DefaultTelNum holds the default value on creation for the tel_num field.\n\tuser.DefaultTelNum = userDescTelNum.Default.(string)\n\t// userDescPassword is the schema descriptor for password field.\n\tuserDescPassword := userFields[3].Descriptor()\n\t// user.DefaultPassword holds the default value on creation for the password field.\n\tuser.DefaultPassword = userDescPassword.Default.(string)\n\t// userDescPasswordStr is the schema descriptor for password_str field.\n\tuserDescPasswordStr := userFields[4].Descriptor()\n\t// user.DefaultPasswordStr holds the default value on creation for the password_str field.\n\tuser.DefaultPasswordStr = userDescPasswordStr.Default.(string)\n\t// userDescRegType is the schema descriptor for reg_type field.\n\tuserDescRegType := userFields[5].Descriptor()\n\t// user.DefaultRegType holds the default value on creation for the reg_type field.\n\tuser.DefaultRegType = userDescRegType.Default.(int8)\n\t// userDescRegisterIP is the schema descriptor for register_ip field.\n\tuserDescRegisterIP := userFields[7].Descriptor()\n\t// user.DefaultRegisterIP holds the default value on creation for the register_ip field.\n\tuser.DefaultRegisterIP = userDescRegisterIP.Default.(int)\n\t// userDescTelStatus is the schema descriptor for tel_status 
field.\n\tuserDescTelStatus := userFields[8].Descriptor()\n\t// user.DefaultTelStatus holds the default value on creation for the tel_status field.\n\tuser.DefaultTelStatus = userDescTelStatus.Default.(int8)\n\t// userDescStatus is the schema descriptor for status field.\n\tuserDescStatus := userFields[9].Descriptor()\n\t// user.DefaultStatus holds the default value on creation for the status field.\n\tuser.DefaultStatus = userDescStatus.Default.(int8)\n\t// userDescCreatedAt is the schema descriptor for created_at field.\n\tuserDescCreatedAt := userFields[10].Descriptor()\n\t// user.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tuser.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time)\n\tuserextendFields := schema.UserExtend{}.Fields()\n\t_ = userextendFields\n\t// userextendDescRealName is the schema descriptor for real_name field.\n\tuserextendDescRealName := userextendFields[1].Descriptor()\n\t// userextend.DefaultRealName holds the default value on creation for the real_name field.\n\tuserextend.DefaultRealName = userextendDescRealName.Default.(string)\n\t// userextendDescIDNumber is the schema descriptor for id_number field.\n\tuserextendDescIDNumber := userextendFields[2].Descriptor()\n\t// userextend.DefaultIDNumber holds the default value on creation for the id_number field.\n\tuserextend.DefaultIDNumber = userextendDescIDNumber.Default.(string)\n\t// userextendDescAge is the schema descriptor for age field.\n\tuserextendDescAge := userextendFields[3].Descriptor()\n\t// userextend.DefaultAge holds the default value on creation for the age field.\n\tuserextend.DefaultAge = userextendDescAge.Default.(int8)\n\t// userextendDescSex is the schema descriptor for sex field.\n\tuserextendDescSex := userextendFields[4].Descriptor()\n\t// userextend.DefaultSex holds the default value on creation for the sex field.\n\tuserextend.DefaultSex = userextendDescSex.Default.(int8)\n\t// userextendDescBirth is the schema 
descriptor for birth field.\n\tuserextendDescBirth := userextendFields[5].Descriptor()\n\t// userextend.DefaultBirth holds the default value on creation for the birth field.\n\tuserextend.DefaultBirth = userextendDescBirth.Default.(int)\n\t// userextendDescIcon is the schema descriptor for icon field.\n\tuserextendDescIcon := userextendFields[6].Descriptor()\n\t// userextend.DefaultIcon holds the default value on creation for the icon field.\n\tuserextend.DefaultIcon = userextendDescIcon.Default.(string)\n\t// userextendDescNickName is the schema descriptor for nick_name field.\n\tuserextendDescNickName := userextendFields[7].Descriptor()\n\t// userextend.DefaultNickName holds the default value on creation for the nick_name field.\n\tuserextend.DefaultNickName = userextendDescNickName.Default.(string)\n\t// userextendDescImei is the schema descriptor for imei field.\n\tuserextendDescImei := userextendFields[8].Descriptor()\n\t// userextend.DefaultImei holds the default value on creation for the imei field.\n\tuserextend.DefaultImei = userextendDescImei.Default.(string)\n\t// userextendDescOaid is the schema descriptor for oaid field.\n\tuserextendDescOaid := userextendFields[9].Descriptor()\n\t// userextend.DefaultOaid holds the default value on creation for the oaid field.\n\tuserextend.DefaultOaid = userextendDescOaid.Default.(string)\n\t// userextendDescDeviceID is the schema descriptor for device_id field.\n\tuserextendDescDeviceID := userextendFields[10].Descriptor()\n\t// userextend.DefaultDeviceID holds the default value on creation for the device_id field.\n\tuserextend.DefaultDeviceID = userextendDescDeviceID.Default.(string)\n\t// userextendDescSystemName is the schema descriptor for system_name field.\n\tuserextendDescSystemName := userextendFields[11].Descriptor()\n\t// userextend.DefaultSystemName holds the default value on creation for the system_name field.\n\tuserextend.DefaultSystemName = userextendDescSystemName.Default.(string)\n\t// 
userextendDescSystemVersion is the schema descriptor for system_version field.\n\tuserextendDescSystemVersion := userextendFields[12].Descriptor()\n\t// userextend.DefaultSystemVersion holds the default value on creation for the system_version field.\n\tuserextend.DefaultSystemVersion = userextendDescSystemVersion.Default.(string)\n\t// userextendDescAdid is the schema descriptor for adid field.\n\tuserextendDescAdid := userextendFields[13].Descriptor()\n\t// userextend.DefaultAdid holds the default value on creation for the adid field.\n\tuserextend.DefaultAdid = userextendDescAdid.Default.(string)\n\t// userextendDescGameID is the schema descriptor for game_id field.\n\tuserextendDescGameID := userextendFields[14].Descriptor()\n\t// userextend.DefaultGameID holds the default value on creation for the game_id field.\n\tuserextend.DefaultGameID = userextendDescGameID.Default.(string)\n\t// userextendDescThirdPlatformID is the schema descriptor for third_platform_id field.\n\tuserextendDescThirdPlatformID := userextendFields[15].Descriptor()\n\t// userextend.DefaultThirdPlatformID holds the default value on creation for the third_platform_id field.\n\tuserextend.DefaultThirdPlatformID = userextendDescThirdPlatformID.Default.(int8)\n\t// userextendDescCreatedAt is the schema descriptor for created_at field.\n\tuserextendDescCreatedAt := userextendFields[16].Descriptor()\n\t// userextend.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tuserextend.DefaultCreatedAt = userextendDescCreatedAt.Default.(func() time.Time)\n}",
"func (mgr *UserMgr) InsertUser(user *User) {\n\tuser.Password = HashPassword(user.Password)\n\tsql := \"INSERT INTO users (username, email, password, role) VALUES (:username, :email, :password, :role)\"\n\t_, err := mgr.db.NamedExec(sql, user)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func (m *mysqlUserRepository) Store(u *domain.User) (err error) {\n\tquery := `INSERT INTO user SET name=?, email=?, password=?, updated_at=?, created_at, deleted_at`\n\n\tstmt, err := m.Conn.Prepare(query)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tres, err := stmt.Exec(u.Name, u.Email, u.Password, time.Now(), nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlastID, err := res.LastInsertId()\n\tif err != nil {\n\t\treturn\n\t}\n\tu.ID = lastID\n\treturn\n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Role will inject the databaseTx in the `Role` schema
|
func (gtx *GuardTx) Role(role *schema.Role) *schema.Role {
if role == nil {
role = &schema.Role{
Entity: schema.Entity{DBContract: gtx.dbTx},
}
} else {
role.DBContract = gtx.dbTx
}
role.SetValidator(gtx.validator.Role)
return role
}
|
[
"func (j *AuroraJob) Role(role string) Job {\n\tj.jobConfig.Key.Role = role\n\n\t// Will be deprecated\n\tidentity := &aurora.Identity{User: role}\n\tj.jobConfig.Owner = identity\n\tj.jobConfig.TaskConfig.Owner = identity\n\treturn j\n}",
"func (u *User) Role(ctx context.Context, store Storer) (role Role, err error) {\n\trole, err = store.GetRoleByID(ctx, u.RoleID)\n\treturn\n}",
"func RoleSeeder(db *gorm.DB) {\n\tdb.Exec(\"TRUNCATE TABLE roles\")\n\tdb.Create(&models.Role{Name: \"admin\"})\n\tdb.Create(&models.Role{Name: \"low\"})\n\tdb.Create(&models.Role{Name: \"middle\"})\n\tdb.Create(&models.Role{Name: \"high\"})\n}",
"func (hba *HostBasedAuthentication) Role(name string) *HostBasedAuthentication {\n\thba.user = \"+\" + hba.quote(name)\n\treturn hba\n}",
"func (_TellorMesosphere *TellorMesosphereTransactor) GrantRole(opts *bind.TransactOpts, role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn _TellorMesosphere.contract.Transact(opts, \"grantRole\", role, account)\n}",
"func (pge *PgEngine) SetRole(ctx context.Context, tx pgx.Tx, runUID pgtype.Varchar) {\n\tl := log.GetLogger(ctx)\n\tl.Info(\"Setting Role to \", runUID.String)\n\t_, err := tx.Exec(ctx, fmt.Sprintf(\"SET ROLE %v\", runUID.String))\n\tif err != nil {\n\t\tl.WithError(err).Error(\"Error in Setting role\", err)\n\t}\n}",
"func DBCreateACLRoleTableTx(ctx context.Context, tx Tx) error {\n\tq := \"CREATE TABLE `acl_role` (`id`VARCHAR(64) NOT NULL PRIMARY KEY,`checksum`CHAR(64),`name` VARCHAR(100) NOT NULL,`description`TEXT,`admin_user_id` VARCHAR(64),`customer_id`VARCHAR(64),`created_at` BIGINT UNSIGNED NOT NULL,`updated_at` BIGINT UNSIGNED,INDEX acl_role_name_index (`name`),INDEX acl_role_customer_id_index (`customer_id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;\"\n\t_, err := tx.ExecContext(ctx, q)\n\treturn err\n}",
"func (gb *CurrentGrantBuilder) Role(n string) GrantExecutable {\n\treturn &CurrentGrantExecutable{\n\t\tgrantName: gb.qualifiedName,\n\t\tgrantType: gb.grantType,\n\t\tgranteeName: n,\n\t\tgranteeType: roleType,\n\t}\n}",
"func RegisterRole(role Role) {\n\tallRoles = append(allRoles, role)\n}",
"func (t *ACLRole) DBFindTx(ctx context.Context, tx Tx, _params ...interface{}) (bool, error) {\n\tparams := []interface{}{\n\t\torm.Column(\"id\"),\n\t\torm.Column(\"checksum\"),\n\t\torm.Column(\"name\"),\n\t\torm.Column(\"description\"),\n\t\torm.Column(\"admin_user_id\"),\n\t\torm.Column(\"customer_id\"),\n\t\torm.Column(\"created_at\"),\n\t\torm.Column(\"updated_at\"),\n\t\torm.Table(ACLRoleTableName),\n\t}\n\tif len(_params) > 0 {\n\t\tfor _, param := range _params {\n\t\t\tparams = append(params, param)\n\t\t}\n\t}\n\tq, p := orm.BuildQuery(params...)\n\trow := tx.QueryRowContext(ctx, q, p...)\n\tvar _ID sql.NullString\n\tvar _Checksum sql.NullString\n\tvar _Name sql.NullString\n\tvar _Description sql.NullString\n\tvar _AdminUserID sql.NullString\n\tvar _CustomerID sql.NullString\n\tvar _CreatedAt sql.NullInt64\n\tvar _UpdatedAt sql.NullInt64\n\terr := row.Scan(\n\t\t&_ID,\n\t\t&_Checksum,\n\t\t&_Name,\n\t\t&_Description,\n\t\t&_AdminUserID,\n\t\t&_CustomerID,\n\t\t&_CreatedAt,\n\t\t&_UpdatedAt,\n\t)\n\tif err != nil && err != sql.ErrNoRows {\n\t\treturn false, err\n\t}\n\tif _ID.Valid {\n\t\tt.SetID(_ID.String)\n\t}\n\tif _Checksum.Valid {\n\t\tt.SetChecksum(_Checksum.String)\n\t}\n\tif _Name.Valid {\n\t\tt.SetName(_Name.String)\n\t}\n\tif _Description.Valid {\n\t\tt.SetDescription(_Description.String)\n\t}\n\tif _AdminUserID.Valid {\n\t\tt.SetAdminUserID(_AdminUserID.String)\n\t}\n\tif _CustomerID.Valid {\n\t\tt.SetCustomerID(_CustomerID.String)\n\t}\n\tif _CreatedAt.Valid {\n\t\tt.SetCreatedAt(_CreatedAt.Int64)\n\t}\n\tif _UpdatedAt.Valid {\n\t\tt.SetUpdatedAt(_UpdatedAt.Int64)\n\t}\n\treturn true, nil\n}",
"func TestDatabase_existingResourcePrivilegesForReadWriteRoles(t *testing.T) {\n\tpostgresqlHost := test.Integration(t)\n\tlog := test.SetLogger(t)\n\tlog.Info(\"TC: Connection as iam_creator\")\n\tdb, err := postgres.Connect(log, postgres.ConnectionString{\n\t\tHost: postgresqlHost,\n\t\tDatabase: \"postgres\",\n\t\tUser: \"iam_creator\",\n\t\tPassword: \"\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"connect to database failed: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tname := fmt.Sprintf(\"test_%d\", time.Now().UnixNano())\n\tdeveloperName := fmt.Sprintf(\"%s_developer\", name)\n\tpassword := \"test\"\n\n\t// create a database and resources with plain SQL is if it already exists when\n\t// the controller tries to reconcile.\n\tlog.Info(\"TC: Creating user and database and changes owner\")\n\tdbExec(t, db, fmt.Sprintf(`CREATE USER %s WITH PASSWORD '%s' NOCREATEROLE VALID UNTIL 'infinity'`, name, password))\n\tdbExec(t, db, fmt.Sprintf(`CREATE DATABASE %s`, name))\n\tdbExec(t, db, fmt.Sprintf(`\n\tGRANT %[1]s TO CURRENT_USER;\n\tALTER DATABASE %[1]s OWNER TO %[1]s;\n\tREVOKE %[1]s FROM CURRENT_USER;\n\t`, name))\n\n\tlog.Info(\"TC: Connect as service user\")\n\tserviceDB, err := postgres.Connect(log, postgres.ConnectionString{\n\t\tHost: postgresqlHost,\n\t\tDatabase: name,\n\t\tUser: name,\n\t\tPassword: password,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Connect as existing service user failed: %v\", err)\n\t}\n\tdefer serviceDB.Close()\n\tlog.Info(\"TC: Create schema, table and insert a row\")\n\tdbExec(t, serviceDB, fmt.Sprintf(`\n\tCREATE SCHEMA %[1]s;\n\tCREATE TABLE %[1]s.%[1]s (title varchar(40));\n\tINSERT INTO %[1]s.%[1]s VALUES('a product');\n\t`, name))\n\n\tlog.Info(\"TC: Run controller database creation\")\n\terr = postgres.Database(log, db, postgresqlHost, postgres.Credentials{\n\t\tName: name,\n\t\tUser: name,\n\t\tPassword: password,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Create service database failed: %v\", err)\n\t}\n\n\tlog.Info(\"TC: Run 
controller user creation\")\n\terr = postgres.Role(log, db, developerName, nil, []postgres.DatabaseSchema{{\n\t\tName: name,\n\t\tSchema: name,\n\t\tPrivileges: postgres.PrivilegeRead,\n\t}})\n\tif err != nil {\n\t\tt.Fatalf(\"Create new developer role failed: %v\", err)\n\t}\n\n\tlog.Info(\"TC: Connect as developer\")\n\tdeveloperDB, err := postgres.Connect(log, postgres.ConnectionString{\n\t\tHost: postgresqlHost,\n\t\tDatabase: name,\n\t\tUser: developerName,\n\t\tPassword: \"\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Connect as developer user failed: %v\", err)\n\t}\n\tdefer developerDB.Close()\n\t// This should not result in an error as the controller should have made sure\n\t// that the schema and table have been made available to the read and\n\t// readwrite roles\n\tlog.Info(\"TC: Select from table\")\n\tdbExec(t, developerDB, fmt.Sprintf(`SELECT * FROM %[1]s.%[1]s`, name))\n}",
"func withRole(node *Role) roleOption {\n\treturn func(m *RoleMutation) {\n\t\tm.oldValue = func(context.Context) (*Role, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}",
"func (_TellorMesosphere *TellorMesosphereSession) GrantRole(role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn _TellorMesosphere.Contract.GrantRole(&_TellorMesosphere.TransactOpts, role, account)\n}",
"func populateRole(data *schema.ResourceData, role *Role) {\n\tdata.SetId(role.Key)\n\tdata.Set(\"key\", role.Key)\n\tdata.Set(\"name\", role.Name)\n\tdata.Set(\"description\", role.Description)\n\tdata.Set(\"level\", role.Level)\n\tdata.Set(\"owner\", role.Owner)\n\tdata.Set(\"created\", role.Created)\n\tdata.Set(\"updated\", role.Updated)\n\tdata.Set(\"changed_by\", role.ChangedBy)\n}",
"func (mRoleRepo *MockRoleRepo) StoreRole(role *entity.Role) (*entity.Role, []error) {\n\tRol := role\n\treturn Rol, nil\n}",
"func TestRole(t *testing.T) {\n\t// Initializing variables\n\tvar (\n\t\terr error\n\t\ttestDatastore *datastores.ConcreteDatastore\n\t)\n\n\tif testDatastore, err = datastores.NewDatabase(\"myTestDatabase.db\"); err != nil {\n\t\tt.Error(err)\n\t}\n\n\trole1 := model.Role{\n\t\tRoleId: 0,\n\t\tRoleName: \"Role 1\",\n\t\tCanAddAndModifyUsers: true,\n\t\tCanSeeOtherSchedules: true,\n\t\tCanAddProjects: true,\n\t\tCanSeeReports: true,\n\t}\n\n\trole2 := model.Role{\n\t\tRoleId: 0,\n\t\tRoleName: \"Role 2\",\n\t\tCanAddAndModifyUsers: false,\n\t\tCanSeeOtherSchedules: false,\n\t\tCanAddProjects: true,\n\t\tCanSeeReports: true,\n\t}\n\n\trole3 := model.Role{\n\t\tRoleId: 0,\n\t\tRoleName: \"Role 3\",\n\t\tCanAddAndModifyUsers: false,\n\t\tCanSeeOtherSchedules: false,\n\t\tCanAddProjects: false,\n\t\tCanSeeReports: false,\n\t}\n\n\t//\n\t//\tCreateRoles()\n\t//\n\n\tif role1.RoleId, err = testDatastore.CreateRole(role1); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif role2.RoleId, err = testDatastore.CreateRole(role2); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif role3.RoleId, err = testDatastore.CreateRole(role3); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tglobals.Log.Debug(\"CreateRole test - PASSED\")\n\n\t//\n\t// Test GetRoles()\n\t//\n\tvar (\n\t\tallRoles model.Roles\n\t\tdefaultRole1 model.Role\n\t\tdefaultRole2 model.Role\n\t\tdefaultRole3 model.Role\n\t)\n\n\t// Fetching the default roles\n\tif defaultRole1, err = testDatastore.GetRole(1); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif defaultRole2, err = testDatastore.GetRole(2); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif defaultRole3, err = testDatastore.GetRole(3); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Formatting the data\n\troleList := model.Roles{}\n\troleList = append(roleList, defaultRole1)\n\troleList = append(roleList, defaultRole2)\n\troleList = append(roleList, defaultRole3)\n\troleList = append(roleList, role1)\n\troleList = append(roleList, role2)\n\troleList = append(roleList, role3)\n\n\t// 
Fetching all roles\n\tif allRoles, err = testDatastore.GetRoles(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Verigying the result\n\tif !cmp.Equal(allRoles, roleList) {\n\t\tt.Error(err)\n\t}\n\n\tglobals.Log.Debug(\"GetRoles test - PASSED\")\n\n\t//\n\t// Test GetRole(RoleId)\n\t//\n\tvar role model.Role\n\n\t// Fetching a role\n\tif role, err = testDatastore.GetRole(role1.RoleId); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Verifying the data\n\tif !cmp.Equal(role1, role) {\n\t\tt.Error(err)\n\t}\n\n\tglobals.Log.Debug(\"GetRole test - PASSED\")\n\n\t//\n\t// Test GetRoleOfUser\n\t//\n\n\t// Creating some data\n\tcontract := model.Contract{\n\t\tContractId: 0,\n\t\tContractName: \"Contract\",\n\t}\n\n\tif contract.ContractId, err = testDatastore.CreateContract(contract); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tuser := model.User{\n\t\tUserId: 0,\n\t\tContractId: contract.ContractId,\n\t\tRoleId: role1.RoleId,\n\t\tUsername: \"First User\",\n\t\tPassword: \"This is a password\",\n\t\tLastName: \"User\",\n\t\tFirstName: \"First\",\n\t\tMail: \"[email protected]\",\n\t\tTheoricalHoursWorked: 50,\n\t\tVacationHours: 50,\n\t}\n\n\tif user.UserId, err = testDatastore.CreateUser(user); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Fetching the role of the user\n\tvar dbRoleOfUser model.Role\n\tif dbRoleOfUser, err = testDatastore.GetRoleOfUser(user.UserId); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Verifying the data\n\tif !cmp.Equal(dbRoleOfUser, role1) {\n\t\tt.Error(err)\n\t}\n\n\tglobals.Log.Debug(\"GetRoleOfUser test - PASSED\")\n\n\t//\n\t// Test UpdateRole(Role)\n\t//\n\tvar updatedRole model.Role\n\n\t// Modifying the role\n\trole1.RoleName = \"New role name\"\n\trole1.CanAddAndModifyUsers = false\n\trole1.CanSeeOtherSchedules = false\n\trole1.CanAddProjects = false\n\trole1.CanSeeReports = false\n\n\t// Saving the changes\n\tif _, err = testDatastore.UpdateRole(role1); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Getting the role so we can check the 
changes\n\tif updatedRole, err = testDatastore.GetRole(role1.RoleId); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Checking changes\n\tif !cmp.Equal(role1, updatedRole) {\n\t\tt.Error(err)\n\t}\n\n\tglobals.Log.Debug(\"UpdateRole test - PASSED\")\n\n\t//\n\t// Test DeleteRole(RoleId)\n\t//\n\n\t// Creating a role so we can delete it\n\tvar ind int64\n\tif ind, err = testDatastore.CreateRole(model.Role{\n\t\tRoleId: 0,\n\t\tRoleName: \"This is a role\",\n\t}); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Deleting it\n\tif err = testDatastore.DeleteRole(ind); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// trying to get the role we just deleted\n\tif _, err = testDatastore.GetRole(ind); err == nil {\n\t\tt.Error()\n\t}\n\n\tglobals.Log.Debug(\"DeleteRole test - PASSED\")\n\n\ttestDatastore.CloseDatabase()\n}",
"func InsertRole(db *sql.DB, name, intro string) error {\n\tresult, err := db.Exec(roleSQLString[mysqlRoleInsert], name, intro, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif rows, _ := result.RowsAffected(); rows == 0 {\n\t\treturn errInvalidMysql\n\t}\n\n\treturn nil\n}",
"func (arh *AdminRoleHandler) PutRole(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n}",
"func (t *ACLRole) DBCreateTx(ctx context.Context, tx Tx) (sql.Result, error) {\n\tq := \"INSERT INTO `acl_role` (`acl_role`.`id`,`acl_role`.`checksum`,`acl_role`.`name`,`acl_role`.`description`,`acl_role`.`admin_user_id`,`acl_role`.`customer_id`,`acl_role`.`created_at`,`acl_role`.`updated_at`) VALUES (?,?,?,?,?,?,?,?)\"\n\tchecksum := t.CalculateChecksum()\n\tif t.GetChecksum() == checksum {\n\t\treturn nil, nil\n\t}\n\tt.Checksum = &checksum\n\treturn tx.ExecContext(ctx, q,\n\t\torm.ToSQLString(t.ID),\n\t\torm.ToSQLString(t.Checksum),\n\t\torm.ToSQLString(t.Name),\n\t\torm.ToSQLString(t.Description),\n\t\torm.ToSQLString(t.AdminUserID),\n\t\torm.ToSQLString(t.CustomerID),\n\t\torm.ToSQLInt64(t.CreatedAt),\n\t\torm.ToSQLInt64(t.UpdatedAt),\n\t)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Permission will inject the databaseTx in the `Permission` schema
|
func (gtx *GuardTx) Permission(permission *schema.Permission) *schema.Permission {
if permission == nil {
return &schema.Permission{
Entity: schema.Entity{DBContract: gtx.dbTx},
}
} else {
permission.DBContract = gtx.dbTx
}
permission.SetValidator(gtx.validator.Permission)
return permission
}
|
[
"func ResourceSqlPermissions() *schema.Resource {\n\ts := common.StructToSchema(SqlPermissions{}, func(s map[string]*schema.Schema) map[string]*schema.Schema {\n\t\talof := []string{\"database\", \"table\", \"view\", \"catalog\", \"any_file\", \"anonymous_function\"}\n\t\tfor _, field := range alof {\n\t\t\ts[field].AtLeastOneOf = alof\n\t\t}\n\t\ts[\"database\"].DiffSuppressFunc = func(k, old, new string, d *schema.ResourceData) bool {\n\t\t\tif old == \"default\" && new == \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\ts[\"cluster_id\"].Computed = true\n\t\treturn s\n\t})\n\treturn common.Resource{\n\t\tSchema: s,\n\t\tCreate: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {\n\t\t\tta, err := tableAclForUpdate(ctx, d, s, c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = ta.enforce(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\td.SetId(ta.ID())\n\t\t\treturn nil\n\t\t},\n\t\tRead: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {\n\t\t\tta, err := tableAclForLoad(ctx, d, s, c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = ta.read(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(ta.PrivilegeAssignments) == 0 {\n\t\t\t\t// reflect resource is skipping empty privilege_assignments\n\t\t\t\td.Set(\"privilege_assignments\", []any{})\n\t\t\t}\n\t\t\tcommon.StructToData(ta, s, d)\n\t\t\treturn nil\n\t\t},\n\t\tUpdate: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {\n\t\t\tta, err := tableAclForUpdate(ctx, d, s, c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !d.HasChangesExcept(\"cluster_id\") {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn ta.enforce()\n\t\t},\n\t\tDelete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {\n\t\t\tta, err := tableAclForLoad(ctx, d, s, c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn 
ta.revoke()\n\t\t},\n\t}.ToResource()\n}",
"func (_Storage *StorageTransactor) GrantPermission(opts *bind.TransactOpts, kind uint8, addr common.Address) (*types.Transaction, error) {\n\treturn _Storage.contract.Transact(opts, \"grantPermission\", kind, addr)\n}",
"func (a *App) CreatePermission(w http.ResponseWriter, r *http.Request) {\n\thandler.CreatePermission(a.DB, w, r)\n}",
"func CreatePermissionSuite(\n\tdb Queryer,\n\tmodel interface{},\n) error {\n\tmType, err := mytype.ParseNodeType(structs.Name(model))\n\tif err != nil {\n\t\tmylog.Log.WithError(err).Debug(util.Trace(\"\"))\n\t\treturn err\n\t}\n\n\tfields := structs.Fields(model)\n\tn := len(fields)*len(accessLevelsWithFields) + len(accessLevelsWithoutFields)\n\tpermissions := make([][]interface{}, 0, n)\n\tfor _, al := range accessLevelsWithFields {\n\t\tfor _, f := range fields {\n\t\t\tid, _ := mytype.NewOID(\"Permission\")\n\t\t\tfield := f.Tag(\"db\")\n\t\t\tpermits := strings.Split(f.Tag(\"permit\"), \"/\")\n\t\t\tfor _, p := range permits {\n\t\t\t\tif strings.ToLower(p) == strings.ToLower(al.String()) {\n\t\t\t\t\tpermissions = append(permissions, []interface{}{\n\t\t\t\t\t\tid,\n\t\t\t\t\t\tal,\n\t\t\t\t\t\tmType,\n\t\t\t\t\t\tmytype.Authenticated,\n\t\t\t\t\t\tfield,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor _, al := range accessLevelsWithoutFields {\n\t\tid, _ := mytype.NewOID(\"Permission\")\n\t\tpermissions = append(permissions, []interface{}{\n\t\t\tid,\n\t\t\tal,\n\t\t\tmType,\n\t\t\tmytype.Authenticated,\n\t\t\tnil,\n\t\t})\n\t}\n\n\ttx, err, newTx := BeginTransaction(db)\n\tif err != nil {\n\t\tmylog.Log.WithError(err).Debug(util.Trace(\"\"))\n\t\treturn err\n\t}\n\tif newTx {\n\t\tdefer RollbackTransaction(tx)\n\t}\n\n\tif err := DeletePermissionSuite(db, model); err != nil {\n\t\tmylog.Log.WithError(err).Debug(util.Trace(\"\"))\n\t\treturn err\n\t}\n\n\tcopyCount, err := tx.CopyFrom(\n\t\tpgx.Identifier{\"permission\"},\n\t\t[]string{\"id\", \"access_level\", \"type\", \"audience\", \"field\"},\n\t\tpgx.CopyFromRows(permissions),\n\t)\n\tif err != nil {\n\t\tif pgErr, ok := err.(pgx.PgError); ok {\n\t\t\tswitch PSQLError(pgErr.Code) {\n\t\t\tdefault:\n\t\t\t\tmylog.Log.WithError(err).Debug(util.Trace(\"\"))\n\t\t\t\treturn err\n\t\t\tcase UniqueViolation:\n\t\t\t\tmylog.Log.Warn(\"permissions already created\")\n\t\t\t\treturn 
nil\n\t\t\t}\n\t\t}\n\t\tmylog.Log.WithError(err).Debug(util.Trace(\"\"))\n\t\treturn err\n\t}\n\n\tif newTx {\n\t\terr = CommitTransaction(tx)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Debug(util.Trace(\"\"))\n\t\t\treturn err\n\t\t}\n\t}\n\n\tmylog.Log.WithFields(logrus.Fields{\n\t\t\"n\": copyCount,\n\t\t\"type\": mType,\n\t}).Info(util.Trace(\"created permissions\"))\n\treturn nil\n}",
"func initPermission() {\n CorePermissionModuleID = AddModule(\"Core Permission Module\", 1)\n\n model := (*CorePermission)(nil)\n AddModel(model)\n\n // Temp\n InsertInitialModel(\n model,\n &CorePermission{\n ModuleID: CoreGroupModuleID,\n GroupID: 3,\n Read: CorePermissionAll,\n Create: CorePermissionAll,\n Update: CorePermissionNone,\n Delete: CorePermissionNone,\n },\n )\n}",
"func bindPermission(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(PermissionABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func (_Storage *StorageTransactorSession) GrantPermission(kind uint8, addr common.Address) (*types.Transaction, error) {\n\treturn _Storage.Contract.GrantPermission(&_Storage.TransactOpts, kind, addr)\n}",
"func changePermission(db *sqlite.Driver, acct *Account, newPerm PermLevel) (*Account, error) {\n\tvar err error\n\tvar stmt = fmt.Sprintf(\"update %s set permission_level = ? where id = ?\", tableName)\n\tif _, err = db.Exec(stmt, newPerm, acct.ID); err != nil {\n\t\treturn nil, err\n\t}\n\n\tacct.PermLevel = newPerm\n\treturn acct, nil\n}",
"func CreatePermission(c *gin.Context) {\n\tnewPermission := model.Permission{}\n\tc.BindJSON(&newPermission)\n\n\terr := service.CreatePermission(newPermission)\n\n\tif err != nil {\n\t\terror := service.GetGormErrorCode(err.Error())\n\t\tc.JSON(500, error)\n\t} else {\n\t\tc.String(200, \"ok\")\n\t}\n}",
"func (_Permission *PermissionTransactor) Insert(opts *bind.TransactOpts, table_name string, addr string) (*types.RawTransaction, error) {\n\treturn _Permission.contract.Transact(opts, \"insert\", table_name, addr)\n}",
"func Permission(tag tables.Tag) error {\n\tstmt, err := mysqlBus.DB.Prepare(\"UPDATE Tag SET status=? WHERE id=? AND username_tagger=? AND username_taggee=?\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = stmt.Exec(tag.Status, tag.ID, tag.UsernameTagger, tag.UsernameTaggee)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}",
"func (_Permission *PermissionTransactorSession) Insert(table_name string, addr string) (*types.RawTransaction, error) {\n\treturn _Permission.Contract.Insert(&_Permission.TransactOpts, table_name, addr)\n}",
"func (ctx *TestContext) addPermissionGranted(group, item, permission, permissionValue string) {\n\tgroupID := ctx.getReference(group)\n\titemID := ctx.getReference(item)\n\n\tpermissionsGrantedTable := \"permissions_granted\"\n\tkey := strconv.FormatInt(groupID, 10) + \",\" + strconv.FormatInt(itemID, 10)\n\n\tif !ctx.isInDatabase(permissionsGrantedTable, key) {\n\t\tctx.addInDatabase(permissionsGrantedTable, key, map[string]interface{}{\n\t\t\t\"group_id\": groupID,\n\t\t\t\"source_group_id\": groupID,\n\t\t\t\"item_id\": itemID,\n\t\t})\n\t}\n\n\tif permission == \"can_request_help_to\" {\n\t\tpermissionValue = strconv.FormatInt(ctx.getReference(permissionValue), 10)\n\t}\n\n\tif permission == \"is_owner\" {\n\t\tboolValue, err := strconv.ParseBool(permissionValue)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"%v cannot be parsed as a boolean\", boolValue))\n\t\t}\n\n\t\tctx.dbTables[permissionsGrantedTable][key][permission] = boolValue\n\t} else {\n\t\tctx.dbTables[permissionsGrantedTable][key][permission] = permissionValue\n\t}\n}",
"func (pr *PermissionsRepository) Add(permission *permission_model.Permission) error {\n\n\t// insert user\n\tresult, err := pr.database.NamedExec(`\n\tINSERT INTO gocms_permissions (name, description) VALUES (:name, :description)\n\t`, permission)\n\tif err != nil {\n\t\tlog.Errorf(\"Error adding permission to db: %s\\n\", err.Error())\n\t\treturn err\n\t}\n\tid, _ := result.LastInsertId()\n\tpermission.Id = id\n\n\treturn nil\n\n}",
"func (r *mutationResolver) CreatePermission(ctx context.Context, input *models.CreatePermissionInput) (*user.Permission, error) {\n\tpanic(\"not implemented\")\n}",
"func PostPermissions(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tvar permission types.Permission\n\tdecoder := json.NewDecoder(r.Body)\n\terr := decoder.Decode(&permission)\n\tif err != nil {\n\t\tutil.ErrorResponder(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tdb, err := db.Open()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer db.Close()\n\n\tif err := permission.Create(db); err != nil {\n\t\tutil.ErrorResponder(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tif err = json.NewEncoder(w).Encode(permission); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (r *Resolver) Permissions() PermissionsResolver { return &permissionsResolver{r} }",
"func (auup *AuthUserUserPermission) Insert(ctx context.Context, db DB) error {\n\tswitch {\n\tcase auup._exists: // already exists\n\t\treturn logerror(&ErrInsertFailed{ErrAlreadyExists})\n\tcase auup._deleted: // deleted\n\t\treturn logerror(&ErrInsertFailed{ErrMarkedForDeletion})\n\t}\n\t// insert (primary key generated and returned by database)\n\tconst sqlstr = `INSERT INTO django.auth_user_user_permissions (` +\n\t\t`user_id, permission_id` +\n\t\t`) VALUES (` +\n\t\t`:1, :2` +\n\t\t`) RETURNING id INTO :3`\n\t// run\n\tlogf(sqlstr, auup.UserID, auup.PermissionID)\n\tvar id int64\n\tif _, err := db.ExecContext(ctx, sqlstr, auup.UserID, auup.PermissionID, sql.Out{Dest: &id}); err != nil {\n\t\treturn logerror(err)\n\t} // set primary key\n\tauup.ID = int64(id)\n\t// set exists\n\tauup._exists = true\n\treturn nil\n}",
"func (b *BulkPermission) Apply(db *Database, actor Auditable) error {\n\t// Bulk update is all-or-nothing, so do everything in a transaction.\n\treturn db.db.Transaction(func(tx *gorm.DB) error {\n\t\t// Fetch all current memberships - this is required for re-building implied\n\t\t// permissions and auditing.\n\t\tvar memberships []*Membership\n\t\tif err := tx.\n\t\t\tSet(\"gorm:query_option\", \"FOR UPDATE\").\n\t\t\tModel(&Membership{}).\n\t\t\tWhere(\"realm_id = ?\", b.RealmID).\n\t\t\tWhere(\"user_id IN (?)\", b.UserIDs).\n\t\t\tFind(&memberships).\n\t\t\tError; err != nil {\n\t\t\tif IsNotFound(err) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\t// Process each membership individually.\n\t\tfor _, membership := range memberships {\n\t\t\t// Users cannot update their own permissions.\n\t\t\tif user, ok := actor.(*User); ok && membership.UserID == user.ID {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Compute new permissions.\n\t\t\tnewPerms, existingPerms := membership.Permissions, membership.Permissions\n\t\t\tswitch b.Action {\n\t\t\tcase BulkPermissionActionAdd:\n\t\t\t\tnewPerms = newPerms | b.Permissions\n\t\t\tcase BulkPermissionActionRemove:\n\t\t\t\tnewPerms = newPerms &^ b.Permissions\n\n\t\t\t\t// Re-compute implied permissions. This handles an edge case where\n\t\t\t\t// someone removes an implied permission but not the implying\n\t\t\t\t// permission. For example, if someone bulk-removed a Read but not\n\t\t\t\t// Write, memberships with Write should still retain read because its\n\t\t\t\t// implied.\n\t\t\t\t//\n\t\t\t\t// There's also a weird security edge case here in that we do not check\n\t\t\t\t// if the actor has this permission. 
In this case, the membership\n\t\t\t\t// already had the permission, so even if the actor doesn't have said\n\t\t\t\t// permission, it's not privilege escalation.\n\t\t\t\tnewPerms = rbac.AddImplied(newPerms)\n\t\t\t}\n\n\t\t\t// It's possible that no permissions have changed, in which case we don't\n\t\t\t// need to save the record or create an audit entry.\n\t\t\tif newPerms == existingPerms {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif newPerms == 0 {\n\t\t\t\tif err := tx.\n\t\t\t\t\tUnscoped().\n\t\t\t\t\tModel(&Membership{}).\n\t\t\t\t\tWhere(\"realm_id = ?\", membership.RealmID).\n\t\t\t\t\tWhere(\"user_id = ?\", membership.UserID).\n\t\t\t\t\tDelete(&Membership{\n\t\t\t\t\t\tRealmID: membership.RealmID,\n\t\t\t\t\t\tUserID: membership.UserID,\n\t\t\t\t\t}).\n\t\t\t\t\tError; err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to delete membership: %w\", err)\n\t\t\t\t}\n\n\t\t\t\t// Generate audit\n\t\t\t\taudit := BuildAuditEntry(actor, \"removed user from realm\", membership.User, membership.RealmID)\n\t\t\t\tif err := tx.Save(audit).Error; err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to save audit: %w\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Save the membership.\n\t\t\t\tif err := tx.\n\t\t\t\t\tModel(&Membership{}).\n\t\t\t\t\tWhere(\"realm_id = ?\", membership.RealmID).\n\t\t\t\t\tWhere(\"user_id = ?\", membership.UserID).\n\t\t\t\t\tUpdate(\"permissions\", newPerms).\n\t\t\t\t\tError; err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to save membership: %w\", err)\n\t\t\t\t}\n\n\t\t\t\t// Audit if permissions were changed.\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated user permissions\", actor, membership.RealmID)\n\t\t\t\taudit.Diff = stringSliceDiff(rbac.PermissionNames(existingPerms), rbac.PermissionNames(newPerms))\n\t\t\t\tif err := tx.Save(audit).Error; err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to save audit: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Cascade updated_at on user\n\t\t\tif err := 
tx.\n\t\t\t\tModel(&User{}).\n\t\t\t\tWhere(\"id = ?\", membership.UserID).\n\t\t\t\tUpdateColumn(\"updated_at\", time.Now().UTC()).\n\t\t\t\tError; err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to update user updated_at: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Rule will inject the databaseTx in the `Rule` schema
|
func (gtx *GuardTx) Rule(rule *schema.Rule) *schema.Rule {
if rule == nil {
return &schema.Rule{
Entity: schema.Entity{DBContract: gtx.dbTx},
}
}
rule.DBContract = gtx.dbTx
rule.SetValidator(gtx.validator.Rule)
return rule
}
|
[
"func (fw *IPtables) injectRule(rule *IPtablesRule) error {\n\trule2arr := strings.Split(rule.GetBody(), \" \")\n\tif len(rule2arr) < 3 {\n\t\treturn fmt.Errorf(\"In injectRule() not enough elements in rule %s\", rule.GetBody())\n\t}\n\n\truleChain := rule2arr[0]\n\n\tfor i, chain := range fw.chains {\n\t\tif chain.ChainName == ruleChain {\n\t\t\tfw.chains[i].Rules = append(fw.chains[i].Rules, rule)\n\t\t\tlog.Infof(\"In injectRule() adding rule %s into chain %s\", rule.GetBody(), chain.ChainName)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// TODO should we create new chain instead of throwing error?\n\treturn fmt.Errorf(\"In injectRule() firewall doesn't manage chain for rule %s\", rule.GetBody())\n}",
"func (_Ytm *YtmTransactor) AddRule(opts *bind.TransactOpts, addr common.Address, initPercent uint8, periods []*big.Int, percents []uint8) (*types.Transaction, error) {\n\treturn _Ytm.contract.Transact(opts, \"addRule\", addr, initPercent, periods, percents)\n}",
"func registerRule(app *extkingpin.App) {\n\tcomp := component.Rule\n\tcmd := app.Command(comp.String(), \"Ruler evaluating Prometheus rules against given Query nodes, exposing Store API and storing old blocks in bucket.\")\n\n\tconf := &ruleConfig{}\n\tconf.registerFlag(cmd)\n\n\tlabelStrs := cmd.Flag(\"label\", \"Labels to be applied to all generated metrics (repeated). Similar to external labels for Prometheus, used to identify ruler and its blocks as unique source.\").\n\t\tPlaceHolder(\"<name>=\\\"<value>\\\"\").Strings()\n\ttsdbBlockDuration := extkingpin.ModelDuration(cmd.Flag(\"tsdb.block-duration\", \"Block duration for TSDB block.\").\n\t\tDefault(\"2h\"))\n\ttsdbRetention := extkingpin.ModelDuration(cmd.Flag(\"tsdb.retention\", \"Block retention time on local disk.\").\n\t\tDefault(\"48h\"))\n\tnoLockFile := cmd.Flag(\"tsdb.no-lockfile\", \"Do not create lockfile in TSDB data directory. In any case, the lockfiles will be deleted on next startup.\").Default(\"false\").Bool()\n\twalCompression := cmd.Flag(\"tsdb.wal-compression\", \"Compress the tsdb WAL.\").Default(\"true\").Bool()\n\n\tcmd.Flag(\"data-dir\", \"data directory\").Default(\"data/\").StringVar(&conf.dataDir)\n\tcmd.Flag(\"rule-file\", \"Rule files that should be used by rule manager. Can be in glob format (repeated). 
Note that rules are not automatically detected, use SIGHUP or do HTTP POST /-/reload to re-read them.\").\n\t\tDefault(\"rules/\").StringsVar(&conf.ruleFiles)\n\tcmd.Flag(\"resend-delay\", \"Minimum amount of time to wait before resending an alert to Alertmanager.\").\n\t\tDefault(\"1m\").DurationVar(&conf.resendDelay)\n\tcmd.Flag(\"eval-interval\", \"The default evaluation interval to use.\").\n\t\tDefault(\"1m\").DurationVar(&conf.evalInterval)\n\tcmd.Flag(\"for-outage-tolerance\", \"Max time to tolerate prometheus outage for restoring \\\"for\\\" state of alert.\").\n\t\tDefault(\"1h\").DurationVar(&conf.outageTolerance)\n\tcmd.Flag(\"for-grace-period\", \"Minimum duration between alert and restored \\\"for\\\" state. This is maintained only for alerts with configured \\\"for\\\" time greater than grace period.\").\n\t\tDefault(\"10m\").DurationVar(&conf.forGracePeriod)\n\tcmd.Flag(\"restore-ignored-label\", \"Label names to be ignored when restoring alerts from the remote storage. This is only used in stateless mode.\").\n\t\tStringsVar(&conf.ignoredLabelNames)\n\n\tconf.rwConfig = extflag.RegisterPathOrContent(cmd, \"remote-write.config\", \"YAML config for the remote-write configurations, that specify servers where samples should be sent to (see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This automatically enables stateless mode for ruler and no series will be stored in the ruler's TSDB. If an empty config (or file) is provided, the flag is ignored and ruler is run with its own TSDB.\", extflag.WithEnvSubstitution())\n\n\treqLogDecision := cmd.Flag(\"log.request.decision\", \"Deprecation Warning - This flag would be soon deprecated, and replaced with `request.logging-config`. Request Logging for logging the start and end of requests. By default this flag is disabled. LogFinishCall: Logs the finish call of the requests. LogStartAndFinishCall: Logs the start and finish call of the requests. 
NoLogCall: Disable request logging.\").Default(\"\").Enum(\"NoLogCall\", \"LogFinishCall\", \"LogStartAndFinishCall\", \"\")\n\n\tconf.objStoreConfig = extkingpin.RegisterCommonObjStoreFlags(cmd, \"\", false)\n\n\treqLogConfig := extkingpin.RegisterRequestLoggingFlags(cmd)\n\n\tvar err error\n\tcmd.Setup(func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, reload <-chan struct{}, _ bool) error {\n\t\tconf.lset, err = parseFlagLabels(*labelStrs)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"parse labels\")\n\t\t}\n\n\t\tconf.alertQueryURL, err = url.Parse(*conf.alertmgr.alertQueryURL)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"parse alert query url\")\n\t\t}\n\n\t\ttsdbOpts := &tsdb.Options{\n\t\t\tMinBlockDuration: int64(time.Duration(*tsdbBlockDuration) / time.Millisecond),\n\t\t\tMaxBlockDuration: int64(time.Duration(*tsdbBlockDuration) / time.Millisecond),\n\t\t\tRetentionDuration: int64(time.Duration(*tsdbRetention) / time.Millisecond),\n\t\t\tNoLockfile: *noLockFile,\n\t\t\tWALCompression: *walCompression,\n\t\t}\n\n\t\tagentOpts := &agent.Options{\n\t\t\tWALCompression: *walCompression,\n\t\t\tNoLockfile: *noLockFile,\n\t\t}\n\n\t\t// Parse and check query configuration.\n\t\tlookupQueries := map[string]struct{}{}\n\t\tfor _, q := range conf.query.addrs {\n\t\t\tif _, ok := lookupQueries[q]; ok {\n\t\t\t\treturn errors.Errorf(\"Address %s is duplicated for --query flag.\", q)\n\t\t\t}\n\n\t\t\tlookupQueries[q] = struct{}{}\n\t\t}\n\n\t\tconf.queryConfigYAML, err = conf.query.configPath.Content()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(conf.query.sdFiles) == 0 && len(conf.query.addrs) == 0 && len(conf.queryConfigYAML) == 0 {\n\t\t\treturn errors.New(\"no --query parameter was given\")\n\t\t}\n\t\tif (len(conf.query.sdFiles) != 0 || len(conf.query.addrs) != 0) && len(conf.queryConfigYAML) != 0 {\n\t\t\treturn errors.New(\"--query/--query.sd-files and --query.config* parameters cannot be 
defined at the same time\")\n\t\t}\n\n\t\t// Parse and check alerting configuration.\n\t\tconf.alertmgrsConfigYAML, err = conf.alertmgr.configPath.Content()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(conf.alertmgrsConfigYAML) != 0 && len(conf.alertmgr.alertmgrURLs) != 0 {\n\t\t\treturn errors.New(\"--alertmanagers.url and --alertmanagers.config* parameters cannot be defined at the same time\")\n\t\t}\n\n\t\tconf.alertRelabelConfigYAML, err = conf.alertmgr.alertRelabelConfigPath.Content()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thttpLogOpts, err := logging.ParseHTTPOptions(*reqLogDecision, reqLogConfig)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error while parsing config for request logging\")\n\t\t}\n\n\t\ttagOpts, grpcLogOpts, err := logging.ParsegRPCOptions(*reqLogDecision, reqLogConfig)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error while parsing config for request logging\")\n\t\t}\n\n\t\treturn runRule(g,\n\t\t\tlogger,\n\t\t\treg,\n\t\t\ttracer,\n\t\t\tcomp,\n\t\t\t*conf,\n\t\t\treload,\n\t\t\tgetFlagsMap(cmd.Flags()),\n\t\t\thttpLogOpts,\n\t\t\tgrpcLogOpts,\n\t\t\ttagOpts,\n\t\t\ttsdbOpts,\n\t\t\tagentOpts,\n\t\t)\n\t})\n}",
"func (_Ytm *YtmTransactorSession) AddRule(addr common.Address, initPercent uint8, periods []*big.Int, percents []uint8) (*types.Transaction, error) {\n\treturn _Ytm.Contract.AddRule(&_Ytm.TransactOpts, addr, initPercent, periods, percents)\n}",
"func rulesetInsert(nkey string, key string, value string) (err error) {\n if ndb.Rdb == nil {\n logs.Error(\"rulesetInsert -- Can't access to database\")\n return errors.New(\"rulesetInsert -- Can't access to database\")\n }\n stmt, err := ndb.Rdb.Prepare(\"insert into ruleset (ruleset_uniqueid, ruleset_param, ruleset_value) values(?,?,?)\")\n if err != nil {\n logs.Error(\"rulesetInsert -- Prepare -> %s\", err.Error())\n return err\n }\n _, err = stmt.Exec(&nkey, &key, &value)\n if err != nil {\n logs.Error(\"rulesetInsert -- Execute -> %s\", err.Error())\n return err\n }\n return nil\n}",
"func (m Middleware) Tx(db *sql.DB) TxFunc {\n\treturn func(f func(tx daos.Transaction, w http.ResponseWriter, r *http.Request) error) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\tt, err := db.Begin()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tl := m.log.WithRequest(r)\n\t\t\t\tif p := recover(); p != nil {\n\t\t\t\t\tt.Rollback()\n\t\t\t\t\tl.Info(\"transaction rollbacked\")\n\t\t\t\t\tpanic(p)\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tt.Rollback()\n\t\t\t\t\tl.Info(\"transaction rollbacked\")\n\t\t\t\t\tpanic(err)\n\t\t\t\t} else {\n\t\t\t\t\terr = t.Commit()\n\t\t\t\t\tl.Info(\"transaction commited\")\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\terr = f(t, w, r)\n\t\t}\n\t}\n}",
"func setupDatabase(db *sql.DB) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\t// Creating a timescaledb extension for the database\n\tconst ext = `CREATE EXTENSION IF NOT EXISTS timescaledb CASCADE;`\n\tif _, err = tx.Exec(ext); err != nil {\n\t\treturn err\n\t}\n\n\t// creating schema in the database\n\tconst sch = `CREATE SCHEMA IF NOT EXISTS \"audit\"`\n\tif _, err = tx.Exec(sch); err != nil {\n\t\treturn err\n\t}\n\n\t// creating the audit log table\n\tconst tbl = `CREATE TABLE IF NOT EXISTS audit.\"Logs\" (\n\t\t\"Timestamp\" TIMESTAMPTZ NOT NULL,\n\t\t\"UserId\" text NOT NULL,\n\t\t\"Action\" text NOT NULL\n\t );`\n\tif _, err = tx.Exec(tbl); err != nil {\n\t\treturn err\n\t}\n\n\t// creating the hypertable of audit log table for timescaledb\n\tconst hptbl = `SELECT create_hypertable('audit.\"Logs\"', 'Timestamp',if_not_exists => true);`\n\tif _, err = tx.Exec(hptbl); err != nil {\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}",
"func (probe *BridgeOfProbe) addRule(rule *Rule) {\n\tlogging.GetLogger().Infof(\"New rule %v added\", rule.UUID)\n\tg := probe.OvsOfProbe.Graph\n\tg.Lock()\n\tdefer g.Unlock()\n\tbridgeNode := probe.BridgeNode\n\tmetadata := graph.Metadata{\n\t\t\"Type\": \"ofrule\",\n\t\t\"cookie\": fmt.Sprintf(\"0x%x\", rule.Cookie),\n\t\t\"table\": rule.Table,\n\t\t\"filters\": rule.Filter,\n\t\t\"actions\": rule.Actions,\n\t\t\"priority\": rule.Priority,\n\t\t\"UUID\": rule.UUID,\n\t}\n\truleNode := g.NewNode(graph.GenID(), metadata)\n\tg.Link(bridgeNode, ruleNode, graph.Metadata{\"RelationType\": \"ownership\"})\n}",
"func (fw *IPtables) addIPtablesRule(rule *IPtablesRule) error {\n\tif err := fw.Store.addIPtablesRule(rule); err != nil {\n\t\tlog.Error(\"In addIPtablesRule failed to add \", rule.GetBody())\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }",
"func (dsl *PutDSL) StnRule(val *stn.Rule) vppclient.PutDSL {\n\tdsl.parent.txn.Put(stn.Key(val.Interface, val.IpAddress), val)\n\treturn dsl\n}",
"func insertRulesetValues(uuid string, param string, value string)(err error){\n insertRulesetValues, err := ndb.Rdb.Prepare(\"insert into ruleset (ruleset_uniqueid, ruleset_param, ruleset_value) values (?,?,?);\")\n _, err = insertRulesetValues.Exec(&uuid, ¶m, &value)\n defer insertRulesetValues.Close()\n if (err != nil){\n return err\n }\n return nil\n}",
"func (c *Conn) Transaction(fn func(*Conn) error) error {\r\n\tvar (\r\n\t\ttx = c.Begin()\r\n\t\tconn = &Conn{}\r\n\t)\r\n\tcopier.Copy(conn, c)\r\n\tconn.DB = tx\r\n\tif err := fn(conn); err != nil {\r\n\t\ttx.Rollback()\r\n\t\treturn err\r\n\t}\r\n\ttx.Commit()\r\n\treturn nil\r\n}",
"func (tx *tx) Run(fn func(*Db) error) (err error) {\n\tdefer func() {\n\t\tif panicErr := recover(); panicErr != nil {\n\t\t\t// Don't try to rollback twice (if we're panicing after trying to rollback already)\n\t\t\t// attempt to rollback transaction\n\t\t\terr = tx.Rollback()\n\t\t\tif err != nil {\n\t\t\t\t// propagate panic\n\t\t\t\tpanic(fmt.Sprintf(\"Rollback error: \"+err.Error()+\"\\nOriginal Panic: %s\", panicErr))\n\t\t\t}\n\n\t\t\t// propagate original error\n\t\t\tpanic(panicErr)\n\t\t}\n\n\t\tif err != nil {\n\t\t\terr2 := tx.Rollback()\n\n\t\t\tif err2 != nil {\n\t\t\t\tpanic(\"Rollback error: \" + err2.Error() + \"\\nOriginal Error: \" + err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\terr = tx.Commit()\n\t\treturn\n\t}()\n\n\terr = fn(tx.Db)\n\n\treturn\n}",
"func AddRule(r AuditRule) (err error) {\n\tard, _, _, err := r.toKernelAuditRule()\n\tif err != nil {\n\t\treturn\n\t}\n\tclient, err := libaudit.NewAuditClient(nil)\n\tdefer client.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to initialize client\")\n\t}\n\terr = client.AddRule(ard.toWireFormat())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (m *SQLIgnoreStore) Create(rule *IgnoreRule) error {\n\tstmt := `INSERT INTO ignorerule (userid, updated_by, expires, query, note)\n\t VALUES(?,?,?,?,?)`\n\n\tret, err := m.vdb.DB.Exec(stmt, rule.Name, rule.Name, rule.Expires.Unix(), rule.Query, rule.Note)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcreatedId, err := ret.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\trule.ID = int(createdId)\n\tm.inc()\n\treturn nil\n}",
"func (rc *RuleCreate) Save(ctx context.Context) (*Rule, error) {\n\tvar (\n\t\terr error\n\t\tnode *Rule\n\t)\n\trc.defaults()\n\tif len(rc.hooks) == 0 {\n\t\tif err = rc.check(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnode, err = rc.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*RuleMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tif err = rc.check(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\trc.mutation = mutation\n\t\t\tnode, err = rc.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn node, err\n\t\t})\n\t\tfor i := len(rc.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = rc.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, rc.mutation); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn node, err\n}",
"func (t *ACLRole) DBCreateTx(ctx context.Context, tx Tx) (sql.Result, error) {\n\tq := \"INSERT INTO `acl_role` (`acl_role`.`id`,`acl_role`.`checksum`,`acl_role`.`name`,`acl_role`.`description`,`acl_role`.`admin_user_id`,`acl_role`.`customer_id`,`acl_role`.`created_at`,`acl_role`.`updated_at`) VALUES (?,?,?,?,?,?,?,?)\"\n\tchecksum := t.CalculateChecksum()\n\tif t.GetChecksum() == checksum {\n\t\treturn nil, nil\n\t}\n\tt.Checksum = &checksum\n\treturn tx.ExecContext(ctx, q,\n\t\torm.ToSQLString(t.ID),\n\t\torm.ToSQLString(t.Checksum),\n\t\torm.ToSQLString(t.Name),\n\t\torm.ToSQLString(t.Description),\n\t\torm.ToSQLString(t.AdminUserID),\n\t\torm.ToSQLString(t.CustomerID),\n\t\torm.ToSQLInt64(t.CreatedAt),\n\t\torm.ToSQLInt64(t.UpdatedAt),\n\t)\n}",
"func (r *BatchV1JobRule) createRule(job *batchV1.Job, ydr *YamlDerivedResource) *rule {\n\trule := &rule{\n\t\tID: r.ID,\n\t\tPrereqs: r.Prereqs,\n\t\tCondition: func() bool {\n\t\t\tif r.Condition == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn r.Condition(job)\n\t\t},\n\t\tMessage: r.Message,\n\t\tLevel: r.Level,\n\t\tResources: []*YamlDerivedResource{ydr},\n\t\tFix: func() bool {\n\t\t\tif r.Fix == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn r.Fix(job)\n\t\t},\n\t\tFixDescription: func() string {\n\t\t\tif r.FixDescription == nil {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\treturn r.FixDescription(job)\n\t\t},\n\t}\n\treturn rule\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
mark text as Output type
|
func (*text) isOutput() {
}
|
[
"func (t *typewriter) Typewrite() string {\n\t// Re-use the cached result if already processed.\n\tif t.cur != -1 {\n\t\treturn t.result\n\t}\n\n\tvar buf bytes.Buffer\n\n\tfor {\n\t\tsep, str := t.scanMorpheme()\n\t\tif str == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\tbuf.WriteString(sep)\n\t\tbuf.WriteString(str)\n\t}\n\n\tt.result = buf.String()\n\treturn t.result\n}",
"func (cb *printcb) outputText(data string) error {\n\treturn cb.outputBinary([]byte(data))\n}",
"func (r Text) WriteContentType(w http.ResponseWriter) {\n\twriteContentType(w, plainContentType)\n}",
"func (f *FakeOutput) Type() string { return \"fake_output\" }",
"func TestFieldOutputText(t *testing.T) {\n\tfield := NewField()\n\tfield.Name = \"foo\"\n\tfield.Type = \"text\"\n\n\ttag := field.output()\n\n\tassert.Equal(t, \"<input type=\\\"text\\\" name=\\\"foo\\\" id=\\\"foo\\\" value=\\\"\\\" />\", tag)\n}",
"func (m *Method) OutputsAsTSDeclarationText(pkgName string) string {\n\n\tif len(m.Returns) == 0 {\n\t\treturn \"void\"\n\t}\n\n\tvar result []string\n\n\tfor _, output := range m.Returns {\n\t\tresult = append(result, goTypeToTSDeclaration(output, pkgName))\n\t}\n\treturn strings.Join(result, \", \")\n}",
"func (t *Text) Text(str string) *Text {\n\tt.output = str\n\tt.textLines++\n\tt.lines[t.textLines] = str\n\treturn t\n}",
"func (this *Tidy) OutputEncoding(val int) (bool, error) {\n\tswitch val {\n\tcase Raw, Ascii, Latin0, Latin1, Utf8, Iso2022, Mac, Win1252, Ibm858, Utf16le, Utf16be, Utf16, Big5, Shiftjis:\n\t\treturn this.optSetInt(C.TidyOutCharEncoding, (C.ulong)(val))\n\t}\n\treturn false, errors.New(\"Argument val int is out of range (0-13)\")\n}",
"func IsText(t Type) bool {\n\treturn int(t)&flagIsText == flagIsText\n}",
"func (m *Method) OutputsAsTSText(pkgName string) string {\n\n\tif len(m.Returns) == 0 {\n\t\treturn \"void\"\n\t}\n\n\tvar result []string\n\n\tfor _, output := range m.Returns {\n\t\tresult = append(result, goTypeToTS(output, pkgName))\n\t}\n\treturn strings.Join(result, \", \")\n}",
"func (r renderer) NormalText(out *bytes.Buffer, text []byte) {\n\tout.Write(text)\n}",
"func (o DocumentDbOutputDataSourceOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DocumentDbOutputDataSource) string { return v.Type }).(pulumi.StringOutput)\n}",
"func output(data []byte, encodeType Encode) (string, error) {\n\tswitch encodeType {\n\tcase HEX:\n\t\treturn hex.EncodeToString(data), nil\n\tcase Base64:\n\t\treturn base64.StdEncoding.EncodeToString(data), nil\n\tcase None:\n\t\treturn string(data), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"secretInfo OutputType unsupport\")\n\t}\n}",
"func (*HTML) isOutput() {\n}",
"func (c *Cmd) Output() ([]byte, error)",
"func (ct PacketType) Text() string {\n\treturn packetTypeText[ct]\n}",
"func (o BlobOutputDataSourceOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BlobOutputDataSource) string { return v.Type }).(pulumi.StringOutput)\n}",
"func (o TransformationOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Transformation) pulumi.StringPtrOutput { return v.Type }).(pulumi.StringPtrOutput)\n}",
"func (wm Watermark) isText() bool {\n\treturn wm.Mode == WMText\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
mark HTML as Output type
|
func (*HTML) isOutput() {
}
|
[
"func (r HTML) WriteContentType(w http.ResponseWriter) {\n\twriteContentType(w, htmlContentType)\n}",
"func (this *Tidy) OutputXhtml(val bool) (bool, error) {\n\treturn this.optSetBool(C.TidyXhtmlOut, cBool(val))\n}",
"func htmlFmt(w io.Writer, x interface{}, format string) {\n\twriteAny(w, x, true)\n}",
"func HTMLContentTypeMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=UTF-8\")\n\t\tnext.ServeHTTP(w, r)\n\t})\n}",
"func WriteHTML(writer http.ResponseWriter, html string, code int) {\n\twriter.Header().Set(\"Content-Type\", \"text/html; charset=UTF-8\")\n\twriter.WriteHeader(code)\n\tfmt.Fprint(writer, html)\n}",
"func Html(resp http.ResponseWriter, content string, code int) error {\n\tresp.Header().Add(\"Content-Type\", \"text/html\")\n\tresp.WriteHeader(code)\n\t_, err := resp.Write([]byte(content))\n\treturn maskAny(err)\n}",
"func (c *Context) HTML(code int, tpl string) {\n}",
"func IfReturnHTMLResponse(w http.ResponseWriter, r *http.Request) bool {\n\taccepts := r.Header[\"Accept\"]\n\tfor _, accept := range accepts {\n\t\tfields := strings.Split(accept, \",\")\n\t\tfor _, field := range fields {\n\t\t\tif field == contentTypeHtml {\n\t\t\t\tw.Header().Set(\"Content-Type\", contentTypeHtml)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func htmlEscaper(args ...interface{}) string {\n\ts, t := stringify(args...)\n\tif t == contentTypeHTML {\n\t\treturn s\n\t}\n\treturn htmlReplacer(s, htmlReplacementTable, true)\n}",
"func (this *Tidy) OutputEncoding(val int) (bool, error) {\n\tswitch val {\n\tcase Raw, Ascii, Latin0, Latin1, Utf8, Iso2022, Mac, Win1252, Ibm858, Utf16le, Utf16be, Utf16, Big5, Shiftjis:\n\t\treturn this.optSetInt(C.TidyOutCharEncoding, (C.ulong)(val))\n\t}\n\treturn false, errors.New(\"Argument val int is out of range (0-13)\")\n}",
"func (r Text) WriteContentType(w http.ResponseWriter) {\n\twriteContentType(w, plainContentType)\n}",
"func isContentTypeHtml(res *http.Response) bool {\n\tif res != nil {\n\t\tct := res.Header.Get(\"Content-Type\")\n\t\treturn ct == \"\" || strings.Contains(ct, \"text/html\")\n\t}\n\treturn false\n}",
"func (t FieldType) ToHTML() []byte {\n\treturn nil\n}",
"func (ra *ResponseAsserter) HTML() *HTMLAsserter {\n\t// @TODO do some basic html validation checking\n\treturn newHTMLAsserter(ra, ra.fail)\n}",
"func (self *Encoder) SetEscapeHTML(f bool) {\n if f {\n self.Opts |= EscapeHTML\n } else {\n self.Opts &= ^EscapeHTML\n }\n}",
"func (p Page) IsHTML() bool {\n\treturn p.Type().MediaType() == \"text/html\"\n}",
"func outputWriter(w http.ResponseWriter, body map[string]interface{}, contentType string, status int) (http.ResponseWriter, error) {\n\t// checks for the output version, either in XML or JSON\n\tre, err := regexp.Compile(`xml`)\n\tif re.MatchString(contentType) == true {\n\t\tw, err = xmlWriter(w, body, status)\n\t\treturn w, err\n\t}\n\tw, err = jsonWriter(w, body, status)\n\treturn w, err\n}",
"func (tcc *Tcc) SetOutputType(outputType int) {\n\tC.tcc_set_output_type(tcc.ctcc, C.int(outputType))\n}",
"func writeAny(w io.Writer, x interface{}, html bool) {\n\tswitch v := x.(type) {\n\tcase []byte:\n\t\twriteText(w, v, html)\n\tcase string:\n\t\twriteText(w, strings.Bytes(v), html)\n\tcase ast.Decl:\n\t\twriteNode(w, v, html, &defaultStyler)\n\tcase ast.Expr:\n\t\twriteNode(w, v, html, &defaultStyler)\n\tdefault:\n\t\tif html {\n\t\t\tvar buf bytes.Buffer;\n\t\t\tfmt.Fprint(&buf, x);\n\t\t\twriteText(w, buf.Bytes(), true);\n\t\t} else {\n\t\t\tfmt.Fprint(w, x)\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update is for component to have itself rerendered.
|
func (s *State) Update() {
// fmt.Println("update")
s.render()
}
|
[
"func (component *Component) Update() {\n\tcomponent.WarnError(component.UpdateWithError())\n}",
"func (c *PureComponent) HasUpdated() bool { return false }",
"func (v *Component) ComponentDidUpdate(prevProps *Map, prevState *Map) {}",
"func (c *Component) OnUpdate() {\n\t// Logger.Trace().Str(\"component\", c.GetName()).Msg(\"OnUpdate\")\n\tif c.customOnUpdate != nil {\n\t\tc.customOnUpdate(c)\n\t}\n}",
"func (s *State) RequestUpdate() {\n\ts.update = true\n}",
"func UpdateComponent(component Component) {\n}",
"func (*ModuleBase) Update(*ggl.Window, float64) {}",
"func (v *Component) ComponentWillUpdate(nextProps *Map, nextState *Map) {}",
"func (o *CanvasItem) Update() {\n\t//log.Println(\"Calling CanvasItem.Update()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"CanvasItem\", \"update\")\n\n\t// Call the parent method.\n\t// void\n\tretPtr := gdnative.NewEmptyVoid()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n}",
"func (l *List) Update() {\n\tl.win.Update()\n}",
"func (p *Prog) Update() error {\n\t//ebitenutil.DebugPrint(screen, \"Hello World!\")\n\t// screen.Fill(color.Black)\n\treturn nil\n}",
"func (g *Game) Update(display chan<- Frame) {\n\t// apply animations\n\tif g.animator.Step() {\n\t\tg.needsRender = true\n\t}\n\t// render if needed\n\tif g.needsRender {\n\t\tdisplay <- g.Render()\n\t\tg.needsRender = false\n\t}\n}",
"func (w *Window) Update() {\n\tw.redraw()\n\tw.refreshWait()\n\tw.resize()\n\tpollEvents()\n}",
"func (v *Component) redraw() {\n\tv.Render()\n}",
"func (c *SceneController) OnUpdate() {\n\tc.Component.OnUpdate()\n}",
"func (b *Bar) Update(progress int64) {\n\tb.control <- progress\n}",
"func (s *State) Update(status mesos.TaskStatus) {\n\ts.updates <- status\n}",
"func (r *Base) Update() {\n\tr.UpdatedAt = clock.Now()\n}",
"func (f *Fake) Update(args component.Arguments) error {\n\tif f.UpdateFunc != nil {\n\t\treturn f.UpdateFunc(args)\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ NewComboBoxSelector :initializer of combo box selector
|
// NewComboBoxSelector builds a ComboBoxSelector consisting of a text
// label and a combo box populated with the entries of list.
//
// The first entry of list is pre-selected; whenever the user changes
// the selection, SelectedItem is updated to the chosen entry. An empty
// list leaves SelectedItem as the empty string instead of panicking on
// an out-of-range index.
func NewComboBoxSelector(label string, list []string) *ComboBoxSelector {
	obj := new(ComboBoxSelector)
	// Guard against an empty list: the unconditional list[0] access
	// would panic with "index out of range".
	if len(list) > 0 {
		obj.SelectedItem = list[0]
	}
	// initialize widgets
	obj.Cell = widgets.NewQWidget(nil, 0)
	obj.box = widgets.NewQComboBox(obj.Cell)
	obj.box.AddItems(list)
	obj.textLabel = widgets.NewQLabel2(label, obj.Cell, 0)
	// lay out the label and the box horizontally
	layout := widgets.NewQHBoxLayout()
	layout.AddWidget(obj.textLabel, 0, 0)
	layout.AddWidget(obj.box, 0, 0)
	// apply layout
	obj.Cell.SetLayout(layout)
	// keep SelectedItem in sync with the combo box selection; Qt may
	// emit -1 when the box is cleared, so bounds-check the index
	obj.box.ConnectCurrentIndexChanged(func(index int) {
		if index >= 0 && index < len(list) {
			obj.SelectedItem = list[index]
		}
	})
	return obj
}
|
[
"func NewComboBox(\n\tctx *model.Context,\n\td types.Dict,\n\tv string,\n\tfonts map[string]types.IndirectRef) (*ComboBox, *types.IndirectRef, error) {\n\n\tcb := &ComboBox{Value: v}\n\n\tbb, err := types.RectForArray(d.ArrayEntry(\"Rect\"))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcb.BoundingBox = types.RectForDim(bb.Width(), bb.Height())\n\n\tfontIndRef, err := cb.calcFontFromDA(ctx, d, fonts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcb.HorAlign = types.AlignLeft\n\tif q := d.IntEntry(\"Q\"); q != nil {\n\t\tcb.HorAlign = types.HAlignment(*q)\n\t}\n\n\tbgCol, boCol, err := calcColsFromMK(ctx, d)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcb.BgCol = bgCol\n\n\tvar b Border\n\tboWidth := calcBorderWidth(d)\n\tif boWidth > 0 {\n\t\tb.Width = boWidth\n\t\tb.col = boCol\n\t}\n\tcb.Border = &b\n\n\treturn cb, fontIndRef, nil\n}",
"func NewCombobox() *Combobox {\n\tc := new(Combobox)\n\n\tc.c = C.uiNewCombobox()\n\n\tC.pkguiComboboxOnSelected(c.c)\n\n\tc.ControlBase = NewControlBase(c, uintptr(unsafe.Pointer(c.c)))\n\treturn c\n}",
"func create_combo_box(strings []string) *gtk.ComboBoxText {\n\tcombo_box := gtk.NewComboBoxText()\n\tfor _, s := range strings {\n\t\tcombo_box.AppendText(s)\n\t}\n\tcombo_box.SetActive(0)\n\treturn combo_box\n}",
"func (app *controlsTestApplication) ForComboBox() *controls.ComboBoxBuilder {\n\treturn controls.NewComboBoxBuilder(app.ForLabel(), app.rectRenderer)\n}",
"func Combo(label, previewValue string, items []string, selected *int32) *ComboWidget {\n\treturn &ComboWidget{\n\t\tlabel: GenAutoID(label),\n\t\tpreviewValue: Context.FontAtlas.RegisterString(previewValue),\n\t\titems: Context.FontAtlas.RegisterStringSlice(items),\n\t\tselected: selected,\n\t\tflags: 0,\n\t\twidth: 0,\n\t\tonChange: nil,\n\t}\n}",
"func ComboCustom(label, previewValue string) *ComboCustomWidget {\n\treturn &ComboCustomWidget{\n\t\tlabel: GenAutoID(label),\n\t\tpreviewValue: Context.FontAtlas.RegisterString(previewValue),\n\t\twidth: 0,\n\t\tflags: 0,\n\t\tlayout: nil,\n\t}\n}",
"func NewSelector(ctx Context) Selector {\n\tstate := getState(ctx)\n\tstate.dispatcher.selectorSequence++\n\treturn NewNamedSelector(ctx, fmt.Sprintf(\"selector-%v\", state.dispatcher.selectorSequence))\n}",
"func NewSelector() Selector {\n\treturn internalSelector(nil)\n}",
"func New(data string) (*Selector, error) {\n\ts := &Selector{}\n\terr := json.Unmarshal([]byte(data), &s)\n\treturn s, err\n}",
"func createObjectPicker(n int64, distrib string) (ObjectSelector, error) {\n\n\tswitch distrib {\n\tcase ZIPFIAN_OBJECT_PICK:\n\t\tvar x = new(Zipf)\n\t\tx.SetParams(n, 0.8, 99)\n\t\treturn x, nil\n\tcase UNIFORM_OBJECT_PICK:\n\t\tvar y = new(Uniform)\n\t\ty.SetParams(n, 99)\n\t\treturn y, nil\n\t}\n\treturn nil, errors.New(\"Not a valid distribution for object selection\")\n}",
"func createObjectPicker(n int64, distrib string) (ObjectSelector, error) {\n\n\tswitch distrib {\n\tcase ZIPFIAN_OBJECT_PICK:\n\t\tvar x = new(Zipf)\n\t\tx.SetParams(n, 0.8, 99)\n\t\treturn x, nil\n\tcase UNIFORM_OBJECT_PICK:\n\t\tvar y = new(Uniform)\n\t\ty.SetParams(n, 99)\n\t\treturn y, nil\n\t}\n\n\tfmt.Println(\"what is the distribution :\", distrib)\n\treturn new(Uniform), errors.New(\"Not a valid distribution for object selection\")\n}",
"func NewCFNSelector(prompt Prompter) *CFNSelector {\n\treturn &CFNSelector{\n\t\tprompt: prompt,\n\t}\n}",
"func newSecretKeySelector(name, key string) secretKeySelector {\n\treturn secretKeySelector{\n\t\t\"name\": name,\n\t\t\"key\": key,\n\t}\n}",
"func GuiComboBox(bounds Rectangle, text string, active int) int {\n\tctext := C.CString(text)\n\tdefer C.free(unsafe.Pointer(ctext))\n\tcbounds := *bounds.cptr()\n\tres := C.GuiComboBox(cbounds, ctext, C.int(int32(active)))\n\treturn int(int32(res))\n}",
"func comboBoxFinalizer(cb *ComboBox) {\n\truntime.SetFinalizer(cb, func(cb *ComboBox) { gobject.Unref(cb) })\n}",
"func (t *OpenconfigSystem_System_Logging_Console_Selectors) NewSelector(Facility E_OpenconfigSystemLogging_SYSLOG_FACILITY, Severity E_OpenconfigSystemLogging_SyslogSeverity) (*OpenconfigSystem_System_Logging_Console_Selectors_Selector, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Selector == nil {\n\t\tt.Selector = make(map[OpenconfigSystem_System_Logging_Console_Selectors_Selector_Key]*OpenconfigSystem_System_Logging_Console_Selectors_Selector)\n\t}\n\n\tkey := OpenconfigSystem_System_Logging_Console_Selectors_Selector_Key{\n\t\tFacility: Facility,\n\t\tSeverity: Severity,\n\t}\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Selector[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Selector\", key)\n\t}\n\n\tt.Selector[key] = &OpenconfigSystem_System_Logging_Console_Selectors_Selector{\n\t\tFacility: Facility,\n\t\tSeverity: Severity,\n\t}\n\n\treturn t.Selector[key], nil\n}",
"func GuiComboBox(bounds Rectangle, text string, active int32) int32 {\n\tcbounds, _ := *(*C.Rectangle)(unsafe.Pointer(&bounds)), cgoAllocsUnknown\n\ttext = safeString(text)\n\tctext, _ := unpackPCharString(text)\n\tcactive, _ := (C.int)(active), cgoAllocsUnknown\n\t__ret := C.GuiComboBox(cbounds, ctext, cactive)\n\truntime.KeepAlive(text)\n\t__v := (int32)(__ret)\n\treturn __v\n}",
"func NewContractFunctionSelector(name string) ContractFunctionSelector {\n\tvar function *string\n\n\tif name == \"\" {\n\t\tfunction = nil\n\t} else {\n\t\tfunction = &name\n\t}\n\n\treturn ContractFunctionSelector{\n\t\tfunction: function,\n\t\tparams: \"\",\n\t\tparamTypes: []_Solidity{},\n\t}\n}",
"func NewSelectWatch(w Interface, fn SelectFunc) Interface {\n\tcxt, canceler := context.WithCancel(context.Background())\n\tf := &SelectorWatch{\n\t\twatch: w,\n\t\tcxt: cxt,\n\t\tselectFn: fn,\n\t\tstopFn: canceler,\n\t\teventChannel: make(chan Event, DefaultChannelBuffer),\n\t}\n\tgo f.selectWatchEvent()\n\treturn f\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
branchName takes the root directory and relative path to the directory and returns the branch name
|
func branchName(root, dirRelPath, openAPIFileName string) string {
name := filepath.Base(dirRelPath)
_, err := os.Stat(filepath.Join(root, dirRelPath, openAPIFileName))
if !os.IsNotExist(err) {
// add Pkg: prefix indicating that it is a separate package as it has
// openAPIFile
return fmt.Sprintf("Pkg: %s", name)
}
return name
}
|
[
"func (c *DeployServerConfig) BranchName() string {\n\treturn strings.Split(c.Ref, \"/\")[1]\n}",
"func (g *github) GetBranchName() string { return g.branchName }",
"func (ref RefName) BranchName() string {\n\treturn ref.nameWithoutPrefix(BranchPrefix)\n}",
"func gitBranchName() string {\n\t// branch name variable set by Github Actions\n\tif branch, isset := os.LookupEnv(\"GITHUB_HEAD_REF\"); isset && branch != \"\" {\n\t\treturn \"origin/\" + branch\n\t}\n\tif branch, isset := os.LookupEnv(\"GITHUB_REF\"); isset && branch != \"\" {\n\t\treturn \"origin/\" + strings.TrimPrefix(branch, \"refs/heads/\")\n\t}\n\tbranch := getCmdOutput(\"git rev-parse --abbrev-ref HEAD\")\n\treturn branch\n}",
"func (p *PipelineActivity) BranchName() string {\n\tpipelineName := p.Spec.Pipeline\n\tif pipelineName == \"\" {\n\t\treturn \"\"\n\t}\n\tpaths := strings.Split(pipelineName, \"/\")\n\tbranch := paths[len(paths)-1]\n\tp.Spec.GitBranch = branch\n\treturn branch\n}",
"func branchName() (string, string) {\n\tbranch := gitBranchName()\n\treleaseName := strings.TrimPrefix(branch, \"origin/\")\n\n\treturn releaseName, branch\n}",
"func Branch() string {\n\treturn run.Capture(\"git rev-parse --abbrev-ref HEAD\")\n}",
"func (g *GitLocal) Branch(dir string) (string, error) {\n\treturn g.GitCLI.Branch(dir)\n}",
"func GitBranch(dir string) (branch string, err error) {\n\tcmd := Cmd(dir, \"git rev-parse --abbrev-ref HEAD\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn\n\t}\n\tbranch = strings.TrimSpace(string(out))\n\tif branch == \"HEAD\" {\n\t\terr = ErrGitDetached\n\t}\n\treturn\n}",
"func (c *config) branch(name string) (output string, err error) {\n\tlog.Printf(\"creating branch: %v\", name)\n\n\tdefaultCommand := []string{\"branch\", name}\n\n\treturn c.command(defaultCommand...)\n\n}",
"func (o DomainAssociationSubDomainOutput) BranchName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DomainAssociationSubDomain) string { return v.BranchName }).(pulumi.StringOutput)\n}",
"func (s *splicer) branch(name string) error {\n\treturn s.gitCall(\"checkout\", \"-B\", name, \"master\")\n}",
"func (o AppProductionBranchOutput) BranchName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AppProductionBranch) *string { return v.BranchName }).(pulumi.StringPtrOutput)\n}",
"func getCurrentBranchName() string {\n\tcommand := exec.Command(\"git\", \"rev-parse\", \"--abbrev-ref\", \"HEAD\")\n\tcommandOutput, err := command.Output()\n\tif err != nil {\n\t\tpanic(\"Cannot get current branch\")\n\t}\n\treturn strings.Replace(string(commandOutput), \"\\n\", \"\", -1)\n}",
"func (w *Wiki) checkoutBranch(name string) (string, error) {\n\n\t// never checkout master in a linked repo\n\tif name == \"master\" {\n\t\treturn \"\", errors.New(\"cannot check out master in a linked repo\")\n\t}\n\n\t// TODO: make sure name is a simple string with no path elements\n\n\t// make cache/branch/ if needed\n\twikifier.MakeDir(filepath.Join(w.Opt.Dir.Cache, \"branch\"), \"\")\n\n\t// e.g. cache/branch/mybranchname\n\ttargetDir := filepath.Join(w.Opt.Dir.Cache, \"branch\", name)\n\n\t// directory already exists, so I'm good with saying the branch is there\n\tif fi, err := os.Stat(targetDir); err == nil && fi.IsDir() {\n\t\treturn targetDir, nil\n\t}\n\n\trepo, err := w.repo()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// create the linked repository\n\tif _, err = repo.PlainAddWorktree(name, targetDir, &git.AddWorktreeOptions{}); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn targetDir, nil\n}",
"func Branch() (b string) {\n\treturn branch\n}",
"func GetBranchFromRef(ref string) string {\n\tparts := strings.Split(ref, \"/\")\n\treturn strings.Join(parts[2:], \"/\")\n}",
"func GetCurrentBranch() string {\n\tcmd := exec.Command(\"git\", \"branch\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Run()\n\tlines := strings.Split(out.String(), \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.Contains(line, \"*\") {\n\t\t\tbranch := strings.Replace(line, \"*\", \"\", -1)\n\t\t\tbranch = strings.TrimSpace(branch)\n\t\t\treturn branch\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (st *buildStatus) branch() string {\n\tif st.SubRev != \"\" {\n\t\treturn st.SubRevBranch\n\t}\n\treturn st.RevBranch\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
graphStructure writes the tree using owners for structure
|
// graphStructure writes the tree of nodes using owner references for
// structure: each resource becomes a child of its first owner, and
// resources with no owner hang off a synthetic root.
//
// Returns an error if an object names an owner that is absent from the
// input, or if converting any node/owner to its string key fails.
func (p TreeWriter) graphStructure(nodes []*yaml.RNode) error {
	// resourceToOwner maps a resource's string key (ownerToString /
	// nodeToString format) to its tree node, so owners and children can
	// be linked regardless of input order.
	resourceToOwner := map[string]*node{}
	root := &node{}
	// index each of the nodes by their owner
	for _, n := range nodes {
		ownerVal, err := ownerToString(n)
		if err != nil {
			return err
		}
		var owner *node
		if ownerVal == "" {
			// no owner -- attach to the root
			owner = root
		} else {
			// owner found -- attach to the owner
			var found bool
			owner, found = resourceToOwner[ownerVal]
			if !found {
				// initialize the owner if not found
				resourceToOwner[ownerVal] = &node{p: p}
				owner = resourceToOwner[ownerVal]
			}
		}
		nodeVal, err := nodeToString(n)
		if err != nil {
			return err
		}
		val, found := resourceToOwner[nodeVal]
		if !found {
			// initialize the node if not found -- may have already been initialized if it
			// is the owner of another node
			resourceToOwner[nodeVal] = &node{p: p}
			val = resourceToOwner[nodeVal]
		}
		// record the actual resource on its tree node and hook it under
		// its owner (the shared map entry keeps both views consistent)
		val.RNode = n
		owner.children = append(owner.children, val)
	}
	// any entry whose RNode was never set was created only because some
	// object referenced it as an owner -- the owner itself is missing
	// from the input, which is an error
	for k, v := range resourceToOwner {
		if v.RNode == nil {
			return fmt.Errorf(
				"owner '%s' not found in input, but found as an owner of input objects", k)
		}
	}
	// print the tree
	tree := treeprint.New()
	if err := root.Tree(tree); err != nil {
		return err
	}
	_, err := io.WriteString(p.Writer, tree.String())
	return err
}
|
[
"func (q *Qualifier) Graph(w io.Writer) {\n\tfmt.Fprintf(w, \"digraph {\\n\")\n\tq.root.Graph(w, 0, \"[root]\")\n\tfmt.Fprintf(w, \"}\\n\")\n}",
"func WriteTree(writer io.Writer, hierarchy *Hierarchy, includeEmpty bool) {\n\ttree := assembleTree(hierarchy)\n\tkeys := make([]string, len(tree))\n\ti := 0\n\tfor k := range tree {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\tsort.Strings(keys)\n\tfor _, key := range keys {\n\t\tb := tree[key]\n\t\twriteBranch(writer, b, \"\", hierarchy, includeEmpty)\n\t}\n}",
"func (p *GameTree) writeTree(w *bufio.Writer, n TreeNodeIdx, needs bool, nMov int, nMovPerLine int) (err error) {\n\tdefer u(tr(\"writeTree\"))\n\tif needs == true {\n\t\tif nMov > 0 {\n\t\t\terr = w.WriteByte('\\n')\n\t\t\tnMov = 0\n\t\t}\n\t\terr = w.WriteByte('(')\n\t}\n\tif err == nil {\n\t\tif nMov == nMovPerLine {\n\t\t\terr = w.WriteByte('\\n')\n\t\t\tnMov = 0\n\t\t}\n\t\terr = w.WriteByte(';')\n\t\t// write the node\n\t\ttyp := p.treeNodes[n].TNodType\n\t\tswitch typ {\n\t\tcase GameInfoNode:\n\t\t\t// fmt.Println(\"writing GameInfoNode\\n\")\n\t\t\terr = p.writeProperties(w, n, true)\n\t\tcase InteriorNode:\n\t\t\t// fmt.Println(\"writing InteriorNode\\n\")\n\t\t\terr = p.writeProperties(w, n, false)\n\t\tcase BlackMoveNode:\n\t\t\t_, err = w.WriteString(\"B[\")\n\t\t\t_, err = w.Write(SGFCoords(ah.NodeLoc(p.treeNodes[n].propListOrNodeLoc), p.IsFF4()))\n\t\t\terr = w.WriteByte(']')\n\t\t\tnMov += 1\n\t\tcase WhiteMoveNode:\n\t\t\t_, err = w.WriteString(\"W[\")\n\t\t\t_, err = w.Write(SGFCoords(ah.NodeLoc(p.treeNodes[n].propListOrNodeLoc), p.IsFF4()))\n\t\t\terr = w.WriteByte(']')\n\t\t\tnMov += 1\n\t\tdefault:\n\t\t\tfmt.Println(\"*** unsupported TreeNodeType in writeTree\")\n\t\t\terr = errors.New(\"writeTree: unsupported TreeNodeType\" + strconv.FormatInt(int64(typ), 10))\n\t\t\treturn err\n\t\t}\n\t\tif err == nil {\n\t\t\t// write the children\n\t\t\tlastCh := p.treeNodes[n].Children\n\t\t\tif lastCh != nilTreeNodeIdx && err == nil {\n\t\t\t\tch := p.treeNodes[lastCh].NextSib\n\t\t\t\tchNeeds := (lastCh != ch)\n\t\t\t\terr = p.writeTree(w, ch, chNeeds, nMov, nMovPerLine)\n\t\t\t\tfor ch != lastCh && err == nil {\n\t\t\t\t\tch = p.treeNodes[ch].NextSib\n\t\t\t\t\t//\t\t\t\t\tnMov += 1\n\t\t\t\t\terr = p.writeTree(w, ch, chNeeds, nMov, nMovPerLine)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (err == nil) && (needs == true) {\n\t\t\t\terr = w.WriteByte(')')\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}",
"func (s *Stub) WriteGraphNodes(w io.Writer) {\n\tw.Write([]byte(fmt.Sprintf(\"\\\"%x\\\" [\\n\\tshape=box\\n\\tstyle=\\\"filled,dashed\\\"\\n\\ttextcolor=blue\\n\\tcolor=blue\\n\\tfillcolor=lightblue];\\n\", s.GetGraphHash())))\n}",
"func Marshal(g *graph.DirectedGraph) ([]byte, error) {\n\tvar b bytes.Buffer\n\n\t// Static graph configuration attributes\n\n\tb.WriteString(\"strict digraph bridge {\\n\")\n\tb.WriteByte('\\n')\n\tb.WriteString(\"graph [\\n\")\n\tb.WriteString(\" rankdir=LR\\n\")\n\tb.WriteString(\"]\\n\")\n\tb.WriteByte('\\n')\n\tb.WriteString(\"node [\\n\")\n\tb.WriteString(\" fontname=\\\"Helvetica\\\"\\n\")\n\tb.WriteString(\" shape=plain\\n\")\n\tb.WriteString(\"]\\n\")\n\tb.WriteByte('\\n')\n\n\t// Vertices\n\n\t// Index of vertices already converted to node, for faster access\n\t// during sorting of edges.\n\t// The keys used in the map are also the ones used in the graph.DirectedGraph\n\tvertIndex := make(map[interface{}]*node)\n\n\tsortedNodes := make(nodeList, 0, len(g.Vertices()))\n\tfor k, v := range g.Vertices() {\n\t\tn := graphVertexToNode(v)\n\t\tvertIndex[k] = n\n\n\t\tsortedNodes = append(sortedNodes, n)\n\t\tsort.Sort(sortedNodes)\n\t}\n\n\tfor _, n := range sortedNodes {\n\t\tdotN, err := n.marshalDOT()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"marshaling node to DOT: %w\", err)\n\t\t}\n\t\tb.Write(dotN)\n\t}\n\n\tb.WriteByte('\\n')\n\n\t// Edges\n\n\tsortedDownEdges := make(downEdgesList, 0, len(g.DownEdges()))\n\tfor tailVertKey, headVerts := range g.DownEdges() {\n\t\ttailNode := vertIndex[tailVertKey]\n\n\t\tsortedHeadNodes := make(nodeList, 0, len(headVerts))\n\t\tfor headVertKey := range headVerts {\n\t\t\tsortedHeadNodes = append(sortedHeadNodes, vertIndex[headVertKey])\n\t\t}\n\t\tsort.Sort(sortedHeadNodes)\n\n\t\tsortedDownEdges = append(sortedDownEdges, downEdges{\n\t\t\ttail: tailNode,\n\t\t\theads: sortedHeadNodes,\n\t\t})\n\t}\n\tsort.Sort(sortedDownEdges)\n\n\tfor _, e := range sortedDownEdges {\n\t\tb.WriteString(e.tail.id() + \" -> {\")\n\t\tfor _, h := range e.heads {\n\t\t\tb.WriteByte(' ')\n\t\t\tb.WriteString(h.id())\n\t\t}\n\t\tb.WriteString(\" }\\n\")\n\t}\n\n\tb.WriteByte('\\n')\n\n\tb.WriteString(\"}\\n\")\n\n\treturn 
b.Bytes(), nil\n}",
"func (n *NetworkBuilder) NetworkGraph(w io.Writer) error {\n\tnodes := make(map[string]dot.Node)\n\tnodesByID := make(map[string]dot.Node)\n\tgraph := dot.NewGraph(dot.Directed)\n\n\tfor _, nr := range n.NetResources {\n\t\tnode := graph.Node(strings.Join([]string{nr.NodeId, nr.Iprange.String()}, \"\\n\")).Box()\n\t\t// set special style for \"hidden\" nodes\n\t\tif len(nr.PubEndpoints) == 0 {\n\t\t\tnode.Attr(\"style\", \"dashed\")\n\t\t\tnode.Attr(\"color\", \"blue\")\n\t\t\tgraph.AddToSameRank(\"hidden nodes\", node)\n\t\t}\n\t\tnodes[nr.WireguardPublicKey] = node\n\t\tnodesByID[nr.NodeId] = node\n\t}\n\n\t// add external access\n\tfor _, ea := range n.AccessPoints {\n\t\tnode := graph.Node(strings.Join([]string{\"External network\", ea.Subnet.String()}, \"\\n\")).Box()\n\t\t// set style for hidden nodes\n\t\tnode.Attr(\"style\", \"dashed\")\n\t\tnode.Attr(\"color\", \"green\")\n\t\tgraph.AddToSameRank(\"external access\", node)\n\t\t// add link to access point\n\t\tedge := graph.Edge(node, nodesByID[ea.NodeID], n.Iprange.String())\n\t\tif ea.IP4 {\n\t\t\tedge.Attr(\"color\", \"blue\")\n\t\t}\n\t\tnodes[ea.WGPublicKey] = node\n\t}\n\n\tfor _, nr := range n.NetResources {\n\t\tfor _, peer := range nr.Peers {\n\t\t\tallowedIPs := make([]string, 0, len(peer.AllowedIprange)/2)\n\t\t\tfor _, aip := range peer.AllowedIprange {\n\t\t\t\tif !isCGN(aip) {\n\t\t\t\t\tallowedIPs = append(allowedIPs, aip.String())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tedge := graph.Edge(nodes[nr.WireguardPublicKey], nodes[peer.PublicKey], strings.Join(allowedIPs, \"\\n\"))\n\t\t\tif peer.Endpoint == \"\" {\n\t\t\t\t// connections to this peer are IPv4 -> blue, and can not be initiated by this node -> dashed\n\t\t\t\tedge.Attr(\"color\", \"blue\").Attr(\"style\", \"dashed\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif net.ParseIP(peer.Endpoint[:strings.LastIndex(peer.Endpoint, \":\")]).To4() != nil {\n\t\t\t\t// IPv4 connection -> blue\n\t\t\t\tedge.Attr(\"color\", 
\"blue\")\n\t\t\t}\n\t\t}\n\t}\n\n\tgraph.Write(w)\n\treturn nil\n}",
"func (t *BPTree) WriteNodes(rwMode RWMode, syncEnable bool, flag int) error {\n\tvar (\n\t\tn *Node\n\t\ti int\n\t\terr error\n\t)\n\n\tfd, err := os.OpenFile(t.Filepath, os.O_CREATE|os.O_RDWR, 0644)\n\tdefer fd.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tqueue = nil\n\n\tenqueue(t.root)\n\n\tfor queue != nil {\n\t\tn = dequeue()\n\n\t\t_, err := t.WriteNode(n, -1, syncEnable, fd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif n != nil {\n\t\t\tif !n.isLeaf {\n\t\t\t\tfor i = 0; i <= n.KeysNum; i++ {\n\t\t\t\t\tc, _ := n.pointers[i].(*Node)\n\t\t\t\t\tenqueue(c)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (decTree *Tree) WriteTree(filename string) {\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\tfmt.Println(\"Error opening output file: \", filename)\n\t\treturn\n\t}\n\n\tcurrNode := decTree\n\tvar treeStack []*Tree\n\n\ttreeLen := 1\n\tfor treeLen != 0 {\n\t\tfile.WriteString(nodeToStr(currNode.Details))\n\n\t\tif currNode.Details.Leaf == false {\n\t\t\ttreeStack = append(treeStack, currNode.Right)\n\t\t\tcurrNode = currNode.Left\n\t\t\ttreeLen++\n\t\t} else {\n\t\t\t//get the length of the tree and set curr to the last element in the list\n\t\t\ttreeLen--\n\n\t\t\tif treeLen > 0 {\n\t\t\t\tcurrNode, treeStack = treeStack[treeLen-1], treeStack[:treeLen-1]\n\t\t\t}\n\t\t}\n\t}\n\n\tfile.Close()\n}",
"func DrawGraph(filename string, s spn.SPN) {\n\tfile, err := os.Create(filename)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Error. Could not create file [%s].\\n\", filename)\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\tfmt.Fprintf(file, \"graph {\\n\")\n\n\t// If the SPN is itself an univariate distribution, create a graph with a single node.\n\tif s.Type() == \"leaf\" {\n\t\tfmt.Fprintf(file, \"X1 [label=<X<sub>1</sub>>,shape=circle];\\n\")\n\t\tfmt.Fprintf(file, \"}\")\n\t\tfile.Close()\n\t\treturn\n\t}\n\n\t// Else, BFS the SPN and write nodes to filename.\n\tnvars, nsums, nprods := 0, 0, 0\n\tqueue := common.Queue{}\n\tqueue.Enqueue(&BFSPair{Spn: s, Pname: \"\", Weight: -1.0})\n\tfor !queue.Empty() {\n\t\tcurrpair := queue.Dequeue().(*BFSPair)\n\t\tcurr, pname, pw := currpair.Spn, currpair.Pname, currpair.Weight\n\t\tch := curr.Ch()\n\t\tnch := len(ch)\n\n\t\tname := \"N\"\n\t\tcurrt := curr.Type()\n\n\t\t// In case it is a sum node. Else product node.\n\t\tif currt == \"sum\" {\n\t\t\tname = fmt.Sprintf(\"S%d\", nsums)\n\t\t\tfmt.Fprintf(file, \"%s [label=\\\"+\\\",shape=circle];\\n\", name)\n\t\t\tnsums++\n\t\t} else if currt == \"product\" {\n\t\t\tname = fmt.Sprintf(\"P%d\", nprods)\n\t\t\tfmt.Fprintf(file, \"%s [label=<×>,shape=circle];\\n\", name)\n\t\t\tnprods++\n\t\t}\n\n\t\t// If pname is empty, then it is the root node. Else, link parent node to current node.\n\t\tif pname != \"\" {\n\t\t\tif pw >= 0 {\n\t\t\t\tfmt.Fprintf(file, \"%s -- %s [label=\\\"%.3f\\\"];\\n\", pname, name, pw)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(file, \"%s -- %s\\n\", pname, name)\n\t\t\t}\n\t\t}\n\n\t\tvar w []float64\n\t\tif curr.Type() == \"sum\" {\n\t\t\tw = (curr.(*spn.Sum).Weights())\n\t\t}\n\t\t// For each children, run the BFS.\n\t\tfor i := 0; i < nch; i++ {\n\t\t\tc := ch[i]\n\n\t\t\t// If leaf, then simply write to the graphviz dot file. 
Else, recurse the BFS.\n\t\t\tif c.Type() == \"leaf\" {\n\t\t\t\tcname := fmt.Sprintf(\"X%d\", nvars)\n\t\t\t\tfmt.Fprintf(file, \"%s [label=<X<sub>%d</sub>>,shape=circle];\\n\", cname, c.Sc()[0])\n\t\t\t\tnvars++\n\t\t\t\tif currt == \"sum\" {\n\t\t\t\t\tfmt.Fprintf(file, \"%s -- %s [label=\\\"%.3f\\\"]\\n\", name, cname, w[i])\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintf(file, \"%s -- %s\\n\", name, cname)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ttw := -1.0\n\t\t\t\tif w != nil {\n\t\t\t\t\ttw = w[i]\n\t\t\t\t}\n\t\t\t\tqueue.Enqueue(&BFSPair{Spn: c, Pname: name, Weight: tw})\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Fprintf(file, \"}\")\n}",
"func (tree GitTree) Write(gitdir string) (string, error) {\n\t// Raw tree data (empty)\n\traw := []byte{}\n\n\t// For each tree-leaf\n\tfor _, t := range tree {\n\t\t// Building up data as [mode] space [path] 0x00 [sha-1]\n\t\traw = bytes.Join([][]byte{raw, []byte(t.Mode)}, []byte(\"\"))\n\t\traw = bytes.Join([][]byte{raw, []byte(\" \")}, []byte(\"\"))\n\t\traw = bytes.Join([][]byte{raw, []byte(t.Fpath)}, []byte(\"\"))\n\t\traw = bytes.Join([][]byte{raw, []byte{0x00}}, []byte(\"\"))\n\t\traw = bytes.Join([][]byte{raw, []byte(t.Sha)}, []byte(\"\"))\n\t}\n\n\t// Writetable Object from tree (GitTree)\n\twObj := GitObject{Kind: \"tree\", Data: raw}\n\n\t// Writing the Object\n\t// nFilePath, err := wObj.Write(\"git\")\n\t// return nFilePath, err\n\n\t// Writing the Object\n\tshaStr, err := wObj.Write(\"git\")\n\treturn shaStr, err\n}",
"func (g *Graph) writeGraph(output io.Writer) error {\n\theader := \"p tw \" + strconv.Itoa(g.numvert) + \" \" + strconv.Itoa(g.numedge) + \"\\n\"\n\tio.WriteString(output, header)\n\n\tline := \"\"\n\tfor edge, value := range g.edges {\n\t\tif value {\n\t\t\tline = strconv.Itoa(edge.from.vert) + \" \" + strconv.Itoa(edge.to.vert) + \"\\n\"\n\t\t\tio.WriteString(output, line)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (node *URLNode) WriteTree(writer io.Writer) {\n\tif _, err := writer.Write([]byte(node.GenerateTree())); err != nil {\n\t\tlog.Error(err)\n\t}\n}",
"func (b *builder) write() {\n\tfor i, name := range b.info.top {\n\t\te := b.index[name]\n\t\tif e != nil {\n\t\t\tfmt.Fprintf(b.w, comments[name])\n\t\t\tname := title(e.name)\n\t\t\tif i == 0 {\n\t\t\t\tname = b.info.root\n\t\t\t}\n\t\t\tfmt.Fprintf(b.w, \"type %s \", name)\n\t\t\tb.writeElem(0, e)\n\t\t\tfmt.Fprint(b.w, \"\\n\")\n\t\t}\n\t}\n}",
"func (st *STree) Write(w io.Writer) error {\n\tww := newWriter(w)\n\n\t// write nodes.\n\tww.writeInt(len(st.Nodes))\n\tif ww.err != nil {\n\t\treturn ww.err\n\t}\n\tfor _, n := range st.Nodes {\n\t\terr := n.write(ww)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// write levels.\n\tww.writeInt(len(st.Levels))\n\tif ww.err != nil {\n\t\treturn ww.err\n\t}\n\tfor _, lv := range st.Levels {\n\t\tww.writeInt(lv)\n\t}\n\tif ww.err != nil {\n\t\treturn ww.err\n\t}\n\n\tww.w.Flush()\n\treturn nil\n}",
"func Marshal(g *ag.Graph) ([]byte, error) {\n\tgv, err := graphviz.BuildGraph(g, graphviz.Options{ColoredTimeSteps: true})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(gv.String()), nil\n}",
"func (bpt *BplusTree) writeLayout() {\n\tleafIdx := 0\n\tnodeIdx := 0\n\tlevelIdx := 0\n\n\tif !bpt.initialized || bpt.rootKey.IsNil() {\n\t\treturn\n\t}\n\n\trootNode, _ := bpt.fetch(bpt.rootKey)\n\tif rootNode == nil {\n\t\tglog.Errorf(\"failed to fetch root key: %v. can not print the tree.\",\n\t\t\tbpt.rootKey)\n\t\treturn\n\t}\n\tglog.Infof(\"dumping the tree layout.. numChildren: %d\\n\",\n\t\tlen(rootNode.Children))\n\tnodeList := rootNode.Children\n\tnodeLensList := make([]int, 1)\n\tnodeLensList[0] = len(rootNode.Children)\n\tnumElems := nodeLensList[0]\n\tnumNodesAtLevel := 0\n\tprintLevel := true\n\tglog.Infof(\"level -- 0 <root: %v>\\n\", rootNode)\n\tif rootNode.IsLeaf {\n\t\treturn\n\t}\n\tfor i := 0; i < numElems; i++ {\n\t\tif printLevel {\n\t\t\tglog.Infof(\"level -- %d \", levelIdx+1)\n\t\t\tprintLevel = false\n\t\t}\n\t\tnode, _ := bpt.fetch(nodeList[i].NodeKey)\n\t\tif node == nil {\n\t\t\tglog.Errorf(\"failed to fetch root key: %v\", nodeList[i].NodeKey)\n\t\t\treturn\n\t\t}\n\n\t\tif node.IsLeaf {\n\t\t\tglog.Infof(\"level:%d <tree-L-node :%d, node: %v> \", levelIdx+1, leafIdx, node)\n\t\t\tleafIdx++\n\t\t} else {\n\t\t\tglog.Infof(\"level:%d <tree-I-node :%d, node: %v> \", levelIdx+1, nodeIdx, node)\n\t\t\tnodeList = append(nodeList, node.Children...)\n\t\t\tnumElems += len(node.Children)\n\t\t\tnumNodesAtLevel += len(node.Children)\n\t\t}\n\t\tnodeIdx++\n\t\tif nodeIdx >= nodeLensList[levelIdx] {\n\t\t\tnodeLensList = append(nodeLensList, numNodesAtLevel)\n\t\t\tlevelIdx++\n\t\t\tnodeIdx = 0\n\t\t\tnumNodesAtLevel = 0\n\t\t\tglog.Infof(\"\\n\")\n\t\t\tprintLevel = true\n\t\t}\n\t}\n\tglog.Infof(\"done.. dumping the layout\\n\")\n\tglog.Infof(\"----------------------------\\n\")\n}",
"func main() {\n\troot := TreeNode{\n\t\tVal: 1,\n\t\tLeft: &TreeNode{\n\t\t\tVal: 2,\n\t\t\tLeft: nil,\n\t\t\tRight: nil,\n\t\t},\n\t\tRight: &TreeNode{\n\t\t\tVal: 3,\n\t\t\tLeft: &TreeNode{\n\t\t\t\tVal: 4,\n\t\t\t\tRight: nil,\n\t\t\t\tLeft: nil,\n\t\t\t},\n\t\t\tRight: &TreeNode{\n\t\t\t\tVal: 5,\n\t\t\t\tRight: nil,\n\t\t\t\tLeft: nil,\n\t\t\t},\n\t\t},\n\t}\n\tobj := Constructor()\n\tdata := obj.serialize(&root)\n\tfmt.Println(data)\n}",
"func (graph *graphRW) Save() {\n\tgraph.parent.rwLock.Lock()\n\tdefer graph.parent.rwLock.Unlock()\n\n\tdestGraph := graph.parent.graph\n\n\t// propagate newly registered mappings\n\tfor mapName, mapping := range graph.mappings {\n\t\tif _, alreadyReg := destGraph.mappings[mapName]; !alreadyReg {\n\t\t\tdestGraph.mappings[mapName] = mapping\n\t\t}\n\t}\n\n\t// apply deleted nodes\n\tfor _, key := range graph.deleted {\n\t\tif node, has := destGraph.nodes[key]; has {\n\t\t\t// remove metadata\n\t\t\tif node.metadataAdded {\n\t\t\t\tif mapping, hasMapping := destGraph.mappings[node.metadataMap]; hasMapping {\n\t\t\t\t\tmapping.Delete(node.label)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// remove node from graph\n\t\t\tdelete(destGraph.nodes, key)\n\t\t}\n\t\tgraph.newRevs[key] = true\n\t}\n\tgraph.deleted = []string{}\n\n\t// apply new/changes nodes\n\tfor key, node := range graph.nodes {\n\t\tif !node.dataUpdated && !node.targetsUpdated && !node.sourcesUpdated {\n\t\t\tcontinue\n\t\t}\n\n\t\t// update metadata\n\t\tif !node.metaInSync {\n\t\t\t// update metadata map\n\t\t\tif mapping, hasMapping := destGraph.mappings[node.metadataMap]; hasMapping {\n\t\t\t\tif node.metadataAdded {\n\t\t\t\t\tif node.metadata == nil {\n\t\t\t\t\t\tmapping.Delete(node.label)\n\t\t\t\t\t\tnode.metadataAdded = false\n\t\t\t\t\t} else {\n\t\t\t\t\t\tprevMeta, _ := mapping.GetValue(node.label)\n\t\t\t\t\t\tif !reflect.DeepEqual(prevMeta, node.metadata) {\n\t\t\t\t\t\t\tmapping.Update(node.label, node.metadata)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if node.metadata != nil {\n\t\t\t\t\tmapping.Put(node.label, node.metadata)\n\t\t\t\t\tnode.metadataAdded = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// mark node for recording during RW-handle release\n\t\t// (ignore if only sources have been updated)\n\t\tif node.dataUpdated || node.targetsUpdated {\n\t\t\tif _, newRev := graph.newRevs[key]; !newRev {\n\t\t\t\tgraph.newRevs[key] = false\n\t\t\t}\n\t\t\tgraph.newRevs[key] = graph.newRevs[key] || 
node.dataUpdated\n\t\t}\n\n\t\t// copy changed node to the actual graph\n\t\tnodeCopy := node.copy()\n\t\tnodeCopy.graph = destGraph\n\t\tdestGraph.nodes[key] = newNode(nodeCopy)\n\n\t\t// use copy-on-write targets+sources for the write-handle\n\t\tcowTargets := nodeCopy.targets\n\t\tnodeCopy.targets = node.targets\n\t\tnode.targets = cowTargets\n\t\tcowSources := nodeCopy.sources\n\t\tnodeCopy.sources = node.sources\n\t\tnode.sources = cowSources\n\n\t\t// working copy is now in-sync\n\t\tnode.dataUpdated = false\n\t\tnode.targetsUpdated = false\n\t\tnode.sourcesUpdated = false\n\t\tnode.metaInSync = true\n\t}\n}",
"func (r *MemRepo) Graph() (map[string]*dsgraph.Node, error) {\n\treturn Graph(r)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ownerToString generates a string to identify the owner; the format matches nodeToString
|
// ownerToString generates a string identifying the first owner of
// node, in the same "<kind> <namespace>/<name>" format produced by
// nodeToString. It returns an empty string (and nil error) when the
// node has no ownerReferences.
func ownerToString(node *yaml.RNode) (string, error) {
	meta, err := node.GetMeta()
	if err != nil {
		return "", err
	}
	refs, err := node.Pipe(yaml.Lookup("metadata", "ownerReferences"))
	if err != nil || refs == nil {
		// lookup failed, or the node simply has no owners
		return "", err
	}
	elements, err := refs.Elements()
	if err != nil {
		return "", err
	}
	if len(elements) == 0 {
		return "", nil
	}
	// only the first owner reference determines the tree placement
	first := elements[0]
	var kind, name string
	if f := first.Field("kind"); !f.IsNilOrEmpty() {
		kind = f.Value.YNode().Value
	}
	if f := first.Field("name"); !f.IsNilOrEmpty() {
		name = f.Value.YNode().Value
	}
	return fmt.Sprintf("%s %s/%s", kind, meta.Namespace, name), nil
}
|
[
"func (t *TransactionV2) Owner() string {\n\treturn utils.EncodeToBase64(t.owner.Bytes())\n}",
"func (o BucketReplicationConfigurationRuleDestinationAccessControlTranslationOutput) Owner() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigurationRuleDestinationAccessControlTranslation) string { return v.Owner }).(pulumi.StringOutput)\n}",
"func (t *Transaction) Owner() string {\n\treturn utils.EncodeToBase64(t.owner.Bytes())\n}",
"func (o LookupManagedPrefixListResultOutput) OwnerId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupManagedPrefixListResult) string { return v.OwnerId }).(pulumi.StringOutput)\n}",
"func (s *Stream) Owner() string {\n\tif s.OwnerRaw == nil {\n\t\treturn \"\"\n\t}\n\n\treturn *s.OwnerRaw\n}",
"func (o InternetGatewayOutput) OwnerId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *InternetGateway) pulumi.StringOutput { return v.OwnerId }).(pulumi.StringOutput)\n}",
"func (n *Node) Owner() *userpb.UserId {\n\treturn n.SpaceRoot.owner\n}",
"func (o GetAggregateConfigRulesRuleOutput) SourceOwner() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAggregateConfigRulesRule) string { return v.SourceOwner }).(pulumi.StringOutput)\n}",
"func (me TAttlistGeneralNoteOwner) String() string { return xsdt.Token(me).String() }",
"func (o GetRulesRuleOutput) SourceOwner() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetRulesRule) string { return v.SourceOwner }).(pulumi.StringOutput)\n}",
"func TopLevelOwnerKey(ctx context.Context, obj metav1.Object, kubeContext string, kind string) string {\n\tfor {\n\t\tor := obj.GetOwnerReferences()\n\t\tif or == nil {\n\t\t\treturn fmt.Sprintf(\"%s-%s\", kind, obj.GetName())\n\t\t}\n\t\tvar err error\n\t\tkind = or[0].Kind\n\t\tobj, err = ownerMetaObject(ctx, obj.GetNamespace(), kubeContext, or[0])\n\t\tif err != nil {\n\t\t\tlog.Entry(ctx).Warnf(\"unable to get owner from reference: %v\", or[0])\n\t\t\treturn \"\"\n\t\t}\n\t}\n}",
"func (o SnapshotOutput) OwnerAccount() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Snapshot) pulumi.StringOutput { return v.OwnerAccount }).(pulumi.StringOutput)\n}",
"func (a Account) ShowOwner() string {\n\treturn a.owner\n}",
"func (r *NetworkAcl) OwnerId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"ownerId\"])\n}",
"func (md *RawMetadata) Owner(inherit bool) string {\n\tif md.owner == \"\" && inherit && md.Parent != nil {\n\t\treturn md.Parent.owner\n\t} else {\n\t\treturn md.owner\n\t}\n}",
"func (o ResolverDnsSecConfigOutput) OwnerId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ResolverDnsSecConfig) pulumi.StringOutput { return v.OwnerId }).(pulumi.StringOutput)\n}",
"func (l *loadBalancer) Owner() string {\n\tif l.shared {\n\t\treturn \"\"\n\t}\n\n\tfor _, ingresses := range l.ingresses {\n\t\tfor _, ingress := range ingresses {\n\t\t\treturn fmt.Sprintf(\"%s/%s\", ingress.Namespace, ingress.Name)\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func (a Account) Owner() string {\n\treturn a.owner\n}",
"func (o LookupStreamingImageResultOutput) Owner() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v LookupStreamingImageResult) *string { return v.Owner }).(pulumi.StringPtrOutput)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
index indexes the Resources by their package
|
// index groups the given resource nodes by the package (directory) they
// belong to, as derived from the path annotation on each node.
// Nodes that lack resource metadata (no kind) are skipped.
func (p TreeWriter) index(nodes []*yaml.RNode) map[string][]*yaml.RNode {
	byPkg := make(map[string][]*yaml.RNode)
	for _, n := range nodes {
		meta, err := n.GetMeta()
		if err != nil || meta.Kind == "" {
			// not a resource; skip it
			continue
		}
		// the package is the directory portion of the node's path annotation
		dir := filepath.Dir(meta.Annotations[kioutil.PathAnnotation])
		byPkg[dir] = append(byPkg[dir], n)
	}
	return byPkg
}
|
[
"func IndexResourcesByName(items []envoy_types.ResourceWithTTL) map[string]envoy_types.ResourceWithTTL {\n\tindexed := make(map[string]envoy_types.ResourceWithTTL, len(items))\n\tfor _, item := range items {\n\t\tkey := fmt.Sprintf(\"%s.%s\", item.Resource.(*mesh_proto.KumaResource).GetMeta().GetName(), item.Resource.(*mesh_proto.KumaResource).GetMeta().GetMesh())\n\t\tindexed[key] = item\n\t}\n\treturn indexed\n}",
"func (pi *PackageIndexer) Index(pack *Package) string {\n\tpi.mutex.Lock() \n\tdefer pi.mutex.Unlock()\n\t// foreach loop over the package's dependencies \n\tfor _, dep := range pack.deps {\n\t\t// query for each dependency\n\t\tif dep.name != \"\" && pi.Query(dep.name) == FAIL {\n\t\t\t// dependency not installed, cannot be indexed \n\t\t\treturn FAIL\n\t\t}\n\t}\n\t// package dependencies exist: update/add package \n\tpi.packs[pack.name] = pack\n\n\treturn OK \n}",
"func (w *exportWriter) writeIndex(index map[types.Object]uint64) {\n\ttype pkgObj struct {\n\t\tobj types.Object\n\t\tname string // qualified name; differs from obj.Name for type params\n\t}\n\t// Build a map from packages to objects from that package.\n\tpkgObjs := map[*types.Package][]pkgObj{}\n\n\t// For the main index, make sure to include every package that\n\t// we reference, even if we're not exporting (or reexporting)\n\t// any symbols from it.\n\tif w.p.localpkg != nil {\n\t\tpkgObjs[w.p.localpkg] = nil\n\t}\n\tfor pkg := range w.p.allPkgs {\n\t\tpkgObjs[pkg] = nil\n\t}\n\n\tfor obj := range index {\n\t\tname := w.p.exportName(obj)\n\t\tpkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})\n\t}\n\n\tvar pkgs []*types.Package\n\tfor pkg, objs := range pkgObjs {\n\t\tpkgs = append(pkgs, pkg)\n\n\t\tsort.Slice(objs, func(i, j int) bool {\n\t\t\treturn objs[i].name < objs[j].name\n\t\t})\n\t}\n\n\tsort.Slice(pkgs, func(i, j int) bool {\n\t\treturn w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])\n\t})\n\n\tw.uint64(uint64(len(pkgs)))\n\tfor _, pkg := range pkgs {\n\t\tw.string(w.exportPath(pkg))\n\t\tw.string(pkg.Name())\n\t\tw.uint64(uint64(0)) // package height is not needed for go/types\n\n\t\tobjs := pkgObjs[pkg]\n\t\tw.uint64(uint64(len(objs)))\n\t\tfor _, obj := range objs {\n\t\t\tw.string(obj.name)\n\t\t\tw.uint64(index[obj.obj])\n\t\t}\n\t}\n}",
"func IndexResourcesByName(items []types.Resource) map[string]types.Resource {\n\tindexed := make(map[string]types.Resource, len(items))\n\tfor _, item := range items {\n\t\tindexed[GetResourceName(item)] = item\n\t}\n\treturn indexed\n}",
"func IndexResourcesByName(items []envoy_types.ResourceWithTTL) map[string]envoy_types.ResourceWithTTL {\n\tindexed := make(map[string]envoy_types.ResourceWithTTL, len(items))\n\tfor _, item := range items {\n\t\tkey := GetResourceName(item.Resource)\n\t\tindexed[key] = item\n\t}\n\treturn indexed\n}",
"func (api *MediaApi) index(c *routing.Context) error {\n\t// --- fetch search data\n\tsearchFields := []string{\"title\", \"type\", \"path\", \"created\", \"modified\"}\n\tsearchData := utils.GetSearchConditions(c, searchFields)\n\t// ---\n\n\t// --- fetch sort data\n\tsortFields := []string{\"title\", \"type\", \"path\", \"created\", \"modified\"}\n\tsortData := utils.GetSortFields(c, sortFields)\n\t// ---\n\n\ttotal, _ := api.dao.Count(searchData)\n\n\tlimit, page := utils.GetPaginationSettings(c, total)\n\n\tutils.SetPaginationHeaders(c, limit, total, page)\n\n\titems := []models.Media{}\n\n\tif total > 0 {\n\t\titems, _ = api.dao.GetList(limit, limit*(page-1), searchData, sortData)\n\n\t\titems = daos.ToAbsMediaPaths(items)\n\t}\n\n\treturn c.Write(items)\n}",
"func index(pkg *pkg) error {\n\n\t// ensure dependencies are indexed\n\tfor _, dependency := range pkg.Dependencies {\n\t\tif _, ok := indexRead(dependency); !ok {\n\t\t\treturn missingDependencies\n\t\t}\n\t}\n\n\t// if this index already exists we need to just update dependencies\n\texistingPkg, ok := indexRead(pkg.Name)\n\tif ok {\n\t\treturn updateDependents(existingPkg, pkg)\n\t}\n\n\t// update any dependants of this package\n\tupdateDependents(nil, pkg)\n\n\t// add the new index (possibly replacing the old)\n\tindexWrite(pkg.Name, pkg)\n\n\treturn nil\n}",
"func Index(realms map[string]*cloudformation.Realm, name, repo, dir, description string) j.ObjectType {\n\tfields := []j.Type{\n\t\td.Import(),\n\t\td.Pkg(name, path.Join(repo, dir, \"main.libsonnet\"), description),\n\t}\n\n\tfor _, realm := range realms {\n\t\timp := filepath.Join(GenPrefix, realm.N(\"realm\"), MainFile)\n\t\tfields = append(fields, j.Hidden(j.Import(realm.Name, imp)))\n\t}\n\n\tSortFields(fields)\n\n\treturn j.Object(\"\", fields...)\n}",
"func IndexPackage(packageName string, dependencies []string) {\n\tDebugf(\"Starting IndexPackage for %s: \", packageName)\n\trwMutex.Lock()\n\tcommandDependenciesMap[packageName] = dependencies\n\trwMutex.Unlock()\n\tDebugf(\"Done with IndexPackage for %s: \", packageName)\n}",
"func (router *Router) getResources(w http.ResponseWriter, r *http.Request) {\n\tclusterNames := r.URL.Query()[\"cluster\"]\n\tnamespaces := r.URL.Query()[\"namespace\"]\n\tname := r.URL.Query().Get(\"name\")\n\tresource := r.URL.Query().Get(\"resource\")\n\tpath := r.URL.Query().Get(\"path\")\n\tparamName := r.URL.Query().Get(\"paramName\")\n\tparam := r.URL.Query().Get(\"param\")\n\n\tlog.WithFields(logrus.Fields{\"clusters\": clusterNames, \"namespaces\": namespaces, \"name\": name, \"resource\": resource, \"path\": path, \"paramName\": paramName, \"param\": param}).Tracef(\"getResources\")\n\n\tvar resources []Resources\n\n\t// Loop through all the given cluster names and get for each provided name the cluster interface. After that we\n\t// check if the resource was provided via the forbidden resources list.\n\tfor _, clusterName := range clusterNames {\n\t\tcluster := router.clusters.GetCluster(clusterName)\n\t\tif cluster == nil {\n\t\t\terrresponse.Render(w, r, nil, http.StatusBadRequest, \"Invalid cluster name\")\n\t\t\treturn\n\t\t}\n\n\t\tif router.isForbidden(resource) {\n\t\t\terrresponse.Render(w, r, nil, http.StatusForbidden, fmt.Sprintf(\"Access for resource %s is forbidding\", resource))\n\t\t\treturn\n\t\t}\n\n\t\t// If the namespaces slice is nil, we retrieve the resource for all namespaces. If a list of namespaces was\n\t\t// provided we loop through all the namespaces and return the resources for these namespaces. 
All results are\n\t\t// added to the resources slice, which is then returned by the api.\n\t\tif namespaces == nil {\n\t\t\tlist, err := cluster.GetResources(r.Context(), \"\", name, path, resource, paramName, param)\n\t\t\tif err != nil {\n\t\t\t\terrresponse.Render(w, r, err, http.StatusBadRequest, \"Could not get resources\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar tmpResources map[string]interface{}\n\t\t\terr = json.Unmarshal(list, &tmpResources)\n\t\t\tif err != nil {\n\t\t\t\terrresponse.Render(w, r, err, http.StatusInternalServerError, \"Could not unmarshal resources\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresources = append(resources, Resources{\n\t\t\t\tCluster: clusterName,\n\t\t\t\tNamespace: \"\",\n\t\t\t\tResources: tmpResources,\n\t\t\t})\n\t\t} else {\n\t\t\tfor _, namespace := range namespaces {\n\t\t\t\tlist, err := cluster.GetResources(r.Context(), namespace, name, path, resource, paramName, param)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrresponse.Render(w, r, err, http.StatusBadRequest, \"Could not get resources\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tvar tmpResources map[string]interface{}\n\t\t\t\terr = json.Unmarshal(list, &tmpResources)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrresponse.Render(w, r, err, http.StatusInternalServerError, \"Could not unmarshal resources\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tresources = append(resources, Resources{\n\t\t\t\t\tCluster: clusterName,\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\tResources: tmpResources,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.WithFields(logrus.Fields{\"count\": len(resources)}).Tracef(\"getResources\")\n\trender.JSON(w, r, resources)\n}",
"func (s *Server) getIndexes(w http.ResponseWriter, r *http.Request) {\n\tfs, err := s.db.List(\"file\")\n\tif err != nil {\n\t\ts.logf(\"error listing files from mpd for building indexes: %v\", err)\n\t\twriteXML(w, errGeneric)\n\t\treturn\n\t}\n\tfiles := indexFiles(fs)\n\n\twriteXML(w, func(c *container) {\n\t\tc.Indexes = &indexesContainer{\n\t\t\tLastModified: time.Now().Unix(),\n\t\t}\n\n\t\t// Incremented whenever it's time to create a new index for a new\n\t\t// initial letter\n\t\tidx := -1\n\n\t\tvar indexes []index\n\n\t\t// A set of initial characters, used to deduplicate the addition of\n\t\t// nwe indexes\n\t\tseenChars := make(map[rune]struct{}, 0)\n\n\t\tfor _, f := range files {\n\t\t\t// Filter any non-top level items\n\t\t\tif strings.Contains(f.Name, string(os.PathSeparator)) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Initial rune is used to create an index name\n\t\t\tc, _ := utf8.DecodeRuneInString(f.Name)\n\t\t\tname := string(c)\n\n\t\t\t// If initial rune is a digit, put index under a numeric section\n\t\t\tif unicode.IsDigit(c) {\n\t\t\t\tc = '#'\n\t\t\t\tname = \"#\"\n\t\t\t}\n\n\t\t\t// If a new rune appears, create a new index for it\n\t\t\tif _, ok := seenChars[c]; !ok {\n\t\t\t\tseenChars[c] = struct{}{}\n\t\t\t\tindexes = append(indexes, index{Name: name})\n\t\t\t\tidx++\n\t\t\t}\n\n\t\t\tindexes[idx].Artists = append(indexes[idx].Artists, artist{\n\t\t\t\tName: f.Name,\n\t\t\t\tID: strconv.Itoa(f.ID),\n\t\t\t})\n\t\t}\n\n\t\tc.Indexes.Indexes = indexes\n\t})\n}",
"func (b *APIsBuilder) ParseIndex() {\n\tb.ByGroupVersionKind = map[string]map[string]map[string]*APIResource{}\n\tb.ByGroupKindVersion = map[string]map[string]map[string]*APIResource{}\n\n\tb.SubByGroupVersionKind = map[string]map[string]map[string]*types.Type{}\n\tfor _, c := range b.context.Order {\n\t\tif IsAPISubresource(c) {\n\t\t\tgroup := GetGroup(c)\n\t\t\tversion := GetVersion(c, group)\n\t\t\tkind := GetKind(c, group)\n\t\t\tif _, f := b.SubByGroupVersionKind[group]; !f {\n\t\t\t\tb.SubByGroupVersionKind[group] = map[string]map[string]*types.Type{}\n\t\t\t}\n\t\t\tif _, f := b.SubByGroupVersionKind[group][version]; !f {\n\t\t\t\tb.SubByGroupVersionKind[group][version] = map[string]*types.Type{}\n\t\t\t}\n\t\t\tb.SubByGroupVersionKind[group][version][kind] = c\n\t\t}\n\n\t\tif !IsAPIResource(c) {\n\t\t\tcontinue\n\t\t}\n\n\t\tr := &APIResource{\n\t\t\tType: c,\n\t\t\tNonNamespaced: IsNonNamespaced(c),\n\t\t}\n\t\tr.Group = GetGroup(c)\n\t\tr.Version = GetVersion(c, r.Group)\n\t\tr.Kind = GetKind(c, r.Group)\n\t\tr.Domain = b.Domain\n\n\t\trt := ParseResourceTag(b.GetResourceTag(c))\n\n\t\tr.Resource = rt.Resource\n\t\tr.REST = rt.REST\n\t\tr.ShortName = rt.ShortName\n\n\t\tr.Strategy = rt.Strategy\n\n\t\t// If not defined, default the strategy to the {{.Kind}}Strategy for backwards compatibility\n\t\tif len(r.Strategy) == 0 {\n\t\t\tr.Strategy = fmt.Sprintf(\"%sStrategy\", r.Kind)\n\t\t}\n\n\t\t// Copy the Status strategy to mirror the non-status strategy\n\t\tr.StatusStrategy = strings.TrimSuffix(r.Strategy, \"Strategy\")\n\t\tr.StatusStrategy = fmt.Sprintf(\"%sStatusStrategy\", r.StatusStrategy)\n\n\t\tif _, f := b.ByGroupKindVersion[r.Group]; !f {\n\t\t\tb.ByGroupKindVersion[r.Group] = map[string]map[string]*APIResource{}\n\t\t}\n\t\tif _, f := b.ByGroupKindVersion[r.Group][r.Kind]; !f {\n\t\t\tb.ByGroupKindVersion[r.Group][r.Kind] = map[string]*APIResource{}\n\t\t}\n\t\tif _, f := b.ByGroupVersionKind[r.Group]; !f 
{\n\t\t\tb.ByGroupVersionKind[r.Group] = map[string]map[string]*APIResource{}\n\t\t}\n\t\tif _, f := b.ByGroupVersionKind[r.Group][r.Version]; !f {\n\t\t\tb.ByGroupVersionKind[r.Group][r.Version] = map[string]*APIResource{}\n\t\t}\n\n\t\tb.ByGroupKindVersion[r.Group][r.Kind][r.Version] = r\n\t\tb.ByGroupVersionKind[r.Group][r.Version][r.Kind] = r\n\n\t\t// Do subresources\n\t\tif !HasSubresource(c) {\n\t\t\tcontinue\n\t\t}\n\t\tr.Type = c\n\t\tr.Subresources = b.GetSubresources(r)\n\t}\n}",
"func (d *docsIndexer) indexPackage(p *packages.Package) (docsPackage, error) {\n\tvar (\n\t\tpkgDocsMarkdown string\n\t\tconsts []constVarDocs\n\t\tvars []constVarDocs\n\t\ttypes []typeDocs\n\t\tfuncs []funcDocs\n\t\temitted = make(emittedDocumentationResults, 64)\n\t)\n\tfor _, file := range p.Syntax {\n\t\tfilename := p.Fset.Position(file.Pos()).Filename\n\t\tif !strings.HasPrefix(filename, d.i.projectRoot) {\n\t\t\t// Omit files (such as those generated by `go test`) that aren't in the project root\n\t\t\t// because those are not externally accessible under any circumstance.\n\t\t\tcontinue\n\t\t}\n\t\tfileDocs, err := d.indexFile(p, file, filepath.Base(filename), strings.HasSuffix(filename, \"_test.go\"))\n\t\tif err != nil {\n\t\t\treturn docsPackage{}, errors.Wrap(err, \"file \"+filename)\n\t\t}\n\t\tpkgDocsMarkdown += fileDocs.pkgDocsMarkdown\n\t\tfor _, c := range fileDocs.consts {\n\t\t\tconsts = append(consts, c)\n\t\t\temitted[c.def] = c.ID\n\t\t}\n\t\tfor _, v := range fileDocs.vars {\n\t\t\tvars = append(vars, v)\n\t\t\temitted[v.def] = v.ID\n\t\t}\n\t\tfor _, t := range fileDocs.types {\n\t\t\ttypes = append(types, t)\n\t\t\temitted[t.def] = t.ID\n\t\t}\n\t\tfor _, f := range fileDocs.funcs {\n\t\t\tfuncs = append(funcs, f)\n\t\t\temitted[f.def] = f.ID\n\t\t}\n\t}\n\n\trootPkgPath := d.rootPkgPath()\n\tshortestUniquePkgPath := strings.TrimPrefix(strings.TrimPrefix(pkgPathStdStrip(p.PkgPath), rootPkgPath), \"/\")\n\n\tvisibilityTags := []protocol.Tag{}\n\tif strings.Contains(p.PkgPath, \"/internal/\") || strings.HasSuffix(p.Name, \"_test\") {\n\t\tvisibilityTags = append(visibilityTags, protocol.TagPrivate)\n\t}\n\tif isDeprecated(pkgDocsMarkdown) {\n\t\tvisibilityTags = append(visibilityTags, protocol.TagDeprecated)\n\t}\n\tpkgTags := make([]protocol.Tag, len(visibilityTags))\n\tcopy(pkgTags, visibilityTags)\n\tpkgTags = append(pkgTags, protocol.TagPackage)\n\n\tpkgPathElements := strings.Split(pkgPathStdStrip(p.PkgPath), \"/\")\n\tpackageDocsID := 
(&documentationResult{\n\t\tDocumentation: protocol.Documentation{\n\t\t\tIdentifier: pkgPathElements[len(pkgPathElements)-1],\n\t\t\tSearchKey: shortestUniquePkgPath,\n\t\t\tNewPage: true,\n\t\t\tTags: pkgTags,\n\t\t},\n\t\tLabel: protocol.NewMarkupContent(\"Package \"+p.Name, protocol.PlainText),\n\t\tDetail: protocol.NewMarkupContent(pkgDocsMarkdown, protocol.Markdown),\n\t}).emit(d.i.emitter)\n\n\tnewSection := func(label, identifier string, children []uint64) uint64 {\n\t\tsectionID := (&documentationResult{\n\t\t\tDocumentation: protocol.Documentation{\n\t\t\t\tIdentifier: identifier,\n\t\t\t\tSearchKey: \"\", // don't index sections of documentation for search\n\t\t\t\tNewPage: false,\n\t\t\t\tTags: visibilityTags,\n\t\t\t},\n\t\t\tLabel: protocol.NewMarkupContent(label, protocol.PlainText),\n\t\t\tDetail: protocol.NewMarkupContent(\"\", protocol.PlainText),\n\t\t}).emit(d.i.emitter)\n\t\t_ = d.i.emitter.EmitDocumentationChildrenEdge(children, sectionID)\n\t\treturn sectionID\n\t}\n\n\tvar sections []uint64\n\t// Emit a \"Constants\" section\n\tsort.Slice(consts, func(i, j int) bool {\n\t\treturn sortName(\"\", \"\", consts[i].name, consts[j].name)\n\t})\n\tif len(consts) > 0 {\n\t\tvar children []uint64\n\t\tfor _, constDocs := range consts {\n\t\t\tchildren = append(children, constDocs.ID)\n\t\t}\n\t\tsections = append(sections, newSection(\"Constants\", \"const\", children))\n\t}\n\n\t// Emit a \"Variables\" section\n\tsort.Slice(vars, func(i, j int) bool {\n\t\treturn sortName(\"\", \"\", vars[i].name, vars[j].name)\n\t})\n\tif len(vars) > 0 {\n\t\tvar children []uint64\n\t\tfor _, varDocs := range vars {\n\t\t\tchildren = append(children, varDocs.ID)\n\t\t}\n\t\tsections = append(sections, newSection(\"Variables\", \"var\", children))\n\t}\n\n\t// Emit methods as children of their receiver types, functions as children of the type they\n\t// produce.\n\tsort.Slice(types, func(i, j int) bool {\n\t\treturn sortName(\"\", \"\", types[i].name, 
types[j].name)\n\t})\n\tsort.Slice(funcs, func(i, j int) bool {\n\t\treturn sortName(funcs[i].recvTypeName, funcs[j].recvTypeName, funcs[i].name, funcs[j].name)\n\t})\n\temittedMethods := map[uint64]struct{}{}\n\tfor _, typeDocs := range types {\n\t\tvar children []uint64\n\t\tfor _, funcDocs := range funcs {\n\t\t\tif _, emitted := emittedMethods[funcDocs.ID]; emitted {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif funcDocs.recvType == nil {\n\t\t\t\tvar matches int\n\t\t\t\tfor _, resultTypeExpr := range funcDocs.resultTypes {\n\t\t\t\t\tresultType := p.TypesInfo.TypeOf(resultTypeExpr)\n\t\t\t\t\tif dereference(resultType) == dereference(typeDocs.typ) {\n\t\t\t\t\t\tmatches++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif matches == 1 {\n\t\t\t\t\t// The function is only a child of the type it produces if there was one match.\n\t\t\t\t\t// If it returned multiple types, better off keeping it separate from both.\n\t\t\t\t\temittedMethods[funcDocs.ID] = struct{}{}\n\t\t\t\t\tchildren = append(children, funcDocs.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor _, funcDocs := range funcs {\n\t\t\tif _, emitted := emittedMethods[funcDocs.ID]; emitted {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif funcDocs.recvType != nil {\n\t\t\t\trecvType := p.TypesInfo.TypeOf(funcDocs.recvType)\n\t\t\t\tif dereference(recvType) == dereference(typeDocs.typ) {\n\t\t\t\t\temittedMethods[funcDocs.ID] = struct{}{}\n\t\t\t\t\tchildren = append(children, funcDocs.ID)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(children) > 0 {\n\t\t\t_ = d.i.emitter.EmitDocumentationChildrenEdge(children, typeDocs.ID)\n\t\t}\n\t}\n\n\t// Emit a \"Types\" section\n\tif len(types) > 0 {\n\t\tvar children []uint64\n\t\tfor _, typeDocs := range types {\n\t\t\tchildren = append(children, typeDocs.ID)\n\t\t}\n\t\tsections = append(sections, newSection(\"Types\", \"type\", children))\n\t}\n\n\t// Emit a \"Functions\" section\n\tif len(funcs) > 0 {\n\t\tvar children []uint64\n\t\tfor _, funcDocs := range funcs {\n\t\t\tif _, emitted := 
emittedMethods[funcDocs.ID]; emitted {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchildren = append(children, funcDocs.ID)\n\t\t}\n\t\tif len(children) > 0 {\n\t\t\tsections = append(sections, newSection(\"Functions\", \"func\", children))\n\t\t}\n\t}\n\n\treturn docsPackage{\n\t\tID: packageDocsID,\n\t\tPath: pkgPathStdStrip(p.PkgPath),\n\t\temitted: emitted,\n\t\tchildren: sections,\n\t}, nil\n}",
"func (i *Indexer) indexDocumentation() error {\n\tvar (\n\t\td = &docsIndexer{i: i}\n\t\tmu sync.Mutex\n\t\tdocsPackages []docsPackage\n\t\temitted = make(emittedDocumentationResults, 4096)\n\t\temittedPackagesByPath = make(map[string]uint64, 32)\n\t\terrs error\n\t)\n\ti.visitEachPackage(\"Indexing documentation\", func(p *packages.Package) {\n\t\t// Index the package without the lock, for parallelism.\n\t\tdocsPkg, err := d.indexPackage(p)\n\n\t\t// Acquire the lock; note that multierror.Append could also be racy and hence we hold the\n\t\t// lock even for the error check. In practice, this is not where most of the work is done\n\t\t// (indexPackage is) so this is fine.\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tif err != nil {\n\t\t\terrs = multierror.Append(errs, errors.Wrap(err, \"package \"+p.Name))\n\t\t\treturn\n\t\t}\n\t\temitted.addAll(docsPkg.emitted)\n\t\tdocsPackages = append(docsPackages, docsPkg)\n\t\temittedPackagesByPath[docsPkg.Path] = docsPkg.ID\n\t})\n\n\t// Find the root package path (e.g. \"github.com/sourcegraph/sourcegraph\").\n\trootPkgPath := d.rootPkgPath()\n\n\t// Build an understanding of all pages in the workspace.\n\ttype page struct {\n\t\tid uint64 // the page itself\n\t\tchildren []uint64 // the children pages of this one\n\t}\n\tpagesByPath := map[string]*page{}\n\tfor _, docsPkg := range docsPackages {\n\t\trelPackagePath := d.relPackagePath(docsPkg.Path, rootPkgPath)\n\t\tif _, exists := pagesByPath[relPackagePath]; exists {\n\t\t\tpanic(\"invariant: no duplicate paths\")\n\t\t}\n\t\tpagesByPath[relPackagePath] = &page{id: docsPkg.ID}\n\t}\n\n\t// Emit the root documentationResult which will link all packages in this project to the\n\t// project itself. 
If the root of the workspace is a Go package, this may already exist\n\t// and would be that Go package's documentation.\n\tif rootPage, ok := pagesByPath[\"\"]; ok {\n\t\t_ = i.emitter.EmitDocumentationResultEdge(rootPage.id, i.projectID)\n\t} else {\n\t\t// Emit a blank index page.\n\t\trootDocumentationID := (&documentationResult{\n\t\t\tDocumentation: protocol.Documentation{\n\t\t\t\tIdentifier: \"\",\n\t\t\t\tSearchKey: \"\",\n\t\t\t\tNewPage: true,\n\t\t\t\tTags: []protocol.Tag{},\n\t\t\t},\n\t\t\tLabel: protocol.NewMarkupContent(\"\", protocol.PlainText),\n\t\t\tDetail: protocol.NewMarkupContent(\"\", protocol.PlainText),\n\t\t}).emit(i.emitter)\n\t\t_ = i.emitter.EmitDocumentationResultEdge(rootDocumentationID, i.projectID)\n\t\tpagesByPath[\"\"] = &page{id: rootDocumentationID}\n\t}\n\n\t// What we have now is pages for each package in the workspace, e.g.:\n\t//\n\t// \t/ (root index page)\n\t// \t/internal/lib/protocol (package page)\n\t// \t/internal/lib/util (package page)\n\t// \t/router/mux (package page)\n\t//\n\t// What we want ot add in is index pages (blank pages) for each parent path so we end up with:\n\t//\n\t// \t/ (root index page)\n\t// \t/internal (index page)\n\t// \t/internal/lib (index page)\n\t// \t/internal/lib/protocol (package page)\n\t// \t/internal/lib/util (package page)\n\t// \t/router (index page)\n\t// \t/router/mux (package page)\n\t//\n\t// Note: the actual paths do not have a leading slash.\n\tsort.Slice(docsPackages, func(i, j int) bool {\n\t\treturn docsPackages[i].Path < docsPackages[j].Path\n\t})\n\tfor _, docsPkg := range docsPackages {\n\t\trelPackagePath := d.relPackagePath(docsPkg.Path, rootPkgPath)\n\t\tpkgPathElements := strings.Split(relPackagePath, \"/\") // [\"internal\", \"lib\", \"protocol\"]\n\n\t\t// Walk over each path: \"internal\", \"internal/lib\", \"internal/lib/protocol\" and emit an\n\t\t// index page for each that does not have it.\n\t\tcurrentPath := \"\"\n\t\tfor _, element := range pkgPathElements 
{\n\t\t\tcurrentPath = path.Join(currentPath, element)\n\t\t\t_, ok := pagesByPath[currentPath]\n\t\t\tif ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcurrentPathElements := strings.Split(currentPath, \"/\")\n\t\t\tparentPath := path.Join(currentPathElements[:len(currentPathElements)-1]...)\n\n\t\t\t// Emit an index page at this path since one does not exist.\n\t\t\tpageID := (&documentationResult{\n\t\t\t\tDocumentation: protocol.Documentation{\n\t\t\t\t\tIdentifier: element,\n\t\t\t\t\tSearchKey: \"\", // don't index for search\n\t\t\t\t\tNewPage: true,\n\t\t\t\t\tTags: []protocol.Tag{},\n\t\t\t\t},\n\t\t\t\tLabel: protocol.NewMarkupContent(\"\", protocol.PlainText),\n\t\t\t\tDetail: protocol.NewMarkupContent(\"\", protocol.PlainText),\n\t\t\t}).emit(i.emitter)\n\t\t\tparentPage, ok := pagesByPath[parentPath]\n\t\t\tif !ok {\n\t\t\t\tpanic(\"invariant: parentPage should always exist(1)\")\n\t\t\t}\n\t\t\tparentPage.children = append(parentPage.children, pageID)\n\t\t\tpagesByPath[currentPath] = &page{id: pageID}\n\t\t}\n\t}\n\n\t// Finalize children of pages.\n\tfor _, docsPkg := range docsPackages {\n\t\trelPackagePath := d.relPackagePath(docsPkg.Path, rootPkgPath)\n\n\t\t// Attach the children sections of the page (consts/vars/etc) as children of the page itself.\n\t\tpage, ok := pagesByPath[relPackagePath]\n\t\tif !ok {\n\t\t\tpanic(\"invariant: page should always exist\")\n\t\t}\n\t\tpage.children = append(page.children, docsPkg.children...)\n\n\t\t// Attach package documentation pages as children of their parent (either another package\n\t\t// documentation page, or a blank index page.)\n\t\tif relPackagePath == \"\" {\n\t\t\t// root is not a child of anything.\n\t\t\tcontinue\n\t\t}\n\t\tpkgPathElements := strings.Split(relPackagePath, \"/\") // [\"internal\", \"lib\", \"protocol\"]\n\t\tparentPath := path.Join(pkgPathElements[:len(pkgPathElements)-1]...)\n\t\tparentPage, ok := pagesByPath[parentPath]\n\t\tif !ok {\n\t\t\tpanic(\"invariant: parentPage should 
always exist(2)\")\n\t\t}\n\t\tparentPage.children = append(parentPage.children, docsPkg.ID)\n\t}\n\n\t// Emit children edges of all pages.\n\tfor _, page := range pagesByPath {\n\t\t_ = i.emitter.EmitDocumentationChildrenEdge(page.children, page.id)\n\t}\n\n\ti.emittedDocumentationResults = emitted\n\ti.emittedDocumentationResultsByPackagePath = emittedPackagesByPath\n\treturn errs\n}",
"func indexServiceByClusterIP(obj interface{}) ([]string, error) {\n\treturn []string{obj.(*api.Service).Spec.ClusterIP}, nil\n}",
"func (p *PublicationServiceImpl) indexAll() {\n\tpubs := p.ListAll()\n\tfor _, pub := range pubs {\n\t\tp.search.Index(SearchObject{ID: pub.ID, Type: \"Publication\", Content: pub.Title})\n\t\tfor _, post := range pub.Posts {\n\t\t\tp.search.Index(SearchObject{ID: post.ID, Type: \"Post\", Content: post.Title})\n\t\t}\n\t}\n}",
"func (i indexer) Index(ctx context.Context, req IndexQuery) (\n\tresp *IndexResult, err error) {\n\n\tlog.Info(\"index [%v] root [%v] len_dirs=%v len_files=%v\",\n\t\treq.Key, req.Root, len(req.Dirs), len(req.Files))\n\tstart := time.Now()\n\t// Setup the response\n\tresp = NewIndexResult()\n\tif err = req.Normalize(); err != nil {\n\t\tlog.Info(\"index [%v] error: %v\", req.Key, err)\n\t\tresp.Error = errs.NewStructError(err)\n\t\treturn\n\t}\n\n\t// create index shards\n\tvar nshards int\n\tif nshards = i.cfg.NumShards; nshards == 0 {\n\t\tnshards = 1\n\t}\n\tnshards = utils.MinInt(nshards, maxShards)\n\ti.shards = make([]index.IndexWriter, nshards)\n\ti.root = getRoot(i.cfg, &req)\n\n\tfor n := range i.shards {\n\t\tname := path.Join(i.root, shardName(req.Key, n))\n\t\tixw, err := getIndexWriter(ctx, name)\n\t\tif err != nil {\n\t\t\tresp.Error = errs.NewStructError(err)\n\t\t\treturn resp, nil\n\t\t}\n\t\ti.shards[n] = ixw\n\t}\n\n\tfs := getFileSystem(ctx, i.root)\n\trepo := newRepoFromQuery(&req, i.root)\n\trepo.SetMeta(i.cfg.RepoMeta, req.Meta)\n\tresp.Repo = repo\n\n\t// Add query Files and scan Dirs for files to index\n\tnames, err := i.scanner(fs, &req)\n\tch := make(chan int, nshards)\n\tchnames := make(chan string, 100)\n\tgo func() {\n\t\tfor _, name := range names {\n\t\t\tchnames <- name\n\t\t}\n\t\tclose(chnames)\n\t}()\n\treqch := make(chan par.RequestFunc, nshards)\n\tfor _, shard := range i.shards {\n\t\treqch <- indexShard(&i, &req, shard, fs, chnames, ch)\n\t}\n\tclose(reqch)\n\terr = par.Requests(reqch).WithConcurrency(nshards).DoWithContext(ctx)\n\tclose(ch)\n\n\t// Await results, each indicating the number of files scanned\n\tfor num := range ch {\n\t\trepo.NumFiles += num\n\t}\n\n\trepo.NumShards = len(i.shards)\n\t// Flush our index shard files\n\tfor _, shard := range i.shards {\n\t\tshard.Flush()\n\t\trepo.SizeIndex += ByteSize(shard.IndexBytes())\n\t\trepo.SizeData += ByteSize(shard.DataBytes())\n\t\tlog.Debug(\"index flush %v (data) 
%v (index)\",\n\t\t\trepo.SizeData, repo.SizeIndex)\n\t}\n\trepo.ElapsedIndexing = time.Since(start)\n\trepo.TimeUpdated = time.Now().UTC()\n\n\tvar msg string\n\tif err != nil {\n\t\trepo.State = ERROR\n\t\tresp.SetError(err)\n\t\tmsg = \"error: \" + resp.Error.Error()\n\t} else {\n\t\trepo.State = OK\n\t\tmsg = \"ok \" + fmt.Sprintf(\n\t\t\t\"(%v files, %v data, %v index)\",\n\t\t\trepo.NumFiles, repo.SizeData, repo.SizeIndex)\n\t}\n\tlog.Info(\"index [%v] %v [%v]\", req.Key, msg, repo.ElapsedIndexing)\n\treturn\n}",
"func (mod *modContext) genIndex(exports []string) string {\n\tw := &bytes.Buffer{}\n\n\tname := mod.mod\n\tif name == \"\" {\n\t\tname = mod.pkg.Name\n\t}\n\n\tmod.genHeader(w, name)\n\n\t// If this is the root module, write out the package description.\n\tif mod.mod == \"\" {\n\t\tdescription := mod.pkg.Description\n\t\tif description != \"\" {\n\t\t\tdescription += \"\\n\\n\"\n\t\t}\n\t\tfmt.Fprint(w, description)\n\t}\n\n\t// If there are submodules, list them.\n\tvar children []string\n\tfor _, mod := range mod.children {\n\t\tchildren = append(children, mod.mod)\n\t}\n\tif len(children) > 0 {\n\t\tsort.Strings(children)\n\t\tfmt.Fprintf(w, \"<h3>Modules</h3>\\n\")\n\t\tfmt.Fprintf(w, \"<ul class=\\\"api\\\">\\n\")\n\t\tfor _, mod := range children {\n\t\t\tfmt.Fprintf(w, \" <li><a href=\\\"%s/\\\"><span class=\\\"symbol module\\\"></span>%s</a></li>\\n\", mod, mod)\n\t\t}\n\t\tfmt.Fprintf(w, \"</ul>\\n\\n\")\n\t}\n\n\t// If there are resources in the root, list them.\n\tvar resources []string\n\tfor _, r := range mod.resources {\n\t\tresources = append(resources, resourceName(r))\n\t}\n\tif len(resources) > 0 {\n\t\tsort.Strings(resources)\n\t\tfmt.Fprintf(w, \"<h3>Resources</h3>\\n\")\n\t\tfmt.Fprintf(w, \"<ul class=\\\"api\\\">\\n\")\n\t\tfor _, r := range resources {\n\t\t\tfmt.Fprintf(w, \" <li><a href=\\\"%s\\\"><span class=\\\"symbol resource\\\"></span>%s</a></li>\\n\", lower(r), r)\n\t\t}\n\t\tfmt.Fprintf(w, \"</ul>\\n\\n\")\n\t}\n\n\t// If there are functions in the root, list them.\n\tvar functions []string\n\tfor _, f := range mod.functions {\n\t\tfunctions = append(functions, tokenToName(f.Token))\n\t}\n\tif len(functions) > 0 {\n\t\tsort.Strings(functions)\n\t\tfmt.Fprintf(w, \"<h3>Functions</h3>\\n\")\n\t\tfmt.Fprintf(w, \"<ul class=\\\"api\\\">\\n\")\n\t\tfor _, f := range functions {\n\t\t\t// TODO: We want to use \"function\" rather than \"data source\" terminology. 
Need to add a\n\t\t\t// \"function\" class in the docs repo to replace \"datasource\".\n\t\t\tfmt.Fprintf(w, \" <li><a href=\\\"%s\\\"><span class=\\\"symbol datasource\\\"></span>%s</a></li>\\n\", lower(f), f)\n\t\t}\n\t\tfmt.Fprintf(w, \"</ul>\\n\\n\")\n\t}\n\n\treturn w.String()\n}",
"func indexesReport(c *clients.Client, response handle.ResponseHandle) error {\n\treq, err := http.NewRequest(\"GET\", c.Base()+\"/config/indexes\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn util.Execute(c, req, response)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
sort sorts the Resources in the index in display order and returns the ordered keys for the index. Packages are sorted by package name. Resources within a package are sorted by: [filename, namespace, name, kind, apiVersion]
|
// sort orders the Resources within each package for display and returns the
// package keys in lexicographic order. The per-package slices are sorted in
// place, so the map values are mutated as a side effect.
func (p TreeWriter) sort(indexByPackage map[string][]*yaml.RNode) []string {
	var keys []string
	for pkg, nodes := range indexByPackage {
		// display order within a package is defined by compareNodes
		sort.Slice(nodes, func(a, b int) bool {
			return compareNodes(nodes[a], nodes[b])
		})
		keys = append(keys, pkg)
	}
	// package names are returned sorted lexicographically
	sort.Strings(keys)
	return keys
}
|
[
"func sortResources(resources []astmodel.InternalTypeName) []astmodel.InternalTypeName {\n\tsort.Slice(resources, func(i, j int) bool {\n\t\tiVal := resources[i]\n\t\tjVal := resources[j]\n\n\t\treturn iVal.PackageReference().PackageName() < jVal.PackageReference().PackageName() ||\n\t\t\tiVal.PackageReference().PackageName() < jVal.PackageReference().PackageName() && iVal.Name() < jVal.Name()\n\t})\n\n\treturn resources\n}",
"func (i IndexFile) sortPackages() {\n\tfor _, versions := range i.Entries {\n\t\tsort.Sort(sort.Reverse(versions))\n\t}\n}",
"func SortResources(resources []*metav1.APIResourceList) {\n\tsort.SliceStable(resources, func(i, j int) bool {\n\t\tleft := resources[i]\n\t\tleftGV, _ := schema.ParseGroupVersion(left.GroupVersion)\n\t\t// not checking error because it should be impossible to fail to parse data coming from the\n\t\t// apiserver\n\t\tif leftGV.Group == \"extensions\" {\n\t\t\t// always sort extensions at the bottom by saying left is \"greater\"\n\t\t\treturn false\n\t\t}\n\n\t\tright := resources[j]\n\t\trightGV, _ := schema.ParseGroupVersion(right.GroupVersion)\n\t\t// not checking error because it should be impossible to fail to parse data coming from the\n\t\t// apiserver\n\t\tif rightGV.Group == \"extensions\" {\n\t\t\t// always sort extensions at the bottom by saying left is \"less\"\n\t\t\treturn true\n\t\t}\n\n\t\treturn i < j\n\t})\n}",
"func orderStackResourceKeys(m map[string]StackResource) []string {\n\tret := make([]string, len(m))\n\ti := 0\n\n\tfor k := range m {\n\t\tret[i] = k\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(ret)))\n\treturn ret\n}",
"func Sort(sortMetricName string, sortType string, rawMetrics *FormatedLevelMetric) (*FormatedLevelMetric, int) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tglog.Errorln(err)\n\t\t\tdebug.PrintStack()\n\t\t}\n\t}()\n\n\tif sortMetricName == \"\" {\n\t\treturn rawMetrics, -1\n\t}\n\n\t// default sort type is descending order\n\tif sortType == \"\" {\n\t\tsortType = ResultSortTypeDesc\n\t}\n\n\tvar currentResourceMap = make(map[string]int)\n\n\t// {<Resource Name>: <Ordering>}\n\tvar indexMap = make(map[string]int)\n\ti := 0\n\n\t// each metricItem is the result for a specific metric name\n\t// so we find the metricItem with sortMetricName, and sort it\n\tfor _, metricItem := range rawMetrics.Results {\n\t\t// only vector type result can be sorted\n\t\tif metricItem.Data.ResultType == ResultTypeVector && metricItem.Status == MetricStatusSuccess {\n\t\t\tif metricItem.MetricName == sortMetricName {\n\t\t\t\tif sortType == ResultSortTypeAsc {\n\t\t\t\t\t// asc\n\t\t\t\t\tsort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *map[string]interface{}) bool {\n\t\t\t\t\t\tvalue1 := (*p)[ResultItemValue].([]interface{})\n\t\t\t\t\t\tvalue2 := (*q)[ResultItemValue].([]interface{})\n\t\t\t\t\t\tv1, _ := strconv.ParseFloat(value1[len(value1)-1].(string), 64)\n\t\t\t\t\t\tv2, _ := strconv.ParseFloat(value2[len(value2)-1].(string), 64)\n\t\t\t\t\t\tif v1 == v2 {\n\t\t\t\t\t\t\tresourceName1 := (*p)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\t\t\tresourceName2 := (*q)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\t\t\treturn resourceName1.(string) < resourceName2.(string)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn v1 < v2\n\t\t\t\t\t}})\n\t\t\t\t} else {\n\t\t\t\t\t// desc\n\t\t\t\t\tsort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *map[string]interface{}) bool {\n\t\t\t\t\t\tvalue1 := (*p)[ResultItemValue].([]interface{})\n\t\t\t\t\t\tvalue2 := 
(*q)[ResultItemValue].([]interface{})\n\t\t\t\t\t\tv1, _ := strconv.ParseFloat(value1[len(value1)-1].(string), 64)\n\t\t\t\t\t\tv2, _ := strconv.ParseFloat(value2[len(value2)-1].(string), 64)\n\n\t\t\t\t\t\tif v1 == v2 {\n\t\t\t\t\t\t\tresourceName1 := (*p)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\t\t\tresourceName2 := (*q)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\t\t\treturn resourceName1.(string) > resourceName2.(string)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn v1 > v2\n\t\t\t\t\t}})\n\t\t\t\t}\n\n\t\t\t\tfor _, r := range metricItem.Data.Result {\n\t\t\t\t\t// record the ordering of resource_name to indexMap\n\t\t\t\t\t// example: {\"metric\":{ResultItemMetricResourceName: \"Deployment:xxx\"},\"value\":[1541142931.731,\"3\"]}\n\t\t\t\t\tresourceName, exist := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\tif exist {\n\t\t\t\t\t\tif _, exist := indexMap[resourceName.(string)]; !exist {\n\t\t\t\t\t\t\tindexMap[resourceName.(string)] = i\n\t\t\t\t\t\t\ti = i + 1\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// iterator all metric to find max metricItems length\n\t\t\tfor _, r := range metricItem.Data.Result {\n\t\t\t\tk, ok := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\tif ok {\n\t\t\t\t\tcurrentResourceMap[k.(string)] = 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n\tvar keys []string\n\tfor k := range currentResourceMap {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, resource := range keys {\n\t\tif _, exist := indexMap[resource]; !exist {\n\t\t\tindexMap[resource] = i\n\t\t\ti = i + 1\n\t\t}\n\t}\n\n\t// sort other metric\n\tfor i := 0; i < len(rawMetrics.Results); i++ {\n\t\tre := rawMetrics.Results[i]\n\t\tif re.Data.ResultType == ResultTypeVector && re.Status == MetricStatusSuccess {\n\t\t\tsortedMetric := make([]map[string]interface{}, len(indexMap))\n\t\t\tfor j := 0; j < 
len(re.Data.Result); j++ {\n\t\t\t\tr := re.Data.Result[j]\n\t\t\t\tk, exist := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\tif exist {\n\t\t\t\t\tindex, exist := indexMap[k.(string)]\n\t\t\t\t\tif exist {\n\t\t\t\t\t\tsortedMetric[index] = r\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trawMetrics.Results[i].Data.Result = sortedMetric\n\t\t}\n\t}\n\n\treturn rawMetrics, len(indexMap)\n}",
"func (d *Default) OrderResources(ctx context.Context, sp *spec.Spec, opts *DefaultOptions) error {\n\tsort.Slice(sp.Paths, func(i, j int) bool {\n\t\tp1, p2 := sp.Paths[i], sp.Paths[j]\n\n\t\treturn p1.Name < p2.Name\n\t})\n\n\tfor _, p := range sp.Paths {\n\t\tsort.Slice(p.Operations, func(i, j int) bool {\n\t\t\to1, o2 := p.Operations[i], p.Operations[j]\n\n\t\t\treturn o1.Name < o2.Name\n\t\t})\n\n\t\tfor _, o := range p.Operations {\n\t\t\tsort.Slice(o.Parameters, func(i, j int) bool {\n\t\t\t\tp1, p2 := o.Parameters[i], o.Parameters[j]\n\n\t\t\t\treturn p1.Name < p2.Name\n\t\t\t})\n\n\t\t\tsort.Slice(o.Responses, func(i, j int) bool {\n\t\t\t\tr1, r2 := o.Responses[i], o.Responses[j]\n\n\t\t\t\treturn r1.Name < r2.Name\n\t\t\t})\n\n\t\t\tfor _, cb := range o.Callbacks {\n\t\t\t\tfor _, cbPath := range cb {\n\t\t\t\t\tsort.Slice(cb, func(i, j int) bool {\n\t\t\t\t\t\tp1, p2 := cb[i], cb[j]\n\n\t\t\t\t\t\treturn p1.Name < p2.Name\n\t\t\t\t\t})\n\n\t\t\t\t\tfor _, cbOp := range cbPath.Operations {\n\t\t\t\t\t\tsort.Slice(cbOp.Parameters, func(i, j int) bool {\n\t\t\t\t\t\t\tp1, p2 := cbOp.Parameters[i], cbOp.Parameters[j]\n\n\t\t\t\t\t\t\treturn p1.Name < p2.Name\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tsort.Slice(cbOp.Responses, func(i, j int) bool {\n\t\t\t\t\t\t\tr1, r2 := cbOp.Responses[i], cbOp.Responses[j]\n\n\t\t\t\t\t\t\treturn r1.Name < r2.Name\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func orderStackCRDKeys(m map[string]apiextensions.CustomResourceDefinition) []string {\n\tret := make([]string, len(m))\n\ti := 0\n\n\tfor k := range m {\n\t\tret[i] = k\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(ret)))\n\treturn ret\n}",
"func sortByKind(manifests []releaseutil.Manifest) []releaseutil.Manifest {\n\tordering := kuberesource.InstallOrder\n\tks := newKindSorter(manifests, ordering)\n\tsort.Sort(ks)\n\treturn ks.manifests\n}",
"func SortByKind(manifests []*resource.Resource, ordering SortOrder) []*resource.Resource {\n\tks := newKindSorter(manifests, ordering)\n\tsort.Sort(ks)\n\treturn ks.resources\n}",
"func APIResourcesToStrings(resources []kube.APIResourceInfo, includeKinds bool) []string {\n\tresMap := map[string]bool{}\n\tfor _, r := range resources {\n\t\tgroupVersion := r.GroupVersionResource.GroupVersion().String()\n\t\tresMap[groupVersion] = true\n\t\tif includeKinds {\n\t\t\tresMap[groupVersion+\"/\"+r.GroupKind.Kind] = true\n\t\t}\n\n\t}\n\tvar res []string\n\tfor k := range resMap {\n\t\tres = append(res, k)\n\t}\n\tsort.Slice(res, func(i, j int) bool {\n\t\treturn res[i] < res[j]\n\t})\n\treturn res\n}",
"func SortResourcesByType(resources []Resource) {\n\tsort.Sort(resourcesSortedByType(resources))\n}",
"func SortedResourceNames(list api.ResourceList) []api.ResourceName {\n\tresources := make([]api.ResourceName, 0, len(list))\n\tfor res := range list {\n\t\tresources = append(resources, res)\n\t}\n\tsort.Sort(SortableResourceNames(resources))\n\treturn resources\n}",
"func sortedMapKeysbyName(m *jsonschema.Index) []string {\n\tvar schemas []*jsonschema.Schema\n\tfor _, v := range *m {\n\t\tschemas = append(schemas, v)\n\t}\n\tsort.Sort(byName(schemas))\n\n\tvar keys []string\n\tfor _, v := range schemas {\n\t\tkeys = append(keys, v.Pointer)\n\t}\n\treturn keys\n}",
"func orderStackIconKeys(m map[string]*v1alpha1.IconSpec) []string {\n\tret := make([]string, len(m))\n\ti := 0\n\n\tfor k := range m {\n\t\tret[i] = k\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(ret)))\n\treturn ret\n}",
"func normalizeAPIGroupResources(apiGroupResource *restmapper.APIGroupResources) []metav1.APIResource {\n\tvar versionedResources []metav1.APIResource\n\tfor version, vr := range apiGroupResource.VersionedResources {\n\t\tfor _, resource := range vr {\n\t\t\tresource.Group = apiGroupResource.Group.Name\n\t\t\tresource.Version = version\n\t\t\tversionedResources = append(versionedResources, resource)\n\t\t}\n\t}\n\n\t// Ensure deterministic output.\n\tpreferredVersion := apiGroupResource.Group.PreferredVersion.Version\n\tsort.SliceStable(versionedResources, func(i, j int) bool {\n\t\tif versionedResources[i].Version == versionedResources[j].Version {\n\t\t\treturn versionedResources[i].Name < versionedResources[j].Name\n\t\t}\n\n\t\t// preferred version\n\t\tif versionedResources[i].Version == preferredVersion {\n\t\t\treturn true\n\t\t}\n\t\tif versionedResources[j].Version == preferredVersion {\n\t\t\treturn false\n\t\t}\n\n\t\t// compare kube-like version\n\t\t// Versions will be sorted based on GA/alpha/beta first and then major and minor versions.\n\t\t// e.g. v2, v1, v1beta2, v1beta1, v1alpha1.\n\t\treturn version.CompareKubeAwareVersionStrings(versionedResources[i].Version, versionedResources[j].Version) > 0\n\t})\n\n\t// pick out preferred version or highest semantic version\n\tregistered := make(map[string]bool)\n\tvar normalizedVersionResources []metav1.APIResource\n\tfor _, vr := range versionedResources {\n\t\tif registered[vr.Name] {\n\t\t\tcontinue\n\t\t}\n\t\tnormalizedVersionResources = append(normalizedVersionResources, vr)\n\t\tregistered[vr.Name] = true\n\t}\n\treturn normalizedVersionResources\n}",
"func (v ResourceNodes) Sort() {\n\tsort.Sort(v)\n}",
"func (tf tFiles) sortByKey(icmp *iComparer) {\n\tsort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})\n}",
"func (p TreeWriter) index(nodes []*yaml.RNode) map[string][]*yaml.RNode {\n\t// index the ResourceNodes by package\n\tindexByPackage := map[string][]*yaml.RNode{}\n\tfor i := range nodes {\n\t\tmeta, err := nodes[i].GetMeta()\n\t\tif err != nil || meta.Kind == \"\" {\n\t\t\t// not a resource\n\t\t\tcontinue\n\t\t}\n\t\tpkg := filepath.Dir(meta.Annotations[kioutil.PathAnnotation])\n\t\tindexByPackage[pkg] = append(indexByPackage[pkg], nodes[i])\n\t}\n\treturn indexByPackage\n}",
"func (i IndexFile) SortEntries() {\n\tfor _, versions := range i.Entries {\n\t\tsort.Sort(sort.Reverse(versions))\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
getFields looks up p.Fields from leaf and structures them into treeFields. TODO(pwittrock): simplify this function
|
// getFields looks up each of p.Fields on leaf and structures the results into
// a treeFields hierarchy ready for display: non-nested fields become single
// value entries, while nested fields are grouped under one synthetic element
// per matching list item. The returned list and each level of nesting are
// sorted, and list elements are renamed to their display index.
// Returns an error if a field lookup or YAML serialization fails.
// TODO(pwittrock): simplify this function
func (p TreeWriter) getFields(leaf *yaml.RNode) (treeFields, error) {
	// fieldsByName collects one *treeField per queried field name
	fieldsByName := map[string]*treeField{}
	// index nested and non-nested fields
	for i := range p.Fields {
		f := p.Fields[i]
		// run the field query against the leaf node
		seq, err := leaf.Pipe(&f)
		if err != nil {
			return nil, err
		}
		if seq == nil {
			// field not present on this Resource -- nothing to record
			continue
		}
		if fieldsByName[f.Name] == nil {
			fieldsByName[f.Name] = &treeField{name: f.Name}
		}
		// non-nested field -- add directly to the treeFields list
		if f.SubName == "" {
			// non-nested field -- only 1 element
			val, err := yaml.String(seq.Content()[0], yaml.Trim, yaml.Flow)
			if err != nil {
				return nil, err
			}
			fieldsByName[f.Name].value = val
			continue
		}
		// nested-field -- create a parent elem, and index by the 'match' value
		if fieldsByName[f.Name].subFieldByMatch == nil {
			fieldsByName[f.Name].subFieldByMatch = map[string]treeFields{}
		}
		index := fieldsByName[f.Name].subFieldByMatch
		for j := range seq.Content() {
			elem := seq.Content()[j]
			// matches identifies which list element this value came from
			matches := f.Matches[elem]
			str, err := yaml.String(elem, yaml.Trim, yaml.Flow)
			if err != nil {
				return nil, err
			}
			// map the field by the name of the element
			// index the subfields by the matching element so we can put all the fields for the
			// same element under the same branch
			matchKey := strings.Join(matches, "/")
			index[matchKey] = append(index[matchKey], &treeField{name: f.SubName, value: str})
		}
	}
	// iterate over collection of all queried fields in the Resource
	for _, field := range fieldsByName {
		// iterate over collection of elements under the field -- indexed by element name
		for match, subFields := range field.subFieldByMatch {
			// create a new element for this collection of fields
			// note: we will convert name to an index later, but keep the match for sorting
			elem := &treeField{name: match}
			field.matchingElementsAndFields = append(field.matchingElementsAndFields, elem)
			// iterate over collection of queried fields for the element
			for i := range subFields {
				// add to the list of fields for this element
				elem.matchingElementsAndFields = append(elem.matchingElementsAndFields, subFields[i])
			}
		}
		// clear this cached data
		field.subFieldByMatch = nil
	}
	// put the fields in a list so they are ordered
	fieldList := treeFields{}
	for _, v := range fieldsByName {
		fieldList = append(fieldList, v)
	}
	// sort the fields
	sort.Sort(fieldList)
	for i := range fieldList {
		field := fieldList[i]
		// sort the elements under this field
		sort.Sort(field.matchingElementsAndFields)
		// NOTE: inner i shadows the outer loop index; each element is renamed
		// to its position within its own parent field
		for i := range field.matchingElementsAndFields {
			element := field.matchingElementsAndFields[i]
			// sort the elements under a list field by their name
			sort.Sort(element.matchingElementsAndFields)
			// set the name of the element to its index
			element.name = fmt.Sprintf("%d", i)
		}
	}
	return fieldList, nil
}
|
[
"func (Tree) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\t/*\n\t\t* ζ η»ζηΌη ,η¨δΊεΏ«ιζ₯ζΎ, ζ―δΈε±η±4δ½ε符η»ζ,η¨-εε²\n\t\t* ε¦η¬¬δΈε±:0001 第δΊε±:0001-0001 第δΈε±:0001-0001-0001\n\t\t */\n\t\tfield.Uint64(\"parent_id\").\n\t\t\tComment(\"ηΆηΊ§η±»ε«\").\n\t\t\tUnique().\n\t\t\tImmutable(),\n\t\tfield.String(\"tree_path\").\n\t\t\tComment(\"ζ θ·―εΎ\"),\n\t\tfield.String(\"tree_index\").\n\t\t\tComment(\"ζεΊεΊε·\"),\n\t\tfield.Uint32(\"tree_level\").\n\t\t\tComment(\"ζ ε±ηΊ§\"),\n\t}\n}",
"func getParentTreeFields(treePath string) (treeNames, treePaths []string) {\n\tif treePath == \"\" {\n\t\treturn treeNames, treePaths\n\t}\n\n\ttreeNames = strings.Split(treePath, \"/\")\n\ttreePaths = make([]string, len(treeNames))\n\tfor i := range treeNames {\n\t\ttreePaths[i] = strings.Join(treeNames[:i+1], \"/\")\n\t}\n\treturn treeNames, treePaths\n}",
"func (_struct *Struct) Fields() (Fields, error) {\n\tvar goFields Fields\n\tstructType := _struct.StructType()\n\tif structType == nil {\n\t\treturn nil, fmt.Errorf(\"no struct type in %#+v\", _struct)\n\t}\n\tfor idx, field := range structType.Fields.List {\n\t\ttyp, err := _struct.toType(field.Type)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to lookup type '%s': %w\", field.Type, err)\n\t\t}\n\t\tgoFields = append(goFields, &Field{\n\t\t\tField: *field,\n\t\t\tStruct: _struct,\n\t\t\tNames: field.Names,\n\t\t\tIndex: uint(idx),\n\t\t\tTypeValue: typ,\n\t\t})\n\t}\n\treturn goFields, nil\n}",
"func getNodeFields() []string {\n\trt := reflect.TypeOf((*tailcfg.Node)(nil)).Elem()\n\tret := make([]string, rt.NumField())\n\tfor i := 0; i < rt.NumField(); i++ {\n\t\tret[i] = rt.Field(i).Name\n\t}\n\treturn ret\n}",
"func TraverseFields(v interface{}, processField ProcessValue) error {\n\treturn TraverseValueFields(reflect.ValueOf(v), processField)\n}",
"func fixFields(n, parent *node, depth int) {\n\tn.parent = parent\n\tn.depth = depth\n\tfor _, c := range n.children {\n\t\tfixFields(c, n, depth+1)\n\t}\n}",
"func (n Node) AllFields() []interface{} {\n\tlabels := make([]interface{}, len(n.Labels))\n\tfor i, label := range n.Labels {\n\t\tlabels[i] = label\n\t}\n\treturn []interface{}{n.NodeIdentity, labels, n.Properties}\n}",
"func (pce *ppdCacheEntry) getFields() (cdd.PrinterDescriptionSection, string, string, lib.DuplexVendorMap) {\n\tpce.mutex.Lock()\n\tdefer pce.mutex.Unlock()\n\treturn pce.description, pce.manufacturer, pce.model, pce.duplexMap\n}",
"func typeFields(t reflect.Type) []field {\n\t// Anonymous fields to explore at the current level and the next.\n\tcurrent := []field{}\n\tnext := []field{{typ: t}}\n\n\t// Count of queued names for current level and the next.\n\tcount := map[reflect.Type]int{}\n\tnextCount := map[reflect.Type]int{}\n\n\t// Types already visited at an earlier level.\n\tvisited := map[reflect.Type]bool{}\n\n\t// Fields found.\n\tvar fields []field\n\n\tfor len(next) > 0 {\n\t\tcurrent, next = next, current[:0]\n\t\tcount, nextCount = nextCount, map[reflect.Type]int{}\n\n\t\tfor _, f := range current {\n\t\t\tif visited[f.typ] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvisited[f.typ] = true\n\n\t\t\t// Scan f.typ for fields to include.\n\t\t\tfor i := 0; i < f.typ.NumField(); i++ {\n\t\t\t\tsf := f.typ.Field(i)\n\t\t\t\tisUnexported := sf.PkgPath != \"\"\n\t\t\t\tif sf.Anonymous {\n\t\t\t\t\tt := sf.Type\n\t\t\t\t\tif isUnexported && t.Kind() != reflect.Struct {\n\t\t\t\t\t\t// Ignore embedded fields of unexported non-struct types.\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// Do not ignore embedded fields of unexported struct types\n\t\t\t\t\t// since they may have exported fields.\n\t\t\t\t} else if isUnexported {\n\t\t\t\t\t// Ignore unexported non-embedded fields.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tindex := make([]int, len(f.index)+1)\n\t\t\t\tcopy(index, f.index)\n\t\t\t\tindex[len(f.index)] = i\n\n\t\t\t\tft := sf.Type\n\n\t\t\t\t// Record found field and index sequence.\n\t\t\t\tif !sf.Anonymous || ft.Kind() != reflect.Struct {\n\t\t\t\t\tfields = append(fields, field{\n\t\t\t\t\t\tname: sf.Name,\n\t\t\t\t\t\tindex: index,\n\t\t\t\t\t\ttyp: ft,\n\t\t\t\t\t})\n\t\t\t\t\tif count[f.typ] > 1 {\n\t\t\t\t\t\t// If there were multiple instances, add a second,\n\t\t\t\t\t\t// so that the annihilation code will see a duplicate.\n\t\t\t\t\t\t// It only cares about the distinction between 1 or 2,\n\t\t\t\t\t\t// so don't bother generating any more copies.\n\t\t\t\t\t\tfields = 
append(fields, fields[len(fields)-1])\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Record new anonymous struct to explore in next round.\n\t\t\t\tnextCount[ft]++\n\t\t\t\tif nextCount[ft] == 1 {\n\t\t\t\t\tnext = append(next, field{name: ft.Name(), index: index, typ: ft})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Sort(byIndex(fields))\n\n\treturn fields\n}",
"func getFields(v reflect.Value) map[string]structField {\n\ttyp := v.Type()\n\tnumField := v.NumField()\n\tresult := make(map[string]structField, numField)\n\tfor i := 0; i < numField; i++ {\n\t\tfield := typ.Field(i)\n\t\tif len(field.PkgPath) != 0 {\n\t\t\t// unexported\n\t\t\tcontinue\n\t\t}\n\t\tvalue := v.Field(i)\n\t\tsf := structField{Field: field, Value: value}\n\t\tsf.JSONName, sf.OmitEmpty = sf.jsonFieldName()\n\t\tresult[field.Name] = sf\n\t}\n\treturn result\n}",
"func getFieldmap(t reflect.Type) (fm fieldmap, err error) {\n\t// if we have a fieldmap cached, return it\n\tt, err = BaseStructType(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfm, ok := fieldmapCache[t]\n\tif ok {\n\t\treturn fm, nil\n\t} else {\n\t\tfm = fieldmap{}\n\t}\n\n\tvar f reflect.StructField\n\tvar name string\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf = t.Field(i)\n\t\tname = StructToDatabaseFieldNameMapper(f)\n\t\tfm[name] = i\n\t}\n\tfieldmapCache[t] = fm\n\treturn fm, nil\n}",
"func (s Strategy) GetFieldsForSubDocument(model string, foreignfield string) []map[string]interface{} {\n\tvar fields []map[string]interface{}\n\n\tfor _, f := range s.Map.Entities[model].Fields { // search foreign field in []map[string]interface{}\n\t\tif f[\"foreign\"] == foreignfield {\n\t\t\tfi := f[\"fields\"].([]interface{})\n\t\t\t// Convert the []interface into []map[string]interface{}\n\t\t\tfields = make([]map[string]interface{}, len(fi))\n\t\t\tfor i := range fields {\n\t\t\t\tfields[i] = fi[i].(map[string]interface{})\n\t\t\t}\n\t\t\treturn fields\n\t\t}\n\t}\n\treturn fields\n}",
"func TopLevelFields(paths []string) []string {\n\tseen := make(map[string]struct{}, len(paths))\n\tout := make([]string, 0, len(paths))\n\tfor _, path := range paths {\n\t\tparts := strings.SplitN(path, \".\", 2)\n\t\tif _, ok := seen[parts[0]]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tseen[parts[0]] = struct{}{}\n\t\tout = append(out, parts[0])\n\t}\n\treturn out\n}",
"func GetFields(q graphql.Querier, query *querybuilder.Query) ([]Field, error) {\n\ttyp, ok := schema.GetQueryType()\n\tif !ok {\n\t\treturn nil, errors.New(\"No QueryType present in schema\")\n\t}\n\n\tfor _, node := range query.List() {\n\t\tif node.ConcreteType == \"\" {\n\t\t\tfield, ok := typ.GetField(node.Name)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Missing field %q from type %q\", node.Name, typ.Name)\n\t\t\t}\n\n\t\t\ttyp, ok = schema.GetType(field.GetTypeName())\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Missing type %q\", field.GetTypeName())\n\t\t\t}\n\t\t} else {\n\t\t\ttyp, ok = schema.GetType(node.ConcreteType)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Missing type %q\", node.ConcreteType)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn typ.Fields, nil\n}",
"func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry {\n\treturn e.structFields\n}",
"func BottomLevelFields(paths []string) []string {\n\tseen := make(map[string]struct{}, len(paths))\n\tfor _, path := range paths {\n\t\tprefix := path\n\t\tif i := strings.LastIndex(prefix, \".\"); i >= 0 {\n\t\t\tprefix = prefix[:i]\n\t\t}\n\t\tif _, ok := seen[prefix]; ok {\n\t\t\tdelete(seen, prefix)\n\t\t}\n\t\tseen[path] = struct{}{}\n\t}\n\tout := make([]string, 0, len(seen))\n\tfor k := range seen {\n\t\tout = append(out, k)\n\t}\n\treturn out\n}",
"func (sc *SimpleProjection) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {\n\tinner, err := sc.Input.GetFields(ctx, vcursor, bindVars)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &sqltypes.Result{Fields: sc.buildFields(inner)}, nil\n}",
"func getComparableFields() []string {\n\tfields := []string{}\n\n\tfor _, fieldName := range getContainerFields() {\n\t\t// Skip some fields\n\t\tif unicode.IsLower((rune)(fieldName[0])) {\n\t\t\tcontinue\n\t\t}\n\n\t\tskip := false\n\t\tfor _, f := range compareSkipFields {\n\t\t\tif f == fieldName {\n\t\t\t\tskip = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !skip {\n\t\t\tfields = append(fields, fieldName)\n\t\t}\n\t}\n\n\treturn fields\n}",
"func (f Field) GetSubFields(filter string) []SubField {\n\tvalues := []SubField{}\n\tfor _, sub := range f.SubFields {\n\t\tif strings.Contains(filter, sub.Code) {\n\t\t\tvalue := SubField{\n\t\t\t\tCode: sub.Code,\n\t\t\t\tValue: sub.Value,\n\t\t\t}\n\t\t\tvalues = append(values, value)\n\t\t}\n\t}\n\treturn values\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCopyRecipeToMyRecipesWithChangesParams creates a new CopyRecipeToMyRecipesWithChangesParams object with the default values initialized.
|
// NewCopyRecipeToMyRecipesWithChangesParams creates a new
// CopyRecipeToMyRecipesWithChangesParams object with the default values
// initialized; the request timeout defaults to cr.DefaultTimeout.
func NewCopyRecipeToMyRecipesWithChangesParams() *CopyRecipeToMyRecipesWithChangesParams {
	// note: the empty `var ()` declaration emitted by the generator was removed
	return &CopyRecipeToMyRecipesWithChangesParams{
		timeout: cr.DefaultTimeout,
	}
}
|
[
"func NewCopyRecipeToMyRecipesWithChangesParamsWithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewCopyRecipeToMyRecipesWithChangesParamsWithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithContext(ctx context.Context) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewCopyRecipeToMyRecipesWithChangesParamsWithContext(ctx context.Context) *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithJSONBody(jSONBody *models.Recipe) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetJSONBody(jSONBody)\n\treturn o\n}",
"func NewGetRecipesParams() GetRecipesParams {\n\n\tvar (\n\t\t// initialize parameters with default values\n\n\t\tingredient1Default = string(\"\")\n\t\tingredient2Default = string(\"\")\n\t\tingredient3Default = string(\"\")\n\t\tseasonDefault = string(\"\")\n\t)\n\n\treturn GetRecipesParams{\n\t\tIngredient1: &ingredient1Default,\n\n\t\tIngredient2: &ingredient2Default,\n\n\t\tIngredient3: &ingredient3Default,\n\n\t\tSeason: &seasonDefault,\n\t}\n}",
"func (s *TemplateService) NewCopyTemplateParams(id string) *CopyTemplateParams {\n\tp := &CopyTemplateParams{}\n\tp.p = make(map[string]interface{})\n\tp.p[\"id\"] = id\n\treturn p\n}",
"func NewCopyToArgs(nodeID NodeID, targetNodeID NodeID) *CopyToArgs {\n\targs := new(CopyToArgs)\n\targs.NodeID = nodeID\n\targs.TargetNodeID = targetNodeID\n\treturn args\n}",
"func cloneParams(params *chaincfg.Params) *chaincfg.Params {\n\t// Encode via gob.\n\tbuf := new(bytes.Buffer)\n\tenc := gob.NewEncoder(buf)\n\tenc.Encode(params)\n\n\t// Decode via gob to make a deep copy.\n\tvar paramsCopy chaincfg.Params\n\tdec := gob.NewDecoder(buf)\n\tdec.Decode(¶msCopy)\n\treturn ¶msCopy\n}",
"func NewCopyNotebookModel()(*CopyNotebookModel) {\n m := &CopyNotebookModel{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}",
"func (f *FilterParams) Clone() *FilterParams {\n\tclone := &FilterParams{\n\t\tFilters: []*model.FilterSet{},\n\t}\n\tfor _, filters := range f.Filters {\n\t\tclone.Filters = append(clone.Filters, filters.Clone())\n\t}\n\tfor _, highlights := range f.Highlights {\n\t\tclone.Highlights = append(clone.Highlights, highlights.Clone())\n\t}\n\tclone.Invert = f.Invert\n\tclone.Variables = append(clone.Variables, f.Variables...)\n\tclone.Size = f.Size\n\tclone.DataMode = f.DataMode\n\treturn clone\n}",
"func NewTransferParams(toWalletableId int32, toWalletableType string, fromWalletableId int32, fromWalletableType string, amount int32, date string, companyId int32, ) *TransferParams {\n\tthis := TransferParams{}\n\tthis.ToWalletableId = toWalletableId\n\tthis.ToWalletableType = toWalletableType\n\tthis.FromWalletableId = fromWalletableId\n\tthis.FromWalletableType = fromWalletableType\n\tthis.Amount = amount\n\tthis.Date = date\n\tthis.CompanyId = companyId\n\treturn &this\n}",
"func NewSaveChangesForIngredientParams() *SaveChangesForIngredientParams {\n\tvar ()\n\treturn &SaveChangesForIngredientParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithRecipeID(recipeID string) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetRecipeID(recipeID)\n\treturn o\n}",
"func NewRefreshStackRecipesParams() *RefreshStackRecipesParams {\n\tvar ()\n\treturn &RefreshStackRecipesParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewCopyNotebookModel()(*CopyNotebookModel) {\n m := &CopyNotebookModel{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func linkedConstructInputsCopyTo(ctx *pulumi.Context, inputs map[string]interface{}, args interface{}) error"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCopyRecipeToMyRecipesWithChangesParamsWithTimeout creates a new CopyRecipeToMyRecipesWithChangesParams object with the default values initialized, and the ability to set a timeout on a request
|
// NewCopyRecipeToMyRecipesWithChangesParamsWithTimeout creates a new
// CopyRecipeToMyRecipesWithChangesParams object with the default values
// initialized, and the ability to set a timeout on the request.
func NewCopyRecipeToMyRecipesWithChangesParamsWithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {
	// note: the empty `var ()` declaration emitted by the generator was removed
	return &CopyRecipeToMyRecipesWithChangesParams{
		timeout: timeout,
	}
}
|
[
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewCopyRecipeToMyRecipesWithChangesParams() *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewRefreshStackRecipesParamsWithTimeout(timeout time.Duration) *RefreshStackRecipesParams {\n\tvar ()\n\treturn &RefreshStackRecipesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewCopyRecipeToMyRecipesWithChangesParamsWithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetPublicsRecipeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewNarrowSearchRecipeParamsWithTimeout(timeout time.Duration) *NarrowSearchRecipeParams {\n\tvar (\n\t\tsortbyDefault = string(\"name\")\n\t\tsortdirDefault = string(\"desc\")\n\t)\n\treturn &NarrowSearchRecipeParams{\n\t\tSortby: &sortbyDefault,\n\t\tSortdir: &sortdirDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewNodeModifyParamsWithTimeout(timeout time.Duration) *NodeModifyParams {\n\treturn &NodeModifyParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *BackupsCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewQueryChangesParamsWithTimeout(timeout time.Duration) *QueryChangesParams {\n\treturn &QueryChangesParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *OrderNewParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewRebalanceParamsWithTimeout(timeout time.Duration) *RebalanceParams {\n\tvar (\n\t\tbestEffortsDefault = bool(false)\n\t\tbootstrapDefault = bool(false)\n\t\tdowntimeDefault = bool(false)\n\t\tdryRunDefault = bool(false)\n\t\tincludeConsumingDefault = bool(false)\n\t\tminAvailableReplicasDefault = int32(1)\n\t\treassignInstancesDefault = bool(false)\n\t)\n\treturn &RebalanceParams{\n\t\tBestEfforts: &bestEffortsDefault,\n\t\tBootstrap: &bootstrapDefault,\n\t\tDowntime: &downtimeDefault,\n\t\tDryRun: &dryRunDefault,\n\t\tIncludeConsuming: &includeConsumingDefault,\n\t\tMinAvailableReplicas: &minAvailableReplicasDefault,\n\t\tReassignInstances: &reassignInstancesDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewResizeMachineParamsWithTimeout(timeout time.Duration) *ResizeMachineParams {\n\tvar ()\n\treturn &ResizeMachineParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewTransferOwnershipParamsWithTimeout(timeout time.Duration) *TransferOwnershipParams {\n\tvar ()\n\treturn &TransferOwnershipParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewChangeAMenuItemsRecipeIDParamsWithTimeout(timeout time.Duration) *ChangeAMenuItemsRecipeIDParams {\n\tvar ()\n\treturn &ChangeAMenuItemsRecipeIDParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewOrderNewParamsWithTimeout(timeout time.Duration) *OrderNewParams {\n\tvar (\n\t\tordTypeDefault = string(\"Limit\")\n\t)\n\treturn &OrderNewParams{\n\t\tOrdType: &ordTypeDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewPostReconciliationParamsWithTimeout(timeout time.Duration) *PostReconciliationParams {\n\tvar ()\n\treturn &PostReconciliationParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *QueryChangesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RefreshStackRecipesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCopyRecipeToMyRecipesWithChangesParamsWithContext creates a new CopyRecipeToMyRecipesWithChangesParams object with the default values initialized, and the ability to set a context for a request
|
func NewCopyRecipeToMyRecipesWithChangesParamsWithContext(ctx context.Context) *CopyRecipeToMyRecipesWithChangesParams {
var ()
return &CopyRecipeToMyRecipesWithChangesParams{
Context: ctx,
}
}
|
[
"func NewCopyRecipeToMyRecipesWithChangesParams() *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithContext(ctx context.Context) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewCopyRecipeToMyRecipesWithChangesParamsWithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewCopyRecipeToMyRecipesWithChangesParamsWithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithRecipeID(recipeID string) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetRecipeID(recipeID)\n\treturn o\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewPatchV1ChangesChangeIDParamsWithContext(ctx context.Context) *PatchV1ChangesChangeIDParams {\n\tvar ()\n\treturn &PatchV1ChangesChangeIDParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *PatchV1ChangesChangeIDParams) WithContext(ctx context.Context) *PatchV1ChangesChangeIDParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewPatchV1ChangesChangeIDParamsWithContext(ctx context.Context) *PatchV1ChangesChangeIDParams {\n\treturn &PatchV1ChangesChangeIDParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewSaveChangesForIngredientParamsWithContext(ctx context.Context) *SaveChangesForIngredientParams {\n\tvar ()\n\treturn &SaveChangesForIngredientParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithJSONBody(jSONBody *models.Recipe) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetJSONBody(jSONBody)\n\treturn o\n}",
"func NewPostV1ChangesParamsWithContext(ctx context.Context) *PostV1ChangesParams {\n\treturn &PostV1ChangesParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewQueryChangesParamsWithContext(ctx context.Context) *QueryChangesParams {\n\treturn &QueryChangesParams{\n\t\tContext: ctx,\n\t}\n}",
"func (o *SaveChangesForIngredientParams) WithContext(ctx context.Context) *SaveChangesForIngredientParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewSaveChangesForIngredientParams() *SaveChangesForIngredientParams {\n\tvar ()\n\treturn &SaveChangesForIngredientParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPatchV1ChangesChangeIDParams() *PatchV1ChangesChangeIDParams {\n\tvar ()\n\treturn &PatchV1ChangesChangeIDParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPatchV1ChangesChangeIDParams() *PatchV1ChangesChangeIDParams {\n\treturn &PatchV1ChangesChangeIDParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCopyRecipeToMyRecipesWithChangesParamsWithHTTPClient creates a new CopyRecipeToMyRecipesWithChangesParams object with the default values initialized, and the ability to set a custom HTTPClient for a request
|
func NewCopyRecipeToMyRecipesWithChangesParamsWithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {
var ()
return &CopyRecipeToMyRecipesWithChangesParams{
HTTPClient: client,
}
}
|
[
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewRefreshStackRecipesParamsWithHTTPClient(client *http.Client) *RefreshStackRecipesParams {\n\tvar ()\n\treturn &RefreshStackRecipesParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *RefreshStackRecipesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BackupsCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SaveChangesForIngredientParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPublicsRecipeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *QueryChangesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ChatNewParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RevertProductSnapshotRequestUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OrderNewParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostReconciliationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SaveChangesForIngredientParams) WithHTTPClient(client *http.Client) *SaveChangesForIngredientParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetIngredientVersionRevisionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BudgetAddParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetReceiptsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ConfigurationBackupModifyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithTimeout adds the timeout to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) WithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {
o.SetTimeout(timeout)
return o
}
|
[
"func (o *CopyRecipeToMyRecipesWithChangesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPublicsRecipeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) WithTimeout(timeout time.Duration) *ChangeAMenuItemsRecipeIDParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewCopyRecipeToMyRecipesWithChangesParamsWithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RefreshStackRecipesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SaveChangesForIngredientParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (b *taskBuilder) timeout(timeout time.Duration) {\n\tb.Spec.ExecutionTimeout = timeout\n\tb.Spec.IoTimeout = timeout // With kitchen, step logs don't count toward IoTimeout.\n}",
"func (o *GetRecipeByUniqueHandleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostRecipeGroupAndMenuItemParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RefreshStackRecipesParams) WithTimeout(timeout time.Duration) *RefreshStackRecipesParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *GetIngredientVersionRevisionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(b *batcher) {\n\t\tb.timeout = timeout\n\t}\n}",
"func (o *NarrowSearchRecipeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *NarrowSearchRecipeParams) WithTimeout(timeout time.Duration) *NarrowSearchRecipeParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func WithTimeout(duration time.Duration) ReconcilerOption {\n\treturn func(r *Reconciler) {\n\t\tr.timeout = duration\n\t}\n}",
"func (o *PutRecipeGroupParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(t time.Duration) OptFunc {\n\treturn func(d *Downloader) {\n\t\td.timeout = t\n\t}\n}",
"func (o *GetPublicsRecipeParams) WithTimeout(timeout time.Duration) *GetPublicsRecipeParams {\n\to.SetTimeout(timeout)\n\treturn o\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetTimeout adds the timeout to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
|
[
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithTimeout(timeout time.Duration) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *GetPublicsRecipeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RefreshStackRecipesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRecipeByUniqueHandleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SaveChangesForIngredientParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetIngredientVersionRevisionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostRecipeGroupAndMenuItemParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *NarrowSearchRecipeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (pool *ComplexPool) SetTimeout(timeout time.Duration) {\n\tlogger.Debugf(\"prox (%p): setting timeout: %v\", pool, timeout)\n\tpool.timeout = timeout\n}",
"func (o *PutRecipeGroupParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) WithTimeout(timeout time.Duration) *ChangeAMenuItemsRecipeIDParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *GetReceiptsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNutritionForSingleParsedPlainTextIngredientParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *EditParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (w *Waiter) SetTimeout(_ time.Duration) {}",
"func (o *CreateGitWebhookUsingPOSTParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *TransferOwnershipParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetActionTemplateLogoVersionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithContext adds the context to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) WithContext(ctx context.Context) *CopyRecipeToMyRecipesWithChangesParams {
o.SetContext(ctx)
return o
}
|
[
"func CopyWithContext(ctx context.Context, dst *Writer, src Stream) error {\n\tif err := src.Open(); err != nil {\n\t\treturn err\n\t}\n\tvar err error\n\tfor ctx.Err() == nil {\n\t\tvar pair Pair\n\t\tpair, err = src.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif pair.Key == nil {\n\t\t\tbreak\n\t\t}\n\t\terr = dst.Write(pair)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ctx.Err()\n}",
"func (ctx *Context) Copy() *Context {\n\tvar pathParams Params\n\tif len(ctx.PathParams) > 0 {\n\t\tpathParams = append(pathParams, ctx.PathParams...)\n\t}\n\treturn &Context{\n\t\tresponseWriter2: nil,\n\t\tResponseWriter: nil,\n\t\tRequest: ctx.Request,\n\t\tPathParams: pathParams,\n\t\tqueryParams: ctx.queryParams,\n\t\tValidator: ctx.Validator,\n\t\tfetchClientIPFromHeader: ctx.fetchClientIPFromHeader,\n\t\thandlers: nil,\n\t\thandlerIndex: __abortHandlerIndex,\n\t\tkvs: ctx.kvs,\n\t}\n}",
"func (c *Context) Copy() *Context {\n\tret := *c\n\tret.init(&fasthttp.RequestCtx{})\n\tc.Request.CopyTo(&ret.Request)\n\tc.Response.CopyTo(&ret.Response)\n\tret.WSConn = c.WSConn\n\tret.data = c.data\n\treturn &ret\n}",
"func (mr *MockS3APIMockRecorder) CopyObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CopyObjectWithContext\", reflect.TypeOf((*MockS3API)(nil).CopyObjectWithContext), varargs...)\n}",
"func (mr *MockRDSAPIMockRecorder) CopyDBSnapshotWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CopyDBSnapshotWithContext\", reflect.TypeOf((*MockRDSAPI)(nil).CopyDBSnapshotWithContext), varargs...)\n}",
"func (ctx context) clone() context {\n\treturn context{\n\t\tkeyvals: safeSlice(ctx.keyvals),\n\t}\n}",
"func (tx *WriteTx) RunWithContext(ctx context.Context) error {\n\tif tx.err != nil {\n\t\treturn tx.err\n\t}\n\tinput, err := tx.input()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = retry(ctx, func() error {\n\t\tout, err := tx.db.client.TransactWriteItemsWithContext(ctx, input)\n\t\tif tx.cc != nil && out != nil {\n\t\t\tfor _, cc := range out.ConsumedCapacity {\n\t\t\t\taddConsumedCapacity(tx.cc, cc)\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\treturn err\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (this *L0JobContext) Copy(request string) JobContext {\n\treturn &L0JobContext{\n\t\trequest: request,\n\t\tjobID: this.jobID,\n\t\tlogic: this.logic,\n\t\tloadBalancerLogic: this.loadBalancerLogic,\n\t\tserviceLogic: this.serviceLogic,\n\t\tenvironmentLogic: this.environmentLogic,\n\t\tmutex: this.mutex,\n\t}\n}",
"func (mr *MockKMSAPIMockRecorder) ReplicateKeyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ReplicateKeyWithContext\", reflect.TypeOf((*MockKMSAPI)(nil).ReplicateKeyWithContext), varargs...)\n}",
"func MetaWithContext(ctx context.Context, newMeta map[string]interface{}) context.Context {\n\tprevMeta := MetaFromContext(ctx)\n\n\tif prevMeta == nil {\n\t\tprevMeta = make(map[string]interface{})\n\t}\n\n\tfor k, v := range newMeta {\n\t\tprevMeta[k] = v\n\t}\n\n\treturn context.WithValue(ctx, MetaCtxKey, prevMeta)\n}",
"func (o *SaveChangesForIngredientParams) WithContext(ctx context.Context) *SaveChangesForIngredientParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (m *MockRDSAPI) CopyOptionGroupWithContext(arg0 aws.Context, arg1 *rds.CopyOptionGroupInput, arg2 ...request.Option) (*rds.CopyOptionGroupOutput, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"CopyOptionGroupWithContext\", varargs...)\n\tret0, _ := ret[0].(*rds.CopyOptionGroupOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (mr *MockRDSAPIMockRecorder) CopyDBClusterSnapshotWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CopyDBClusterSnapshotWithContext\", reflect.TypeOf((*MockRDSAPI)(nil).CopyDBClusterSnapshotWithContext), varargs...)\n}",
"func (m *MockRDSAPI) CopyDBParameterGroupWithContext(arg0 aws.Context, arg1 *rds.CopyDBParameterGroupInput, arg2 ...request.Option) (*rds.CopyDBParameterGroupOutput, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"CopyDBParameterGroupWithContext\", varargs...)\n\tret0, _ := ret[0].(*rds.CopyDBParameterGroupOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockRDSAPI) CopyDBSnapshotWithContext(arg0 aws.Context, arg1 *rds.CopyDBSnapshotInput, arg2 ...request.Option) (*rds.CopyDBSnapshotOutput, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"CopyDBSnapshotWithContext\", varargs...)\n\tret0, _ := ret[0].(*rds.CopyDBSnapshotOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (mr *MockRDSAPIMockRecorder) CopyOptionGroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CopyOptionGroupWithContext\", reflect.TypeOf((*MockRDSAPI)(nil).CopyOptionGroupWithContext), varargs...)\n}",
"func (m *MockS3API) CopyObjectWithContext(arg0 context.Context, arg1 *s3.CopyObjectInput, arg2 ...request.Option) (*s3.CopyObjectOutput, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"CopyObjectWithContext\", varargs...)\n\tret0, _ := ret[0].(*s3.CopyObjectOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockRDSAPI) CopyDBClusterSnapshotWithContext(arg0 aws.Context, arg1 *rds.CopyDBClusterSnapshotInput, arg2 ...request.Option) (*rds.CopyDBClusterSnapshotOutput, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"CopyDBClusterSnapshotWithContext\", varargs...)\n\tret0, _ := ret[0].(*rds.CopyDBClusterSnapshotOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetContext adds the context to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) SetContext(ctx context.Context) {
o.Context = ctx
}
|
[
"func (o *RevertProductSnapshotRequestUsingPOSTParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPublicsRecipeParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithContext(ctx context.Context) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (ctx *Context) Copy() *Context {\n\tvar pathParams Params\n\tif len(ctx.PathParams) > 0 {\n\t\tpathParams = append(pathParams, ctx.PathParams...)\n\t}\n\treturn &Context{\n\t\tresponseWriter2: nil,\n\t\tResponseWriter: nil,\n\t\tRequest: ctx.Request,\n\t\tPathParams: pathParams,\n\t\tqueryParams: ctx.queryParams,\n\t\tValidator: ctx.Validator,\n\t\tfetchClientIPFromHeader: ctx.fetchClientIPFromHeader,\n\t\thandlers: nil,\n\t\thandlerIndex: __abortHandlerIndex,\n\t\tkvs: ctx.kvs,\n\t}\n}",
"func (c *Context) Copy() *Context {\n\tret := *c\n\tret.init(&fasthttp.RequestCtx{})\n\tc.Request.CopyTo(&ret.Request)\n\tc.Response.CopyTo(&ret.Response)\n\tret.WSConn = c.WSConn\n\tret.data = c.data\n\treturn &ret\n}",
"func (o *GetReceiptsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *RefreshStackRecipesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *SaveChangesForIngredientParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetIngredientVersionRevisionParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *UpdateLookmlModelParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PostRecipeGroupAndMenuItemParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetFileContentsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPointOfSaleReceiptsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetRepository15Params) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *CreateProductReplaceLicenseRequestUsingPOSTParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *AddItemParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithHTTPClient adds the HTTPClient to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) WithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {
o.SetHTTPClient(client)
return o
}
|
[
"func (o *CopyRecipeToMyRecipesWithChangesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SaveChangesForIngredientParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RefreshStackRecipesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPublicsRecipeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostRecipeGroupAndMenuItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) WithHTTPClient(client *http.Client) *ChangeAMenuItemsRecipeIDParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *AddItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RevertProductSnapshotRequestUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetIngredientVersionRevisionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRecipeByUniqueHandleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetReceiptsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BudgetAddParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewCopyRecipeToMyRecipesWithChangesParamsWithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {\n\tvar ()\n\treturn &CopyRecipeToMyRecipesWithChangesParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *SaveChangesForIngredientParams) WithHTTPClient(client *http.Client) *SaveChangesForIngredientParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *CreateProductReplaceLicenseRequestUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNutritionForSingleParsedPlainTextIngredientParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateHardeningApplyItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *NarrowSearchRecipeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetHTTPClient adds the HTTPClient to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
|
[
"func (o *RefreshStackRecipesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPublicsRecipeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SaveChangesForIngredientParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostRecipeGroupAndMenuItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RevertProductSnapshotRequestUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRecipeByUniqueHandleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateProductReplaceLicenseRequestUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithHTTPClient(client *http.Client) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetIngredientVersionRevisionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AddItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func SetHTTPClient(newClient *http.Client) {\n\thttpClient = newClient\n}",
"func (o *GetReceiptsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *NarrowSearchRecipeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateCartUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNutritionForSingleParsedPlainTextIngredientParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EditParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AddOrUpdateNodePoolConfigItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithJSONBody adds the jSONBody to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) WithJSONBody(jSONBody *models.Recipe) *CopyRecipeToMyRecipesWithChangesParams {
o.SetJSONBody(jSONBody)
return o
}
|
[
"func (o *CopyRecipeToMyRecipesWithChangesParams) SetJSONBody(jSONBody *models.Recipe) {\n\to.JSONBody = jSONBody\n}",
"func WithJSONBody(body string) Opts {\n\treturn func(opts *requestOpts) {\n\t\topts.bodyFunc = func() (io.Reader, error) {\n\t\t\treturn strings.NewReader(body), nil\n\t\t}\n\t}\n}",
"func (o *PostRecipeGroupAndMenuItemParams) SetJSONBody(jSONBody *models.RecipeGroupInserter) {\n\to.JSONBody = jSONBody\n}",
"func (a *API) JSONBody(ctx *fasthttp.RequestCtx, model interface{}) {\n\tr := bytes.NewReader(ctx.PostBody())\n\tjson.NewDecoder(r).Decode(&model)\n}",
"func (o *PostRecipeGroupAndMenuItemParams) WithJSONBody(jSONBody *models.RecipeGroupInserter) *PostRecipeGroupAndMenuItemParams {\n\to.SetJSONBody(jSONBody)\n\treturn o\n}",
"func (c *RBController) RecipeJSONAdvanced(w http.ResponseWriter, r *http.Request) (err error) {\n\tr.ParseForm()\n\tstrict, err := strconv.Atoi(r.PostFormValue(\"strict\"))\n\tname := r.PostFormValue(\"name\")\n\tcuisine, _ := strconv.Atoi(r.PostFormValue(\"cuisine\"))\n\tseason, _ := strconv.Atoi(r.PostFormValue(\"season\"))\n\tmealtype, _ := strconv.Atoi(r.PostFormValue(\"mealtype\"))\n\n\t// get all the recipes that match\n\tvar recipes *list.List\n\tif strict == 0 {\n\t\trecipes, err = c.GetRecipesLoose(name, cuisine, mealtype, season)\n\t} else {\n\t\trecipes, err = c.GetRecipesStrict(name, cuisine, mealtype, season)\n\t}\n\n\t// slice of jsons\n\tjsons := make([]string, recipes.Len())\n\n\tif err == nil {\n\t\tindex := 0\n\t\tfor e := recipes.Front(); e != nil; e = e.Next() {\n\t\t\trec := e.Value.(*Recipe)\n\t\t\tjsons[index] = rec.ToJSON()\n\t\t\tindex++\n\t\t}\n\t\trequest := strings.Join(jsons, \"\\n\")\n\t\tfmt.Fprintf(w, request)\n\t} else {\n\t\tfmt.Fprintf(w, \"%v\", err.Error())\n\t}\n\treturn\n}",
"func (r *Request) SetJSONBody(val interface{}) error {\n\tbuf := bytes.NewBuffer(nil)\n\tenc := json.NewEncoder(buf)\n\tif err := enc.Encode(val); err != nil {\n\t\treturn err\n\t}\n\n\tr.Obj = val\n\tr.Body = buf\n\tr.BodySize = int64(buf.Len())\n\treturn nil\n}",
"func (c *RBController) RecipeJSON(w http.ResponseWriter, r *http.Request) (err error) {\n\tvars := mux.Vars(r)\n\tid, _ := strconv.Atoi(vars[\"id\"])\n\trecipe, err := c.GetRecipe(id)\n\tif err == nil {\n\t\tc.JSON(w, http.StatusOK, recipe)\n\t} else if err == sql.ErrNoRows {\n\t\tc.RenderError(w, 404, \"Sorry, your page wasn't found\")\n\t\terr = nil\n\t}\n\treturn\n}",
"func (r *Request) setBodyJson(data interface{}) error {\n\tbody, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Header.Set(\"Content-Type\", \"application/json\")\n\tr.setBodyReader(bytes.NewReader(body))\n\treturn nil\n}",
"func (o *RefreshStackRecipesParams) SetBody(body *model.UpdateRecipesV4Request) {\n\to.Body = body\n}",
"func TestAddRecipe(t *testing.T) {\n\tpayload := fmt.Sprintf(`\n {\n \"mealtype\": \"Breakfast\",\n \"name\": \"Pancakes\",\n \"Ingredients\": [ \"150g all purpose flour\",\n \t\t\t\t \"150ml of milk\"],\n \"preparation\": \"Add all ingredients and mix. Put in Pan.\"\n}`)\n\n\tresponse, err := http.Post(baseURL+\"/recipes\", \"application/json\", strings.NewReader(payload))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get json, %s\", err)\n\t}\n\n\tcheckResponseCode(t, http.StatusOK, response.StatusCode)\n}",
"func (o *PostMenuItemParams) SetJSONBody(jSONBody *models.MenuItem) {\n\to.JSONBody = jSONBody\n}",
"func (r *Request) BodyJSON(body interface{}) *Request {\n\tr.vBody = body\n\tr.bodyJSON = true\n\tr.additionalHeader = []string{\"Content-Type\", \"application/json\"}\n\treturn r\n}",
"func NewItemClonePostRequestBody()(*ItemClonePostRequestBody) {\n m := &ItemClonePostRequestBody{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func (o *PutMenuItemParams) SetJSONBody(jSONBody *models.MenuItem) {\n\to.JSONBody = jSONBody\n}",
"func (api *FoodRecipeAPI) partialRecipeUpdate(w http.ResponseWriter, req *http.Request) {\n\tdefer DrainBody(req)\n\tctx := req.Context()\n\n\tvars := mux.Vars(req)\n\tid := vars[\"id\"]\n\tlogData := log.Data{\"id\": id}\n\n\tvar errorObjects []*models.ErrorObject\n\n\tpatchJSON, recipePatches, err := patch.Get(ctx, req.Body)\n\tif err != nil {\n\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: err.Error()})\n\t\tErrorResponse(ctx, w, http.StatusBadRequest, &models.ErrorResponse{Errors: errorObjects})\n\t\treturn\n\t}\n\n\t// Validate patch request\n\tfor i, recipePatch := range *recipePatches {\n\t\tif err = recipePatch.Validate(nil); err != nil {\n\t\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: errs.ErrInternalServer.Error()})\n\t\t\t\tErrorResponse(ctx, w, http.StatusInternalServerError, &models.ErrorResponse{Errors: errorObjects})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, err := range err.(validator.ValidationErrors) {\n\t\t\t\terrorObjects = append(errorObjects, models.HandleValidationErrors(strconv.Itoa(i), err.ActualTag(), err.StructField(), err.Value().(string), err.Param()))\n\t\t\t}\n\t\t}\n\t}\n\tif len(errorObjects) > 0 {\n\t\tErrorResponse(ctx, w, http.StatusBadRequest, &models.ErrorResponse{Errors: errorObjects})\n\t\treturn\n\t}\n\n\t// apply patch against recipe resource\n\tp, err := jsonpatch.DecodePatch(patchJSON)\n\tif err != nil {\n\t\tlog.Error(ctx, \"patch recipe: unable to decode patch\", err)\n\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: err.Error()})\n\t\tErrorResponse(ctx, w, http.StatusBadRequest, &models.ErrorResponse{Errors: errorObjects})\n\t\treturn\n\t}\n\n\t// find current recipe doc\n\tvar recipe models.Recipe\n\n\tcollection := api.MongoClient.Database(\"food-recipes\").Collection(\"recipes\")\n\tif err = collection.FindOne(ctx, bson.M{\"_id\": id}).Decode(&recipe); err != nil {\n\t\tif err == 
mongo.ErrNoDocuments {\n\t\t\tlog.Warn(ctx, \"patch recipe: failed to find recipe\", log.FormatErrors([]error{err}), logData)\n\t\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: errs.ErrRecipeNotFound.Error()})\n\t\t\tErrorResponse(ctx, w, http.StatusNotFound, &models.ErrorResponse{Errors: errorObjects})\n\t\t\treturn\n\t\t}\n\n\t\tlog.Error(ctx, \"patch recipe: failed to find recipe, bad connection?\", err)\n\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: errs.ErrInternalServer.Error()})\n\t\tErrorResponse(ctx, w, http.StatusInternalServerError, &models.ErrorResponse{Errors: errorObjects})\n\t\treturn\n\t}\n\n\tb, err := json.Marshal(recipe)\n\tif err != nil {\n\t\tlog.Error(ctx, \"patch recipe: error returned from json marshal\", err, logData)\n\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: errs.ErrInternalServer.Error()})\n\t\tErrorResponse(ctx, w, http.StatusInternalServerError, &models.ErrorResponse{Errors: errorObjects})\n\t\treturn\n\t}\n\n\t// apply patch to existing recipe\n\tmodified, err := p.Apply(b)\n\tif err != nil {\n\t\tlog.Error(ctx, \"patch recipe: unable to apply patch to recipe\", err, logData)\n\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: err.Error()})\n\t\tErrorResponse(ctx, w, http.StatusBadRequest, &models.ErrorResponse{Errors: errorObjects})\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(modified, &recipe)\n\tif err != nil {\n\t\tlog.Error(ctx, \"patch recipe: unmarshal modified recipe into recipe struct\", err, logData)\n\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: err.Error()})\n\t\tErrorResponse(ctx, w, http.StatusBadRequest, &models.ErrorResponse{Errors: errorObjects})\n\t\treturn\n\t}\n\n\t// store new recipe\n\tif _, err = collection.ReplaceOne(ctx, bson.M{\"_id\": id}, recipe); err != nil {\n\t\tif err == mongo.ErrNoDocuments {\n\t\t\tlog.Error(ctx, \"update recipe: failed to update recipe, recipe deos not exists\", err, 
logData)\n\t\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: errs.ErrRecipeNotFound.Error()})\n\t\t\tErrorResponse(ctx, w, http.StatusNotFound, &models.ErrorResponse{Errors: errorObjects})\n\t\t\treturn\n\t\t}\n\n\t\tlog.Error(ctx, \"update recipe: failed to insert recipe\", err, logData)\n\t\terrorObjects = append(errorObjects, &models.ErrorObject{Error: errs.ErrInternalServer.Error()})\n\t\tErrorResponse(ctx, w, http.StatusInternalServerError, &models.ErrorResponse{Errors: errorObjects})\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tlog.Info(ctx, \"update recipe: request successful\", logData)\n}",
"func (r *Request) BodyJSON(obj interface{}) {\n\tjson.Unmarshal([]byte(r.Body), obj)\n}",
"func (c *Client) PatchJSON(v interface{}, urlPath ...string) *Req {\n\treturn c.Patch(BodyJSON(v), urlPath...)\n}",
"func JSONBody(jsonStr string) Responder {\n\treturn func(w http.ResponseWriter) {\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.Write([]byte(jsonStr))\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetJSONBody adds the jsonBody to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) SetJSONBody(jSONBody *models.Recipe) {
o.JSONBody = jSONBody
}
|
[
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithJSONBody(jSONBody *models.Recipe) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetJSONBody(jSONBody)\n\treturn o\n}",
"func (r *Request) SetJSONBody(val interface{}) error {\n\tbuf := bytes.NewBuffer(nil)\n\tenc := json.NewEncoder(buf)\n\tif err := enc.Encode(val); err != nil {\n\t\treturn err\n\t}\n\n\tr.Obj = val\n\tr.Body = buf\n\tr.BodySize = int64(buf.Len())\n\treturn nil\n}",
"func (o *PostRecipeGroupAndMenuItemParams) SetJSONBody(jSONBody *models.RecipeGroupInserter) {\n\to.JSONBody = jSONBody\n}",
"func (o *RefreshStackRecipesParams) SetBody(body *model.UpdateRecipesV4Request) {\n\to.Body = body\n}",
"func (r *Request) setBodyJson(data interface{}) error {\n\tbody, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Header.Set(\"Content-Type\", \"application/json\")\n\tr.setBodyReader(bytes.NewReader(body))\n\treturn nil\n}",
"func (o *PostMenuItemParams) SetJSONBody(jSONBody *models.MenuItem) {\n\to.JSONBody = jSONBody\n}",
"func (d *D) SetBody(i interface{}) error {\n\t// if response is nil, keeep the previous response intact.\n\tif i == nil {\n\t\treturn nil\n\t}\n\n\tvar buf bytes.Buffer\n\tenc := json.NewEncoder(&buf)\n\terr := enc.Encode(i)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.body = buf.Bytes()\n\treturn nil\n}",
"func (r *StandardResponse) SetJSONBody(body interface{}) error {\n\tbodyBytes, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed JSON conversion: %s\", err.Error())\n\t}\n\n\tr.SetBody(bodyBytes)\n\treturn nil\n}",
"func (o *PutMenuItemParams) SetJSONBody(jSONBody *models.MenuItem) {\n\to.JSONBody = jSONBody\n}",
"func (a *API) JSONBody(ctx *fasthttp.RequestCtx, model interface{}) {\n\tr := bytes.NewReader(ctx.PostBody())\n\tjson.NewDecoder(r).Decode(&model)\n}",
"func WithJSONBody(body string) Opts {\n\treturn func(opts *requestOpts) {\n\t\topts.bodyFunc = func() (io.Reader, error) {\n\t\t\treturn strings.NewReader(body), nil\n\t\t}\n\t}\n}",
"func (HTTPOperation) SetRequestBody(time time.Time, inputType api.InputTypeEnum, location int, numberAvailable int, numberTotal *int, tags *string, vaccine *int) error {\n\tbody.Date = time\n\tbody.InputType = inputType\n\tbody.Location = location\n\tbody.NumberAvailable = numberAvailable\n\tbody.NumberTotal = numberTotal\n\tbody.Tags = tags\n\tbody.Vaccine = vaccine\n\treturn nil\n}",
"func (o *PutRecipeGroupParams) SetJSONBody(jSONBody *models.RecipeGroup) {\n\to.JSONBody = jSONBody\n}",
"func (r *Request) ResetJSONBody() error {\n\tif r.Body == nil {\n\t\treturn nil\n\t}\n\treturn r.SetJSONBody(r.Obj)\n}",
"func SetBody(req *http.Request, body []byte) {\n\treq.Body = ioutil.NopCloser(bytes.NewReader(body))\n}",
"func JSONBody(jsonStr string) Responder {\n\treturn func(w http.ResponseWriter) {\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.Write([]byte(jsonStr))\n\t}\n}",
"func (o *PostApplyManifestParams) SetRequestBody(requestBody PostApplyManifestBody) {\n\to.RequestBody = requestBody\n}",
"func (r *Request) BodyJSON(body interface{}) *Request {\n\tr.vBody = body\n\tr.bodyJSON = true\n\tr.additionalHeader = []string{\"Content-Type\", \"application/json\"}\n\treturn r\n}",
"func (o *SavePreferencesParams) SetJSONBody(jSONBody *models.UpdateUserPreferences) {\n\to.JSONBody = jSONBody\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithRecipeID adds the recipeID to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) WithRecipeID(recipeID string) *CopyRecipeToMyRecipesWithChangesParams {
o.SetRecipeID(recipeID)
return o
}
|
[
"func (o *CopyRecipeToMyRecipesWithChangesParams) SetRecipeID(recipeID string) {\n\to.RecipeID = recipeID\n}",
"func (o *RecipeAdditive) SetRecipe(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Recipe) error {\n\tvar err error\n\tif insert {\n\t\tif err = related.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t}\n\t}\n\n\tupdateQuery := fmt.Sprintf(\n\t\t\"UPDATE \\\"recipe_additive\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, []string{\"recipe_id\"}),\n\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 2, recipeAdditivePrimaryKeyColumns),\n\t)\n\tvalues := []interface{}{related.ID, o.ID}\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, updateQuery)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\treturn errors.Wrap(err, \"failed to update local table\")\n\t}\n\n\to.RecipeID = related.ID\n\tif o.R == nil {\n\t\to.R = &recipeAdditiveR{\n\t\t\tRecipe: related,\n\t\t}\n\t} else {\n\t\to.R.Recipe = related\n\t}\n\n\tif related.R == nil {\n\t\trelated.R = &recipeR{\n\t\t\tRecipeAdditives: RecipeAdditiveSlice{o},\n\t\t}\n\t} else {\n\t\trelated.R.RecipeAdditives = append(related.R.RecipeAdditives, o)\n\t}\n\n\treturn nil\n}",
"func (r *RecipeInfo) newRecipe(id int) error {\n\t// Get recipe from the database\n\trecipe, err := new(models.Recipe).GetByID(database, queries, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Setup Recipe info\n\tr.ID = recipe.ID\n\tr.TotalSteps = len(recipe.Steps)\n\tr.CurrentStep = recipe.Steps[0]\n\tr.PrevStep = nil\n\tr.NextStep = nil\n\tr.JobIDs = make([]int64, 0)\n\n\tif r.TotalSteps > 1 {\n\t\tr.NextStep = recipe.Steps[1]\n\t}\n\n\tr.recipe = recipe\n\tdone, err := r.initStep(0)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif done {\n\t\tlog.Warn(\"Just setup a newRecipe that is already done\")\n\t}\n\treturn err\n}",
"func (o *RecipeLipid) SetRecipe(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Recipe) error {\n\tvar err error\n\tif insert {\n\t\tif err = related.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t}\n\t}\n\n\tupdateQuery := fmt.Sprintf(\n\t\t\"UPDATE \\\"recipe_lipid\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, []string{\"recipe_id\"}),\n\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 2, recipeLipidPrimaryKeyColumns),\n\t)\n\tvalues := []interface{}{related.ID, o.ID}\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, updateQuery)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\treturn errors.Wrap(err, \"failed to update local table\")\n\t}\n\n\to.RecipeID = related.ID\n\tif o.R == nil {\n\t\to.R = &recipeLipidR{\n\t\t\tRecipe: related,\n\t\t}\n\t} else {\n\t\to.R.Recipe = related\n\t}\n\n\tif related.R == nil {\n\t\trelated.R = &recipeR{\n\t\t\tRecipeLipids: RecipeLipidSlice{o},\n\t\t}\n\t} else {\n\t\trelated.R.RecipeLipids = append(related.R.RecipeLipids, o)\n\t}\n\n\treturn nil\n}",
"func (o *SaveChangesForIngredientParams) SetRecipeID(recipeID string) {\n\to.RecipeID = recipeID\n}",
"func (o *SaveChangesForIngredientParams) WithRecipeID(recipeID string) *SaveChangesForIngredientParams {\n\to.SetRecipeID(recipeID)\n\treturn o\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) SetRecipeID(recipeID string) {\n\to.RecipeID = recipeID\n}",
"func (a *app) NewRecipe(ctx context.Context, recipeID string) error {\n\treturn a.streamer.Add(newRecipeStream, &streamer.Message{Payload: recipeID})\n}",
"func (ds CBDataStore) RecipeUpdate(modelRecipe model.Recipe, id, userID string) error {\n\t_, err := ds.bucket.Replace(modelRecipe.ID, modelRecipe, 0, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithContext(ctx context.Context) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (ds CBDataStore) RecipeUpdate(modelRecipe model.Recipe, id, userID string) error {\n\t_, err := ds.bucket.Replace(modelRecipe.Id, modelRecipe, 0, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *RBController) SaveRecipe(w http.ResponseWriter, r *http.Request) (err error) {\n\t// Get properties\n\tname := r.PostFormValue(`name`)\n\tcuisine, err := strconv.Atoi(r.PostFormValue(`cuisine`))\n\n\t// Get the mealtype and season encoded ints\n\tmealtype := EncodeMealtype(r.Form[`mealtype`])\n\tseason := EncodeSeason(r.Form[`season`])\n\n\t// get everything else\n\tdescription := r.PostFormValue(`description`)\n\tingredients := r.PostFormValue(`ingredients`)\n\tinstructions := r.PostFormValue(`instructions`)\n\n\t// TODO better error handling\n\tif err != nil {\n\t\tfmt.Println(\"[WARNING] Something went wrong in SaveRecipe\")\n\t\tc.RenderError(w, 500, \"Sorry, something went wrong.\")\n\t\treturn\n\t}\n\n\t// everything OK: build the recipe, and send it to the database\n\trecipe := Recipe{ID: 0, Name: name, Cuisine: cuisine, Mealtype: mealtype,\n\t\tSeason: season, Description: description, Ingredientlist: ingredients,\n\t\tInstructions: instructions}\n\n\t// if we don't have the id string, then this is a new request.\n\tvars := mux.Vars(r)\n\tidStr := vars[\"id\"]\n\tid := 0\n\n\tif idStr != \"\" {\n\t\tid, _ = strconv.Atoi(idStr)\n\t\trecipe.ID = id\n\t\terr = c.RecipeDB.UpdateRecipe(&recipe)\n\t} else {\n\t\tid, err = c.RecipeDB.NewRecipe(&recipe)\n\t}\n\n\tif err == nil {\n\t\thttp.Redirect(w, r, \"/recipes/\"+fmt.Sprintf(\"%v\", id)+\"/\", http.StatusFound)\n\t}\n\treturn\n}",
"func (a *Client) ChangeIngredientForScratchPadRecipe(params *ChangeIngredientForScratchPadRecipeParams, authInfo runtime.ClientAuthInfoWriter) (*ChangeIngredientForScratchPadRecipeOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewChangeIngredientForScratchPadRecipeParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ChangeIngredientForScratchPadRecipe\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/scratch-recipe/{recipeId}/ingredient/{ingredient-id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ChangeIngredientForScratchPadRecipeReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ChangeIngredientForScratchPadRecipeOK), nil\n\n}",
"func (a *Client) PutScratchPadRecipe(params *PutScratchPadRecipeParams, authInfo runtime.ClientAuthInfoWriter) (*PutScratchPadRecipeOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPutScratchPadRecipeParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PutScratchPadRecipe\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/scratch-recipe/{recipeId}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PutScratchPadRecipeReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PutScratchPadRecipeOK), nil\n\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) WithContext(ctx context.Context) *ChangeAMenuItemsRecipeIDParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func AddRecipe(db *database.DB, recipe *Recipe) {\n\tdb.Create(recipe)\n}",
"func (s *Service) UpdateRecipe(w http.ResponseWriter, r *http.Request) {\n\t// Get the recipe ID\n\tvars := mux.Vars(r)\n\trecipe := recipe.Recipe{ID: vars[\"id\"]}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&recipe); err != nil {\n\t\tlog.Errorf(\"UpdateRecipe: %v\", err)\n\t\tutils.ResponseWithError(w, http.StatusBadRequest, \"Invalid resquest payload\")\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\t// Update the recipe in the storage\n\tif err := usecases.UpdateRecipe(s.storage, &recipe); err != nil {\n\t\tlog.Errorf(\"UpdateRecipe: %v\", err)\n\t\tutils.ResponseWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tutils.ResponseWithJSON(w, http.StatusOK, recipe)\n}",
"func (c *RBController) Recipe(w http.ResponseWriter, r *http.Request) (err error) {\n\tvars := mux.Vars(r)\n\tid, _ := strconv.Atoi(vars[\"id\"])\n\trecipe, err := c.GetRecipe(id)\n\tif err == nil {\n\t\tc.HTML(w, http.StatusOK, \"recipes/recipe\", recipe)\n\t} else if err == sql.ErrNoRows {\n\t\t// this means that the recipe wasn't found, so we should return a 404 error\n\t\tc.RenderError(w, 404, \"Sorry, your page wasn't found\")\n\t\terr = nil\n\t}\n\treturn\n}",
"func (a *app) UpdatedRecipe(ctx context.Context, recipeID string) error {\n\treturn a.streamer.Add(updatedRecipeStream, &streamer.Message{Payload: recipeID})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetRecipeID adds the recipeId to the copy recipe to my recipes with changes params
|
func (o *CopyRecipeToMyRecipesWithChangesParams) SetRecipeID(recipeID string) {
o.RecipeID = recipeID
}
|
[
"func (o *RecipeLipid) SetRecipe(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Recipe) error {\n\tvar err error\n\tif insert {\n\t\tif err = related.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t}\n\t}\n\n\tupdateQuery := fmt.Sprintf(\n\t\t\"UPDATE \\\"recipe_lipid\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, []string{\"recipe_id\"}),\n\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 2, recipeLipidPrimaryKeyColumns),\n\t)\n\tvalues := []interface{}{related.ID, o.ID}\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, updateQuery)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\treturn errors.Wrap(err, \"failed to update local table\")\n\t}\n\n\to.RecipeID = related.ID\n\tif o.R == nil {\n\t\to.R = &recipeLipidR{\n\t\t\tRecipe: related,\n\t\t}\n\t} else {\n\t\to.R.Recipe = related\n\t}\n\n\tif related.R == nil {\n\t\trelated.R = &recipeR{\n\t\t\tRecipeLipids: RecipeLipidSlice{o},\n\t\t}\n\t} else {\n\t\trelated.R.RecipeLipids = append(related.R.RecipeLipids, o)\n\t}\n\n\treturn nil\n}",
"func (o *SaveChangesForIngredientParams) SetRecipeID(recipeID string) {\n\to.RecipeID = recipeID\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) SetRecipeID(recipeID string) {\n\to.RecipeID = recipeID\n}",
"func (o *RecipeAdditive) SetRecipe(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Recipe) error {\n\tvar err error\n\tif insert {\n\t\tif err = related.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t}\n\t}\n\n\tupdateQuery := fmt.Sprintf(\n\t\t\"UPDATE \\\"recipe_additive\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, []string{\"recipe_id\"}),\n\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 2, recipeAdditivePrimaryKeyColumns),\n\t)\n\tvalues := []interface{}{related.ID, o.ID}\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, updateQuery)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\treturn errors.Wrap(err, \"failed to update local table\")\n\t}\n\n\to.RecipeID = related.ID\n\tif o.R == nil {\n\t\to.R = &recipeAdditiveR{\n\t\t\tRecipe: related,\n\t\t}\n\t} else {\n\t\to.R.Recipe = related\n\t}\n\n\tif related.R == nil {\n\t\trelated.R = &recipeR{\n\t\t\tRecipeAdditives: RecipeAdditiveSlice{o},\n\t\t}\n\t} else {\n\t\trelated.R.RecipeAdditives = append(related.R.RecipeAdditives, o)\n\t}\n\n\treturn nil\n}",
"func (o *CopyRecipeToMyRecipesWithChangesParams) WithRecipeID(recipeID string) *CopyRecipeToMyRecipesWithChangesParams {\n\to.SetRecipeID(recipeID)\n\treturn o\n}",
"func (o *SaveChangesForIngredientParams) WithRecipeID(recipeID string) *SaveChangesForIngredientParams {\n\to.SetRecipeID(recipeID)\n\treturn o\n}",
"func (r *RecipeInfo) newRecipe(id int) error {\n\t// Get recipe from the database\n\trecipe, err := new(models.Recipe).GetByID(database, queries, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Setup Recipe info\n\tr.ID = recipe.ID\n\tr.TotalSteps = len(recipe.Steps)\n\tr.CurrentStep = recipe.Steps[0]\n\tr.PrevStep = nil\n\tr.NextStep = nil\n\tr.JobIDs = make([]int64, 0)\n\n\tif r.TotalSteps > 1 {\n\t\tr.NextStep = recipe.Steps[1]\n\t}\n\n\tr.recipe = recipe\n\tdone, err := r.initStep(0)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif done {\n\t\tlog.Warn(\"Just setup a newRecipe that is already done\")\n\t}\n\treturn err\n}",
"func (c *RBController) SaveRecipe(w http.ResponseWriter, r *http.Request) (err error) {\n\t// Get properties\n\tname := r.PostFormValue(`name`)\n\tcuisine, err := strconv.Atoi(r.PostFormValue(`cuisine`))\n\n\t// Get the mealtype and season encoded ints\n\tmealtype := EncodeMealtype(r.Form[`mealtype`])\n\tseason := EncodeSeason(r.Form[`season`])\n\n\t// get everything else\n\tdescription := r.PostFormValue(`description`)\n\tingredients := r.PostFormValue(`ingredients`)\n\tinstructions := r.PostFormValue(`instructions`)\n\n\t// TODO better error handling\n\tif err != nil {\n\t\tfmt.Println(\"[WARNING] Something went wrong in SaveRecipe\")\n\t\tc.RenderError(w, 500, \"Sorry, something went wrong.\")\n\t\treturn\n\t}\n\n\t// everything OK: build the recipe, and send it to the database\n\trecipe := Recipe{ID: 0, Name: name, Cuisine: cuisine, Mealtype: mealtype,\n\t\tSeason: season, Description: description, Ingredientlist: ingredients,\n\t\tInstructions: instructions}\n\n\t// if we don't have the id string, then this is a new request.\n\tvars := mux.Vars(r)\n\tidStr := vars[\"id\"]\n\tid := 0\n\n\tif idStr != \"\" {\n\t\tid, _ = strconv.Atoi(idStr)\n\t\trecipe.ID = id\n\t\terr = c.RecipeDB.UpdateRecipe(&recipe)\n\t} else {\n\t\tid, err = c.RecipeDB.NewRecipe(&recipe)\n\t}\n\n\tif err == nil {\n\t\thttp.Redirect(w, r, \"/recipes/\"+fmt.Sprintf(\"%v\", id)+\"/\", http.StatusFound)\n\t}\n\treturn\n}",
"func (m *CopyNotebookModel) SetId(value *string)() {\n m.id = value\n}",
"func (a *Client) ChangeIngredientForScratchPadRecipe(params *ChangeIngredientForScratchPadRecipeParams, authInfo runtime.ClientAuthInfoWriter) (*ChangeIngredientForScratchPadRecipeOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewChangeIngredientForScratchPadRecipeParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ChangeIngredientForScratchPadRecipe\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/scratch-recipe/{recipeId}/ingredient/{ingredient-id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ChangeIngredientForScratchPadRecipeReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ChangeIngredientForScratchPadRecipeOK), nil\n\n}",
"func (o *ChangeAMenuItemsRecipeIDParams) WithContext(ctx context.Context) *ChangeAMenuItemsRecipeIDParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (a *Client) PutScratchPadRecipe(params *PutScratchPadRecipeParams, authInfo runtime.ClientAuthInfoWriter) (*PutScratchPadRecipeOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPutScratchPadRecipeParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PutScratchPadRecipe\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/scratch-recipe/{recipeId}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PutScratchPadRecipeReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PutScratchPadRecipeOK), nil\n\n}",
"func (o *RecipeLipid) SetLipid(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Lipid) error {\n\tvar err error\n\tif insert {\n\t\tif err = related.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t}\n\t}\n\n\tupdateQuery := fmt.Sprintf(\n\t\t\"UPDATE \\\"recipe_lipid\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, []string{\"lipid_id\"}),\n\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 2, recipeLipidPrimaryKeyColumns),\n\t)\n\tvalues := []interface{}{related.ID, o.ID}\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, updateQuery)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\treturn errors.Wrap(err, \"failed to update local table\")\n\t}\n\n\to.LipidID = related.ID\n\tif o.R == nil {\n\t\to.R = &recipeLipidR{\n\t\t\tLipid: related,\n\t\t}\n\t} else {\n\t\to.R.Lipid = related\n\t}\n\n\tif related.R == nil {\n\t\trelated.R = &lipidR{\n\t\t\tRecipeLipid: o,\n\t\t}\n\t} else {\n\t\trelated.R.RecipeLipid = o\n\t}\n\n\treturn nil\n}",
"func SetReactionID(neti, reai int, newID string) int {\n\terrCode := 0\n\tif neti < 0 || neti >= len(networkSet.netSet) || networkSet.netSetBool[neti] == false {\n\t\terrCode = -5\n\t\taddErrorMessage(errCode, \"()\", strconv.Itoa(neti), \"\")\n\t\treturn errCode\n\t}\n\tr := networkSet.netSet[neti].ReactionSet\n\tif reai < 0 || reai >= len(r) || networkSet.netSet[neti].reactionset[reai] == false {\n\t\terrCode = -6\n\t\taddErrorMessage(errCode, \"()\", strconv.Itoa(reai), \"\")\n\t\treturn errCode\n\t}\n\tif newID == \"\" {\n\t\terrCode = -3\n\t\taddErrorMessage(errCode, (\"(\" + strconv.Itoa(neti) + \", \" + strconv.Itoa(reai) + \", \\\"\" + newID + \"\\\")\"), newID, \"\")\n\t\treturn errCode\n\t}\n\tfor i := range r {\n\t\tif r[i].ID == newID {\n\t\t\terrCode = -3\n\t\t\taddErrorMessage(errCode, (\"(\" + strconv.Itoa(neti) + \", \" + strconv.Itoa(reai) + \", \\\"\" + newID + \"\\\")\"), newID, \"\")\n\t\t\treturn errCode\n\t\t}\n\t}\n\tif stackFlag {\n\t\tredoStack = TNetSetStack{}\n\t\tnetSetStack.push(networkSet)\n\t}\n\n\tnetworkSet.netSet[neti].ReactionSet[reai].ID = newID\n\treturn errCode\n}",
"func (s *Service) UpdateRecipe(w http.ResponseWriter, r *http.Request) {\n\t// Get the recipe ID\n\tvars := mux.Vars(r)\n\trecipe := recipe.Recipe{ID: vars[\"id\"]}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&recipe); err != nil {\n\t\tlog.Errorf(\"UpdateRecipe: %v\", err)\n\t\tutils.ResponseWithError(w, http.StatusBadRequest, \"Invalid resquest payload\")\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\t// Update the recipe in the storage\n\tif err := usecases.UpdateRecipe(s.storage, &recipe); err != nil {\n\t\tlog.Errorf(\"UpdateRecipe: %v\", err)\n\t\tutils.ResponseWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tutils.ResponseWithJSON(w, http.StatusOK, recipe)\n}",
"func (a *app) NewRecipe(ctx context.Context, recipeID string) error {\n\treturn a.streamer.Add(newRecipeStream, &streamer.Message{Payload: recipeID})\n}",
"func (c *RBController) Recipe(w http.ResponseWriter, r *http.Request) (err error) {\n\tvars := mux.Vars(r)\n\tid, _ := strconv.Atoi(vars[\"id\"])\n\trecipe, err := c.GetRecipe(id)\n\tif err == nil {\n\t\tc.HTML(w, http.StatusOK, \"recipes/recipe\", recipe)\n\t} else if err == sql.ErrNoRows {\n\t\t// this means that the recipe wasn't found, so we should return a 404 error\n\t\tc.RenderError(w, 404, \"Sorry, your page wasn't found\")\n\t\terr = nil\n\t}\n\treturn\n}",
"func (ds CBDataStore) RecipeUpdate(modelRecipe model.Recipe, id, userID string) error {\n\t_, err := ds.bucket.Replace(modelRecipe.ID, modelRecipe, 0, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (ds CBDataStore) RecipeUpdate(modelRecipe model.Recipe, id, userID string) error {\n\t_, err := ds.bucket.Replace(modelRecipe.Id, modelRecipe, 0, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
To reduce request to GKE metadata server, the base token source is reused across syncers. Note: Initialization is deferred because there are possible to use serviceAccountSecretRef with no available default token source.
|
func initializedBaseTokenSource() (oauth2.TokenSource, error) {
baseTokenSourceOnce.Do(func() {
baseTokenSource, baseTokenSourceErr = google.DefaultTokenSource(context.Background(), cloudPlatformScope)
})
return baseTokenSource, baseTokenSourceErr
}
|
[
"func tokenSource(ctx context.Context) (oauth2.TokenSource, error) {\n\tok, err := credsFile.Exists()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar bootstrapToken *oauth2.Token\n\tif !ok {\n\t\ttok, err := authenticate()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbootstrapToken = tok\n\t}\n\treturn newCachedTokenFile(ctx, bootstrapToken, credsFile.Path())\n}",
"func initializeTokenRequester(centralCfg config.CentralConfig) {\n\tagent.tokenRequester = apicauth.NewPlatformTokenGetter(\n\t\tcentralCfg.GetAuthConfig().GetPrivateKey(),\n\t\tcentralCfg.GetAuthConfig().GetPublicKey(),\n\t\tcentralCfg.GetAuthConfig().GetKeyPassword(),\n\t\tcentralCfg.GetAuthConfig().GetTokenURL(),\n\t\tcentralCfg.GetAuthConfig().GetAudience(),\n\t\tcentralCfg.GetAuthConfig().GetClientID(),\n\t\tcentralCfg.GetAuthConfig().GetTimeout(),\n\t)\n}",
"func (a *Authenticator) TokenSource() oauth2.TokenSource {\n\treturn tokenSource{a}\n}",
"func newTokenSource() *tokenReplacer {\n\t// nil token will cause a refresh\n\ttok, _ := readToken()\n\treturn &tokenReplacer{tok, oauthConfig.TokenSource(context.Background(), tok), &tokenPrompt{}}\n}",
"func (s *keycloak) TokenSource(ctx context.Context, t *oauth2.Token) oauth2.TokenSource {\n\n\treturn s.oauth2Config.TokenSource(ctx, t)\n}",
"func (w *GCPAuthWrapper) SetTokenSource(permissionCode string) error {\n\tvar err error\n\n\tctx := context.Background()\n\n\tw.OauthToken, err = w.Config.Exchange(ctx, permissionCode)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {\n\ttkr := &tokenRefresher{\n\t\tctx: ctx,\n\t\tconf: c,\n\t}\n\tif t != nil {\n\t\ttkr.refreshToken = t.RefreshToken\n\t}\n\treturn &reuseTokenSource{\n\t\tt: t,\n\t\tnew: tkr,\n\t}\n}",
"func Init(ctx context.Context, local bool) (oauth2.TokenSource, error) {\n\treturn auth_steps.Init(ctx, local, auth.ScopeGerrit, auth.ScopeUserinfoEmail)\n}",
"func (b *BungieConfig) TokenSource(t *oauth2.Token) oauth2.TokenSource {\n\treturn newTokenSource(t, b.cfg.ClientID)\n}",
"func StorageTokenSource(ctx context.Context, c *Config, t *oauth2.Token) oauth2.TokenSource {\n\tif t == nil || !t.Valid() {\n\t\tif tok, err := c.Storage.GetToken(); err == nil {\n\t\t\tt = tok\n\t\t}\n\t}\n\tts := c.Config.TokenSource(ctx, t)\n\treturn &storageTokenSource{c, ts}\n}",
"func (c *Credentials) TokenSource(ctx context.Context) (oauth2.TokenSource, error) {\n\tswitch c.Type {\n\tcase CredentialsTypeADC:\n\t\treturn initializedBaseTokenSource()\n\tcase CredentialsTypeServiceAccountJSON:\n\t\tcred, err := google.CredentialsFromJSON(ctx, c.ServiceAccountJSON, cloudPlatformScope)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cred.TokenSource, nil\n\tcase CredentialsTypeImpersonation:\n\t\tbaseTS, err := initializedBaseTokenSource()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{\n\t\t\tTargetPrincipal: c.ImpersonateConfig.TargetServiceAccount,\n\t\t\tDelegates: c.ImpersonateConfig.Delegates,\n\t\t\tScopes: []string{cloudPlatformScope},\n\t\t},\n\t\t\toption.WithTokenSource(baseTS),\n\t\t)\n\t\treturn ts, err\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"credentials type unknown: %v\", c.Type)\n\t}\n}",
"func init() {\n\ttr := utilnet.SetTransportDefaults(&http.Transport{})\n\tmetadataHTTPClientTimeout := time.Second * 10\n\thttpClient := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: metadataHTTPClientTimeout,\n\t}\n\tcredentialprovider.RegisterCredentialProvider(\"google-dockercfg\",\n\t\t&credentialprovider.CachingDockerConfigProvider{\n\t\t\tProvider: &dockerConfigKeyProvider{\n\t\t\t\tmetadataProvider{Client: httpClient},\n\t\t\t},\n\t\t\tLifetime: 60 * time.Second,\n\t\t})\n\n\tcredentialprovider.RegisterCredentialProvider(\"google-dockercfg-url\",\n\t\t&credentialprovider.CachingDockerConfigProvider{\n\t\t\tProvider: &dockerConfigUrlKeyProvider{\n\t\t\t\tmetadataProvider{Client: httpClient},\n\t\t\t},\n\t\t\tLifetime: 60 * time.Second,\n\t\t})\n\n\tcredentialprovider.RegisterCredentialProvider(\"google-container-registry\",\n\t\t// Never cache this. The access token is already\n\t\t// cached by the metadata service.\n\t\t&containerRegistryProvider{\n\t\t\tmetadataProvider{Client: httpClient},\n\t\t})\n}",
"func newTokenSource(ctx context.Context, conf *clientcredentials.Config) oauth2.TokenSource {\n\tsource := &tokenSource{\n\t\tctx: ctx,\n\t\tconf: conf,\n\t\torig: conf.TokenSource(ctx),\n\t}\n\treturn source\n}",
"func NewTokenSource(ctx context.Context, key []byte, authorizeHandler AuthorizeHandler, scope ...string) (oauth2.TokenSource, error) {\n\t// If key is not provided, use DefaultTokenSource.\n\tif key == nil {\n\t\treturn google.DefaultTokenSource(ctx, scope...)\n\t}\n\n\tvar secret map[string]interface{}\n\tif err := json.Unmarshal(key, &secret); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO: support \"web\" client secret by using a local web server.\n\t// According to the content in the json, decide whether to run three-legged\n\t// flow (for client secret) or two-legged flow (for service account).\n\tif _, ok := secret[\"installed\"]; ok {\n\t\t// If authorizeHandler is not given, set it to the default one.\n\t\tif authorizeHandler == nil {\n\t\t\tauthorizeHandler = defaultAuthorizeFlowHandler\n\t\t}\n\n\t\t// When the secret contains \"installed\" field, it is a client secret. We\n\t\t// will run a three-legged flow\n\t\tconf, err := google.ConfigFromJSON(key, scope...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// In the authorize flow, user will paste a verification code back to console.\n\t\tauthUrl := conf.AuthCodeURL(\"\", oauth2.AccessTypeOffline, oauth2.ApprovalForce)\n\t\tcode, err := authorizeHandler(authUrl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// The verify flow takes in the verification code from authorize flow, sends a\n\t\t// POST request containing the code to fetch oauth token.\n\t\ttoken, err := conf.Exchange(ctx, code)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn conf.TokenSource(ctx, token), nil\n\n\t}\n\n\tif tokenType, ok := secret[\"type\"]; ok && \"service_account\" == tokenType {\n\t\t// If the token type is \"service_account\", we will run the two-legged flow\n\t\tjwtCfg, err := google.JWTConfigFromJSON(key, scope...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn jwtCfg.TokenSource(ctx), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Unsupported token type.\")\n}",
"func (r *Retriever) StartTokenRefresh() error {\n\tglog.Infof(\"Refreshing CRC credentials \")\n\tsecret, err := config.GetKubeClient().CoreV1().Secrets(\"openshift-config\").\n\t\tGet(context.TODO(), \"pull-secret\", metav1.GetOptions{})\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tglog.V(2).Infof(\"pull-secret does not exist\")\n\t\t\terr = nil\n\t\t} else if errors.IsForbidden(err) {\n\t\t\tglog.V(2).Infof(\"Operator does not have permission to check pull-secret: %v\", err)\n\t\t\terr = nil\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"could not check pull-secret: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\tif secret != nil {\n\t\tif data := secret.Data[\".dockerconfigjson\"]; len(data) > 0 {\n\t\t\tvar pullSecret serializedAuthMap\n\t\t\tif err := json.Unmarshal(data, &pullSecret); err != nil {\n\t\t\t\tglog.Errorf(\"Unable to unmarshal cluster pull-secret: %v\", err)\n\t\t\t}\n\t\t\tif auth, ok := pullSecret.Auths[\"cloud.openshift.com\"]; ok {\n\t\t\t\ttoken := strings.TrimSpace(auth.Auth)\n\t\t\t\tif strings.Contains(token, \"\\n\") || strings.Contains(token, \"\\r\") {\n\t\t\t\t\treturn fmt.Errorf(\"cluster authorization token is not valid: contains newlines\")\n\t\t\t\t}\n\t\t\t\tif len(token) > 0 {\n\t\t\t\t\tglog.V(2).Info(\"Found cloud.openshift.com token \")\n\t\t\t\t\tr.Token = \"Bearer \" + token\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (_Locking *LockingCallerSession) BaseToken() (common.Address, error) {\n\treturn _Locking.Contract.BaseToken(&_Locking.CallOpts)\n}",
"func attemptInit(ctx context.Context, account string, scopes []string, audience string) (TokenProvider, error) {\n\t// This mutex is used to avoid hitting GKE metadata server concurrently if\n\t// we have a stampede of goroutines. It doesn't actually protect any shared\n\t// state in the current process.\n\tglobalGCELock.Lock()\n\tdefer globalGCELock.Unlock()\n\n\tif account == \"\" {\n\t\taccount = \"default\"\n\t}\n\n\t// Grab an email associated with the account. This must not be failing on\n\t// a healthy VM if the account is present. If it does, the metadata server is\n\t// broken.\n\temail, err := metadataClient.Email(account)\n\tif err != nil {\n\t\t// Note: we purposefully delay this check only after the first call to\n\t\t// the metadata fails because metadata.OnGCE was observed to often report\n\t\t// \"false\" when running on GKE due to gke-metadata-server being slow. Our\n\t\t// metadataClient has (much) higher timeouts that the client used by\n\t\t// metadata.OnGCE, and it handles slow gke-metadata-server better. So if we\n\t\t// end up here and metadata.OnGCE also says \"false\", then we are not on GCE\n\t\t// with high probability. The downside is that it may take up to 15 sec to\n\t\t// detect this (or whatever ResponseHeaderTimeout in metadataClient is).\n\t\tif !metadata.OnGCE() {\n\t\t\treturn nil, ErrBadCredentials\n\t\t}\n\t\tif _, yep := err.(metadata.NotDefinedError); yep {\n\t\t\treturn nil, ErrInsufficientAccess\n\t\t}\n\t\treturn nil, transient.Tag.Apply(err)\n\t}\n\n\t// Ensure the account has requested scopes. Assume 'cloud-platform' scope\n\t// covers all possible scopes. This is important when using GKE Workload\n\t// Identities: the metadata server always reports only 'cloud-platform' scope\n\t// there. Its presence should be enough to cover all scopes used in practice.\n\t// The exception is non-cloud scopes (like gerritcodereview or G Suite). 
To\n\t// use such scopes, one will have to use impersonation through Cloud IAM APIs,\n\t// which *are* covered by cloud-platform (see ActAsServiceAccount in auth.go).\n\tif audience == \"\" {\n\t\tavailableScopes, err := metadataClient.Scopes(account)\n\t\tif err != nil {\n\t\t\treturn nil, transient.Tag.Apply(err)\n\t\t}\n\t\tavailableSet := stringset.NewFromSlice(availableScopes...)\n\t\tif !availableSet.Has(\"https://www.googleapis.com/auth/cloud-platform\") {\n\t\t\tfor _, requested := range scopes {\n\t\t\t\tif !availableSet.Has(requested) {\n\t\t\t\t\tlogging.Warningf(ctx, \"GCE service account %q doesn't have required scope %q (all scopes: %q)\", account, requested, availableScopes)\n\t\t\t\t\treturn nil, ErrInsufficientAccess\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &gceTokenProvider{\n\t\taccount: account,\n\t\temail: email,\n\t\taudience: audience,\n\t\tcacheKey: CacheKey{\n\t\t\tKey: fmt.Sprintf(\"gce/%s\", account),\n\t\t\tScopes: scopes,\n\t\t},\n\t}, nil\n}",
"func (c *Config) TokenSource() oauth2.TokenSource {\n\tpk, err := jwt.ParseRSAPrivateKeyFromPEM(c.PrivateKey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn oauth2.ReuseTokenSource(nil, appSource{\n\t\tappID: c.AppID,\n\t\texpire: c.Expire,\n\t\tpk: pk,\n\t})\n}",
"func newTokenManagerFromAPIKey(config *aws.Config, apiKey, authEndPoint string, advisoryRefreshTimeout,\n\tmandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time,\n\tclient IBMClientDo) *defaultTMImplementation {\n\t// when the client is nil creates a new one using the config passed as argument\n\tif client == nil {\n\t\tclient = defaultIBMClient(config)\n\t}\n\n\t// set the function to get the initial token the defaultInit that uses the APIKey passed as argument\n\tinitFunc := defaultInit(apiKey, authEndPoint, client)\n\treturn newTokenManager(config, initFunc, authEndPoint, advisoryRefreshTimeout, mandatoryRefreshTimeout, timeFunc,\n\t\tclient)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TokenSource create oauth2.TokenSource for Credentials. Note: We can specify scopes needed for spannerautoscaler but it does increase maintenance cost. We should already use least privileged Google Service Accounts so it use cloudPlatformScope.
|
func (c *Credentials) TokenSource(ctx context.Context) (oauth2.TokenSource, error) {
switch c.Type {
case CredentialsTypeADC:
return initializedBaseTokenSource()
case CredentialsTypeServiceAccountJSON:
cred, err := google.CredentialsFromJSON(ctx, c.ServiceAccountJSON, cloudPlatformScope)
if err != nil {
return nil, err
}
return cred.TokenSource, nil
case CredentialsTypeImpersonation:
baseTS, err := initializedBaseTokenSource()
if err != nil {
return nil, err
}
ts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{
TargetPrincipal: c.ImpersonateConfig.TargetServiceAccount,
Delegates: c.ImpersonateConfig.Delegates,
Scopes: []string{cloudPlatformScope},
},
option.WithTokenSource(baseTS),
)
return ts, err
default:
return nil, fmt.Errorf("credentials type unknown: %v", c.Type)
}
}
|
[
"func NewTokenSource(ctx context.Context, key []byte, authorizeHandler AuthorizeHandler, scope ...string) (oauth2.TokenSource, error) {\n\t// If key is not provided, use DefaultTokenSource.\n\tif key == nil {\n\t\treturn google.DefaultTokenSource(ctx, scope...)\n\t}\n\n\tvar secret map[string]interface{}\n\tif err := json.Unmarshal(key, &secret); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO: support \"web\" client secret by using a local web server.\n\t// According to the content in the json, decide whether to run three-legged\n\t// flow (for client secret) or two-legged flow (for service account).\n\tif _, ok := secret[\"installed\"]; ok {\n\t\t// If authorizeHandler is not given, set it to the default one.\n\t\tif authorizeHandler == nil {\n\t\t\tauthorizeHandler = defaultAuthorizeFlowHandler\n\t\t}\n\n\t\t// When the secret contains \"installed\" field, it is a client secret. We\n\t\t// will run a three-legged flow\n\t\tconf, err := google.ConfigFromJSON(key, scope...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// In the authorize flow, user will paste a verification code back to console.\n\t\tauthUrl := conf.AuthCodeURL(\"\", oauth2.AccessTypeOffline, oauth2.ApprovalForce)\n\t\tcode, err := authorizeHandler(authUrl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// The verify flow takes in the verification code from authorize flow, sends a\n\t\t// POST request containing the code to fetch oauth token.\n\t\ttoken, err := conf.Exchange(ctx, code)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn conf.TokenSource(ctx, token), nil\n\n\t}\n\n\tif tokenType, ok := secret[\"type\"]; ok && \"service_account\" == tokenType {\n\t\t// If the token type is \"service_account\", we will run the two-legged flow\n\t\tjwtCfg, err := google.JWTConfigFromJSON(key, scope...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn jwtCfg.TokenSource(ctx), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Unsupported token type.\")\n}",
"func CredentialsTokenSource(creds *google.Credentials) TokenSource {\n\tif creds == nil {\n\t\treturn nil\n\t}\n\treturn TokenSource(creds.TokenSource)\n}",
"func (s *keycloak) TokenSource(ctx context.Context, t *oauth2.Token) oauth2.TokenSource {\n\n\treturn s.oauth2Config.TokenSource(ctx, t)\n}",
"func newTokenSource(ctx context.Context, conf *clientcredentials.Config) oauth2.TokenSource {\n\tsource := &tokenSource{\n\t\tctx: ctx,\n\t\tconf: conf,\n\t\torig: conf.TokenSource(ctx),\n\t}\n\treturn source\n}",
"func tokenSource(ctx context.Context) (oauth2.TokenSource, error) {\n\tok, err := credsFile.Exists()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar bootstrapToken *oauth2.Token\n\tif !ok {\n\t\ttok, err := authenticate()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbootstrapToken = tok\n\t}\n\treturn newCachedTokenFile(ctx, bootstrapToken, credsFile.Path())\n}",
"func StorageTokenSource(ctx context.Context, c *Config, t *oauth2.Token) oauth2.TokenSource {\n\tif t == nil || !t.Valid() {\n\t\tif tok, err := c.Storage.GetToken(); err == nil {\n\t\t\tt = tok\n\t\t}\n\t}\n\tts := c.Config.TokenSource(ctx, t)\n\treturn &storageTokenSource{c, ts}\n}",
"func NewTokenSource(ctx context.Context, conf DownscopingConfig) (oauth2.TokenSource, error) {\n\tif conf.RootSource == nil {\n\t\treturn nil, fmt.Errorf(\"downscope: rootSource cannot be nil\")\n\t}\n\tif len(conf.Rules) == 0 {\n\t\treturn nil, fmt.Errorf(\"downscope: length of AccessBoundaryRules must be at least 1\")\n\t}\n\tif len(conf.Rules) > 10 {\n\t\treturn nil, fmt.Errorf(\"downscope: length of AccessBoundaryRules may not be greater than 10\")\n\t}\n\tfor _, val := range conf.Rules {\n\t\tif val.AvailableResource == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"downscope: all rules must have a nonempty AvailableResource: %+v\", val)\n\t\t}\n\t\tif len(val.AvailablePermissions) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"downscope: all rules must provide at least one permission: %+v\", val)\n\t\t}\n\t}\n\treturn downscopingTokenSource{ctx: ctx, config: conf}, nil\n}",
"func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource {\n\treturn TokenSourceWithPKCE(ctx, config, state, authHandler, nil)\n}",
"func (gsp *GoogleServiceProvider) TokenSource(c context.Context, scopes ...string) (oauth2.TokenSource, error) {\n\tcbts := contextBoundTokenSource{\n\t\tContext: c,\n\t\tcache: gsp.Cache,\n\t\tcacheKey: accessTokenKeyForScopes(scopes),\n\t\tmakeTokenSource: func(c context.Context) (oauth2.TokenSource, error) {\n\t\t\treturn google.DefaultTokenSource(c, scopes...)\n\t\t},\n\t}\n\treturn &cbts, nil\n}",
"func OIDCFederatedTokenSource(tokenConfig *OIDCFederatedTokenConfig) (oauth2.TokenSource, error) {\n\n\tif &tokenConfig.SourceTokenSource == nil {\n\t\treturn nil, fmt.Errorf(\"oauth2/google: Source OIDC Token cannot be nil\")\n\t}\n\n\tif tokenConfig.Scope == \"\" {\n\t\ttokenConfig.Scope = GCP_OIDC_CLOUD_PLATFORM_SCOPE\n\t}\n\treturn &oidcFederatedTokenSource{\n\t\trefreshMutex: &sync.Mutex{},\n\t\trootSource: tokenConfig.SourceTokenSource,\n\t\tscope: tokenConfig.Scope,\n\t\ttargetResource: tokenConfig.TargetResource,\n\t\ttargetServiceAccount: tokenConfig.TargetServiceAccount,\n\t\tuseIAMToken: tokenConfig.UseIAMToken,\n\t}, nil\n}",
"func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource {\n\treturn TokenSourceEnv(ctx, envPrivateKey, scopes...)\n}",
"func TokenSource(aud string) oauth2.TokenSource {\n\tidSrc := idTokenSrc{aud: aud}\n\tinitialToken := &oauth2.Token{}\n\treturn oauth2.ReuseTokenSource(initialToken, idSrc)\n}",
"func newTokenSource(ctx context.Context, settings *Settings) (*internal.TokenSource, error) {\n\tvar ts internal.TokenSource\n\tvar err error\n\tif settings == nil {\n\t\tts, err = DefaultTokenSource(ctx, DefaultScope)\n\t} else if settings.APIKey != \"\" {\n\t\treturn nil, nil\n\t} else if settings.Scope != \"\" {\n\t\tts, err = OAuthJSONTokenSource(ctx, settings)\n\t} else {\n\t\tts, err = JWTTokenSource(ctx, settings)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ts, err\n}",
"func NewSourceFromToken(ctx context.Context, clientID, secret string, scope ...string) (ClientSource, error) {\n\tif clientID == \"\" {\n\t\treturn nil, errors.New(\"No client ID provided\")\n\t}\n\tif secret == \"\" {\n\t\treturn nil, errors.New(\"No secret provided\")\n\t}\n\n\tsource := &tokenSource{\n\t\tconfig: oauth2.Config{\n\t\t\tClientID: clientID,\n\t\t\tClientSecret: secret,\n\t\t\tEndpoint: google.Endpoint,\n\t\t\tScopes: scope,\n\t\t},\n\t}\n\n\tvar err error\n\tsource.token, err = tokenFromWeb(ctx, &source.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn source, nil\n}",
"func (c *Config) TokenSource() oauth2.TokenSource {\n\tpk, err := jwt.ParseRSAPrivateKeyFromPEM(c.PrivateKey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn oauth2.ReuseTokenSource(nil, appSource{\n\t\tappID: c.AppID,\n\t\texpire: c.Expire,\n\t\tpk: pk,\n\t})\n}",
"func newTokenSource() *tokenReplacer {\n\t// nil token will cause a refresh\n\ttok, _ := readToken()\n\treturn &tokenReplacer{tok, oauthConfig.TokenSource(context.Background(), tok), &tokenPrompt{}}\n}",
"func (a *Authenticator) TokenSource() oauth2.TokenSource {\n\treturn tokenSource{a}\n}",
"func NewIAMTokenSource(ctx context.Context, cfg IAMConfig) (oauth2.TokenSource, error) {\n\tvar (\n\t\terr error\n\t\ttknSrc oauth2.TokenSource\n\t)\n\tif cfg.JSON != nil {\n\t\tcreds, err := google.CredentialsFromJSON(ctx, cfg.JSON, iam.CloudPlatformScope)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttknSrc = creds.TokenSource\n\t} else {\n\t\ttknSrc, err = defaultTokenSource(ctx, iam.CloudPlatformScope)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsvc, err := iam.New(oauth2.NewClient(ctx, tknSrc))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif cfg.IAMAddress != \"\" {\n\t\tsvc.BasePath = cfg.IAMAddress\n\t}\n\n\tsrc := &iamTokenSource{\n\t\tcfg: cfg,\n\t\tsvc: svc,\n\t}\n\n\ttkn, err := src.Token()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to create initial token\")\n\t}\n\n\treturn oauth2.ReuseTokenSource(tkn, src), nil\n}",
"func (w *GCPAuthWrapper) SetTokenSource(permissionCode string) error {\n\tvar err error\n\n\tctx := context.Background()\n\n\tw.OauthToken, err = w.Config.Exchange(ctx, permissionCode)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
UpdateTarget updates target and returns wether did update or not.
|
// UpdateTarget records a new project/instance/credentials target on the
// syncer and reports whether any of the three fields actually changed.
func (s *syncer) UpdateTarget(projectID, instanceID string, credentials *Credentials) bool {
	changed := false
	if projectID != s.projectID {
		s.projectID = projectID
		changed = true
	}
	if instanceID != s.instanceID {
		s.instanceID = instanceID
		changed = true
	}
	// TODO: Consider deepCopy — the pointer is stored as-is, so the caller
	// still shares the Credentials value with the syncer.
	if !reflect.DeepEqual(credentials, s.credentials) {
		s.credentials = credentials
		changed = true
	}
	return changed
}
|
[
"func (s *syncer) UpdateTarget(projectID, instanceID string, serviceAccountJSON []byte) bool {\n\tupdated := false\n\n\tif s.projectID != projectID {\n\t\tupdated = true\n\t\ts.projectID = projectID\n\t}\n\n\tif s.instanceID != instanceID {\n\t\tupdated = true\n\t\ts.instanceID = instanceID\n\t}\n\n\tif string(s.serviceAccountJSON) != string(serviceAccountJSON) {\n\t\tupdated = true\n\t\ts.serviceAccountJSON = serviceAccountJSON\n\t}\n\n\treturn updated\n}",
"func (mr *ClientMockRecorder) UpdateTarget(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateTarget\", reflect.TypeOf((*Client)(nil).UpdateTarget), arg0, arg1, arg2)\n}",
"func (m *Client) UpdateTarget(arg0 context.Context, arg1 int64, arg2 zendesk.Target) (zendesk.Target, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateTarget\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(zendesk.Target)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (t Targets) Update(target *Target) error {\n\tif target.ID == nil {\n\t\treturn fmt.Errorf(\"target id field not set\")\n\t}\n\n\treturn t.crudTarget(\"PUT\", fmt.Sprintf(\"%s/%s\", baseTargetPath, *target.ID), target)\n\n}",
"func UpdateTarget(input FlightCoordinate) {\n\tif client != nil && input != (FlightCoordinate{}) {\n\t\tSendToZMQMessageChannelAuto(string(ZOSMService), ZUpdateTarget(input), true)\n\t\tclient.Go(\"RPCRegistry.UpdateTarget\", &input, &RPCNullArg, nil)\n\t}\n}",
"func (s *Service) UpdateTarget(ctx context.Context, update *platform.ScraperTarget) (target *platform.ScraperTarget, err error) {\n\top := OpPrefix + platform.OpUpdateTarget\n\tif !update.ID.Valid() {\n\t\treturn nil, &platform.Error{\n\t\t\tCode: platform.EInvalid,\n\t\t\tOp: op,\n\t\t\tMsg: \"id is invalid\",\n\t\t}\n\t}\n\t_, pe := s.loadScraperTarget(update.ID)\n\tif pe != nil {\n\t\treturn nil, &platform.Error{\n\t\t\tOp: op,\n\t\t\tErr: pe,\n\t\t}\n\t}\n\tif err = s.PutTarget(ctx, update); err != nil {\n\t\treturn nil, &platform.Error{\n\t\t\tOp: op,\n\t\t\tErr: pe,\n\t\t}\n\t}\n\n\treturn update, nil\n}",
"func NeedsUpdate() (bool, error) {\n\ttarget := findTarget()\n\tif target == nil {\n\t\treturn false, nil\n\t}\n\texists, err := target.isSetup()\n\treturn !exists, err\n}",
"func (o *RequestTarget) HasTarget() bool {\n\tif o != nil && o.Target != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (c *FakeAWSSNSTargets) Update(ctx context.Context, aWSSNSTarget *v1alpha1.AWSSNSTarget, opts v1.UpdateOptions) (result *v1alpha1.AWSSNSTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(awssnstargetsResource, c.ns, aWSSNSTarget), &v1alpha1.AWSSNSTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.AWSSNSTarget), err\n}",
"func (s *ScraperTargetStoreService) UpdateTarget(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) {\n\tst, err := s.s.GetTargetByID(ctx, upd.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, _, err := AuthorizeWrite(ctx, influxdb.ScraperResourceType, upd.ID, st.OrgID); err != nil {\n\t\treturn nil, err\n\t}\n\tif _, _, err := AuthorizeWrite(ctx, influxdb.BucketsResourceType, st.BucketID, st.OrgID); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.s.UpdateTarget(ctx, upd, userID)\n}",
"func (mr *MockSSMAPIMockRecorder) UpdateMaintenanceWindowTarget(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateMaintenanceWindowTarget\", reflect.TypeOf((*MockSSMAPI)(nil).UpdateMaintenanceWindowTarget), arg0)\n}",
"func (m *MockSSMAPI) UpdateMaintenanceWindowTarget(arg0 *ssm.UpdateMaintenanceWindowTargetInput) (*ssm.UpdateMaintenanceWindowTargetOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateMaintenanceWindowTarget\", arg0)\n\tret0, _ := ret[0].(*ssm.UpdateMaintenanceWindowTargetOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func UpdateTargetHandler(w http.ResponseWriter, r *http.Request) {\n\tenv := envFromRequest(r)\n\n\tip, _, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tmac, scriptName, environment, params := parsePostForm(r.PostForm)\n\tif mac == \"\" || scriptName == \"\" {\n\t\thttp.Error(w, \"MAC address and target must not be empty\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tserver := server.New(mac, ip, \"\")\n\tinputErr, err := polling.UpdateTarget(\n\t\tenv.Logger, env.ServerStates, env.Templates, env.EventLog, env.BaseURL, server,\n\t\tscriptName, environment, params)\n\n\tif err != nil {\n\t\tif inputErr {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t} else {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}",
"func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (i *Injector) UpdateTargets(ts map[string][]*target.Target) error {\n\ti.curTargets = ts\n\treturn i.inject()\n}",
"func (a Actions) Update() bool {\n\tif len(a) != 1 {\n\t\treturn false\n\t}\n\n\treturn a[0] == ActionUpdate\n}",
"func (b *Bacteria) IsTarget(t Bacteria) bool {\n\tdist := b.DistToTarget(t) - t.sizeRadius\n\tif dist <= b.attackRange {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (o *OdataErrorDetail) HasTarget() bool {\n\tif o != nil && o.Target != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func UpdateTarget(b Bacteria, targets []Bacteria) []Bacteria {\n\tfor index := 0; index < len(targets); index++ {\n\t\tif b.ABenzyme.lock != targets[index].resistEnzyme.key { // lock and key doesn't match\n\t\t\ttargets[index] = InflictDamageE(targets[index], b.ABenzyme.potency) // antibiotic enzyme can incur damage to its full potency\n\t\t} else if b.ABenzyme.lock == targets[index].resistEnzyme.key && b.ABenzyme.potency > targets[index].resistEnzyme.potency { // if lock and key matches\n\t\t\tattackDamage := b.ABenzyme.potency - targets[index].resistEnzyme.potency // attack damage compansates resistEnzyme's potency\n\t\t\ttargets[index] = InflictDamageE(targets[index], attackDamage)\n\t\t}\n\t}\n\treturn targets\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Start spins up a goroutine that continuously polls given API endpoint at interval Poller.Interval
|
// Start spins up a goroutine that continuously polls the configured URL at
// interval p.Interval, sending each result on p.Out. One poll is performed
// immediately on start, then one per tick. The goroutine runs until a
// *sync.WaitGroup is received on p.Shutdown; Done is called on that group
// when the goroutine returns so Stop can block until shutdown completes.
func (p *Poller) Start() {
	go func(url string, interval time.Duration, out chan<- PollMsg, shutdown <-chan *sync.WaitGroup) {
		// wg is replaced by the group received on shutdown; the deferred
		// Done signals the shutdown requester that we have exited.
		wg := &sync.WaitGroup{}
		defer func() {
			wg.Done()
		}()
		ticker := time.NewTicker(interval)
		defer ticker.Stop() // fix: original leaked the ticker

		// poll performs one HTTP GET and delivers the result on out.
		poll := func() {
			msg := PollMsg{}
			resp, err := http.Get(url)
			if err != nil {
				// Fix: the original went on to read resp.Body here even
				// though resp is nil when err != nil, causing a panic.
				msg.Error = append(msg.Error, err)
				out <- msg
				return
			}
			defer resp.Body.Close()
			data, err2 := ioutil.ReadAll(resp.Body)
			if err2 != nil {
				msg.Error = append(msg.Error, err2)
			}
			msg.Payload = data
			out <- msg
		}

		// Initial poll before the first tick, matching original behavior.
		poll()
		for {
			select {
			case wg = <-shutdown:
				return
			case <-ticker.C:
				poll()
			}
		}
	}(p.URL, p.Interval, p.Out, p.Shutdown)
}
|
[
"func (poller *Poller) Start() {\n\tpoller.Init()\n\n\tif poller.isRunning {\n\t\treturn\n\t}\n\tpoller.isRunning = true\n\n\tgo func() {\n\t\tpoller.poll()\n\t\ttickC := time.Tick(poller.Rate)\n\n\t\tfor {\n\t\t\tselect {\n\n\t\t\tcase <-tickC:\n\t\t\t\tpoller.poll()\n\n\t\t\tcase <-poller.stopC:\n\t\t\t\tpoller.isRunning = false\n\t\t\t\treturn\n\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (p *Poller) Start() {\n\tif p.IsStopped() {\n\t\tpanic(\"cannot restart poller after it has been stopped\")\n\t}\n\n\tp.group.Add(1)\n\tgo p.runPolling()\n}",
"func (s *Service) Start(ctx context.Context) <-chan error {\n\ttick := make(chan error, 1)\n\n\tif s.Interval <= 0 {\n\t\ttick <- errors.New(\"cannot run poll service for non-positive interval\")\n\t\tclose(tick)\n\t\treturn tick\n\t}\n\n\tgo func(c context.Context, t chan<- error, interval time.Duration) {\n\t\t// update once when the service starts.\n\t\tt <- nil\n\n\t\tticker := time.NewTicker(interval)\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tt <- nil\n\n\t\t\tcase <-ctx.Done():\n\t\t\t\tt <- ctx.Err()\n\t\t\t\tclose(t)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx, tick, time.Duration(s.Interval))\n\n\treturn tick\n}",
"func (w *IndexPoller) Start() {\n\tw.channel <- w.run()\n\tticker := time.NewTicker(w.pollRate)\n\tfor {\n\t\tselect {\n\t\tcase <-w.controlChannel:\n\t\t\tdefer close(w.channel)\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tw.channel <- w.run()\n\t\t}\n\t}\n}",
"func (m *Microservice) StartOperationPolling() {\n\tinterval := m.Config.viper.GetString(\"agent.operations.pollRate\")\n\n\tzap.S().Infof(\"Adding operation polling task with interval: %s\", interval)\n\t_, err := m.Scheduler.cronjob.AddFunc(interval, func() {\n\t\tm.CheckForNewConfiguration()\n\t})\n\n\tif err != nil {\n\t\tzap.S().Errorf(\"Could not create polling task with interval [%s]. %s\", interval, err)\n\t}\n}",
"func (inst *Instance) Start() error {\n\tinterval := time.Duration(inst.interval) * time.Second\n\n\tinst.logger.Info().Str(\"collection_interval\", interval.String()).Msg(\"client started\")\n\n\t// fire every minute so we run at the closest proximity to the interval boundary regardless of whether\n\t// it is 1m or 5m coupled with the duration of each individual collection run\n\t// NOTE: ticker doesn't fire EXACTLY on boundaries (e.g. 59.9997, 3m59.9988, etc.)\n\tticker := time.NewTicker(1 * time.Minute)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-inst.ctx.Done():\n\t\t\treturn nil\n\t\tcase <-ticker.C:\n\t\t\tinst.Lock()\n\t\t\tif inst.lastStart != nil {\n\t\t\t\telapsed := time.Since(*inst.lastStart)\n\t\t\t\tif elapsed < interval {\n\t\t\t\t\tif interval-elapsed > 2*time.Second {\n\t\t\t\t\t\tinst.logger.Debug().Str(\"interval\", interval.String()).Str(\"delta\", elapsed.String()).Msg(\"interval not reached\")\n\t\t\t\t\t\tinst.Unlock()\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif inst.running {\n\t\t\t\tinst.Unlock()\n\t\t\t\tinst.logger.Warn().Msg(\"collection already in progress, not starting another\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinst.logger.Debug().Str(\"region\", inst.regionCfg.Name).Msg(\"setting up session\")\n\t\t\tsess, err := inst.createSession(inst.regionCfg.Name)\n\t\t\tif err != nil {\n\t\t\t\tinst.logger.Warn().Err(err).Msg(\"creating AWS SDK session\")\n\t\t\t\tinst.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// calculate one timeseries range for all requests from collectors\n\t\t\tstart := time.Now()\n\t\t\tdelta := 10 * time.Minute // get last 10 minutes of samples\n\t\t\tif inst.lastStart != nil {\n\t\t\t\tdelta = start.Sub(*inst.lastStart) + interval\n\t\t\t}\n\t\t\ttsEnd := start\n\t\t\ttsStart := tsEnd.Add(-delta)\n\t\t\tinst.logger.Info().Time(\"start\", tsStart).Time(\"end\", tsEnd).Str(\"delta\", delta.String()).Msg(\"collection timeseries range\")\n\n\t\t\tinst.lastStart = 
&start\n\t\t\tinst.running = true\n\t\t\tinst.Unlock()\n\n\t\t\ttimespan := collectors.MetricTimespan{\n\t\t\t\tStart: tsStart,\n\t\t\t\tEnd: tsEnd,\n\t\t\t\tPeriod: inst.period,\n\t\t\t}\n\n\t\t\tgo func() {\n\t\t\t\tfor _, c := range inst.collectors {\n\t\t\t\t\tif err := c.Collect(sess, timespan, inst.baseTags); err != nil {\n\t\t\t\t\t\tinst.check.ReportError(errors.WithMessage(err, fmt.Sprintf(\"id: %s, collector: %s\", inst.cfg.ID, c.ID())))\n\t\t\t\t\t\tinst.logger.Warn().Err(err).Str(\"collector\", c.ID()).Msg(\"collecting telemetry\")\n\t\t\t\t\t\t// need to determine which errors from the various\n\t\t\t\t\t\t// cloud service providers are fatal vs retry vs ???\n\t\t\t\t\t}\n\t\t\t\t\tif inst.done() {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tinst.Lock()\n\t\t\t\tinst.running = false\n\t\t\t\tinst.Unlock()\n\t\t\t\tinst.logger.Info().Str(\"duration\", time.Since(start).String()).Msg(\"collection complete\")\n\t\t\t}()\n\t\t}\n\t}\n}",
"func (this Client) BeginPolling() {\n fmt.Println(\"LET'S....GO...TO....WORK!!!\")\n for i:= 0; i < len(this.Workers); i++ {\n go this.Workers[i].Poll(this.JobHandlers)\n }\n}",
"func (a *API) BeginPolling(parentCtx context.Context) *API {\n\tlog.Println(\"Starting tasks...\")\n\tctx := cancellablecontext.New(parentCtx)\n\tfor _, task := range a.registeredTasks {\n\t\ttask.Start(ctx)\n\t}\n\tnumberOfStartedTasks := 0\n\tfor i := 0; numberOfStartedTasks < len(a.registeredTasks); i++ {\n\t\tlog.Println(\"Waiting for task to report that it has started...\")\n\t\t<-a.registeredTasks[i].Started()\n\t\tnumberOfStartedTasks++\n\t}\n\tlog.Println(\"All tasks have started.\")\n\treturn a\n}",
"func (f *Input) startPoller(ctx context.Context) {\n\tf.wg.Add(1)\n\tgo func() {\n\t\tdefer f.wg.Done()\n\t\tglobTicker := time.NewTicker(f.PollInterval)\n\t\tdefer globTicker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-globTicker.C:\n\t\t\t}\n\n\t\t\tf.poll(ctx)\n\t\t}\n\t}()\n}",
"func (c *Client) Start() {\n\tu := c.url\n\tif c.LoggingEnabled {\n\t\tlog.Println(\"Now observing changes on\", u.String())\n\t}\n\n\tatomic.AddUint64(&(c.runID), 1)\n\tcurrentRunID := atomic.LoadUint64(&(c.runID))\n\n\tgo func(runID uint64, u *url.URL) {\n\t\tsince := int64(0)\n\t\tfor {\n\t\t\tpr, err := c.fetchEvents(since)\n\n\t\t\tif err != nil {\n\t\t\t\tif c.LoggingEnabled {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tlog.Printf(\"Reattempting to connect to %s in %d seconds\", u.String(), c.Reattempt)\n\t\t\t\t}\n\t\t\t\tc.EventsChan <- nil\n\t\t\t\ttime.Sleep(time.Duration(c.Reattempt) * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// We check that its still the same runID as when this goroutine was started\n\t\t\tclientRunID := atomic.LoadUint64(&(c.runID))\n\t\t\tif clientRunID != runID {\n\t\t\t\tif c.LoggingEnabled {\n\t\t\t\t\tlog.Printf(\"Client on URL %s has been stopped, not sending events\", u.String())\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(pr.Events) > 0 {\n\t\t\t\tif c.LoggingEnabled {\n\t\t\t\t\tlog.Println(\"Got\", len(pr.Events), \"event(s) from URL\", u.String())\n\t\t\t\t}\n\t\t\t\tfor _, event := range pr.Events {\n\t\t\t\t\tsince = event.Timestamp\n\t\t\t\t\tc.EventsChan <- &event\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Only push timestamp forward if its greater than the last we checked\n\t\t\t\tif pr.Timestamp > since {\n\t\t\t\t\tsince = pr.Timestamp\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}(currentRunID, u)\n}",
"func (e *Exporter) StartURLWatcher(httpClient *http.Client, urls []string, interval time.Duration, workerCount int) {\n\tticker := time.NewTicker(interval)\n\tquit := make(chan struct{})\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tlog.Printf(\"Querying URLs: %v\\n\", urls)\n\t\t\t\tqueryResults := client.QueryURLs(httpClient, urls, workerCount)\n\t\t\t\te.QueryResults = queryResults\n\t\t\t\tbreak\n\t\t\tcase <-quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (p *Poller) Start() {\n\twg := sync.WaitGroup{}\n\tvar pollCtx context.Context\n\tvar pollCancel context.CancelFunc = func() {}\n\tfor {\n\t\tselect {\n\t\tcase <-p.quit:\n\t\t\tpollCancel()\n\t\t\treturn\n\t\tcase commitID := <-p.Events:\n\t\t\tpollCancel()\n\t\t\tpollCtx, pollCancel = context.WithCancel(context.Background())\n\t\t\twg.Add(1)\n\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\terr := p.poll(pollCtx, commitID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.Log.Error(err, \"Error occured while polling\")\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}",
"func (e *EndpointsManager) Run() {\n\tticker := time.NewTicker(time.Second * 10)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\te.watchAliveEndpoint()\n\t\tcase <-e.exit:\n\t\t\tclose(e.closed)\n\t\t\tcommon.Logger.Info(\"service done!!!\")\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (t *PollWatcher) Start() error {\n\tif t == nil {\n\t\tpanic(ErrNoWatcherSupplied)\n\t}\n\n\tif t.cfg.Debug {\n\t\tt.cfg.Logger.Get().Debug(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Starting ticker watcher with rate: %dms\",\n\t\t\t\tt.cfg.Rater.Time()/time.Millisecond,\n\t\t\t),\n\t\t)\n\t}\n\tgo t.watch()\n\treturn nil\n}",
"func (rc *RateConverter) startPeriodicFetching() {\n\n\tticker := time.NewTicker(rc.fetchingInterval)\n\tupdatesTicksCount := 0\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t// Retries are handled by clients directly.\n\t\t\trc.Update()\n\t\t\tupdatesTicksCount++\n\t\t\tif rc.updateNotifier != nil {\n\t\t\t\trc.updateNotifier <- updatesTicksCount\n\t\t\t}\n\t\tcase <-rc.done:\n\t\t\tif ticker != nil {\n\t\t\t\tticker.Stop()\n\t\t\t\tticker = nil\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (s *Store) startPoller() {\n\tfor {\n\t\tselect {\n\t\tcase req := <-s.getReqQueue:\n\t\t\treq.respCh <- s.performGetOperation(req.key)\n\n\t\tcase req := <-s.modifyReqQueue:\n\t\t\terr := s.performModifyOperation(req)\n\t\t\treq.respCh <- err\n\n\t\t\ts.fanOutSubscriptions(req)\n\n\t\tcase sub := <-s.subscribeQueue:\n\t\t\ts.registerSubscription(sub)\n\t\t}\n\t}\n}",
"func StartPing(interval time.Duration){\n\tlog.Println(\"Ping Time\")\n\tpinging := true\n\tfor pinging {\n\t\tpinging = false\n\t\tlog.Print(\"Pinging set to \" + strconv.FormatBool(pinging))\n\n\t\tif shouldIPing() {\n\t\t\tpinging = true\n\t\t\tlog.Print(\"Pinging set to \" + strconv.FormatBool(pinging))\n\n\t\t\tbullyImpl.SetIsCoordinatorAlive(false)\n\t\t\tlog.Print(bullyImpl.IsCoordinatorAlive())\n\t\t\tbullyImpl.GetMoi().Ping(bullyImpl.GetCoordinator())\n\n\t\t\ttimer := time.NewTimer(interval)\n\t\t\tselect {\n\t\t\tcase <- endTimer:\n\t\t\t\tlog.Print(\"Pinging was ended\")\n\t\t\tcase <- timer.C:\n\t\t\t\tif shouldIPing() && !bullyImpl.IsCoordinatorAlive() {\n\t\t\t\t\tpinging = false\n\t\t\t\t\tlog.Print(\"Pinging set to \" + strconv.FormatBool(pinging))\n\t\t\t\t\tlog.Print(\"Coordinator is not alive, launching a new Election\")\n\t\t\t\t\tgo func(){ electionChannel <- struct{}{} }()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (b *Bootstrapper) Start() {\n\tb.ctx, b.cancel = context.WithCancel(b.ctx)\n\tb.ticker = time.NewTicker(b.config.Period)\n\n\tgo func() {\n\t\tdefer b.ticker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-b.ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-b.ticker.C:\n\t\t\t\tb.checkConnectivity()\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (p *Prober) Start() {\n\t// Get static copy of the config object\n\tcfg := p.config.Copy()\n\n\tfor _, svc := range cfg.Monitor.Services {\n\t\t// Create new Probe Bot and start it\n\t\tif svc.Interval == 0 {\n\t\t\tsvc.Interval = cfg.Monitor.Interval\n\t\t}\n\n\t\tgo NewProbeBot(\n\t\t\tp.eb,\n\t\t\tsvc,\n\t\t\tp.status.Update,\n\t\t).Start()\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Stop sends a shutdown signal to the polling goroutine to return
|
// Stop signals the polling goroutine to shut down and blocks until the
// goroutine has returned.
func (p *Poller) Stop() {
	var wg sync.WaitGroup
	wg.Add(1)
	p.Shutdown <- &wg
	wg.Wait()
}
|
[
"func (b *bitcoindRPCPollingEvents) Stop() error {\n\tclose(b.quit)\n\tb.wg.Wait()\n\treturn nil\n}",
"func StopPoller() {\n\texitCh <- struct{}{}\n}",
"func (p *Poller) Stop() {\n\tp.tick.Stop()\n\tclose(p.stop)\n}",
"func (p *Poller) Stop(ctx context.Context) error {\n\tc := make(chan struct{})\n\tgo func() {\n\t\tdefer close(c)\n\t\tp.wg.Wait()\n\t}()\n\n\tclose(p.quit)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-c:\n\t\t\treturn nil\n\t\t}\n\t}\n}",
"func (t *Ticker) Stop()\t{ t.shutdown = true }",
"func (rcsw *RemoteClusterServiceWatcher) Stop(cleanupState bool) {\n\trcsw.probeEventsSink.send(&ClusterNotRegistered{\n\t\tclusterName: rcsw.clusterName,\n\t})\n\tclose(rcsw.stopper)\n\tif cleanupState {\n\t\trcsw.eventsQueue.Add(&ClusterUnregistered{})\n\t}\n\trcsw.eventsQueue.ShutDown()\n}",
"func (w *UnbondingWatcher) Stop() {\n\tclose(w.quit)\n}",
"func (m *Mainloop) Stop() {\n\tgo func() { m.termchan <- 1 }()\n\treturn\n}",
"func (p *Poller) Stop() {\n\tp.stopMutex.Lock()\n\tdefer p.stopMutex.Unlock()\n\n\tp.isStopped = true\n\tclose(p.Channel)\n}",
"func Stop() {\n\tquit <- true\n\tfor id, socket := range sockets {\n\t\tfmt.Printf(\"Closing socket %s\\n\", id)\n\t\tsocket.Close()\n\t\tpoller.RemoveBySocket(socket)\n\t}\n\tzctx.Term()\n}",
"func (w *Watcher) Stop() {\n\tw.StopChannel <- true\n}",
"func (u *Updater) Stop() {\n\tu.quitCh <- true\n}",
"func (hb *heartbeat) stop() {\n\tselect {\n\tcase hb.stopChan <- struct{}{}:\n\tdefault:\n\t}\n}",
"func (r *Receiver) Shutdown(ctx context.Context) error {\n\tr.stopOnce.Do(func() {\n\t\tr.scraper.stop()\n\t})\n\treturn nil\n}",
"func (limiter *StableRateLimiter) Stop() {\n\tclose(limiter.quitChannel)\n}",
"func (brw *blockRetrievalWorker) Shutdown() {\n\tselect {\n\tcase <-brw.stopCh:\n\tdefault:\n\t\tclose(brw.stopCh)\n\t}\n}",
"func (w *WatchManager) Stop() {\n\tlog.Println(\"Stopping Watcher...\")\n\tclose(w.stopChannel)\n\tw.RunWaitGroup.Done()\n}",
"func (p *partitionCountWatcher) Stop() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tp.unsafeInitiateShutdown(nil)\n}",
"func Stop() {\n\t// /bin/dbus-send --system --dest=org.ganesha.nfsd --type=method_call /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.shutdown\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetAndDelete retrieves a PendingFileshare from the repository and then deletes it.
|
// GetAndDelete removes and returns the PendingFileshare stored under key.
// The boolean result reports whether an entry existed.
func (r *inMemoryFileshareRepository) GetAndDelete(key string) (PendingFileshare, bool) {
	r.Lock()
	defer r.Unlock()

	if fs, found := r.pendingFileshares[key]; found {
		delete(r.pendingFileshares, key)
		return fs, true
	}
	return PendingFileshare{}, false
}
|
[
"func (am *AutogitManager) Delete(\n\tctx context.Context, dstTLF *libkbfs.TlfHandle, dstDir string,\n\trepo, branchName string) (doneCh <-chan struct{}, err error) {\n\tam.log.CDebugf(ctx, \"Autogit delete request for %s/%s:%s\",\n\t\tdstTLF.GetCanonicalPath(), dstDir, repo, branchName)\n\tdefer func() {\n\t\tam.deferLog.CDebugf(ctx, \"Delete request processed: %+v\", err)\n\t}()\n\n\treq := deleteReq{\n\t\tdstTLF, dstDir, repo, branchName, make(chan struct{}),\n\t}\n\n\tselect {\n\tcase am.deleteQueue.In() <- req:\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n\treturn req.doneCh, nil\n}",
"func (fs *Storage) DeletePending(f string) error {\n\tprefix := filepath.Join(fs.root, \"leaves\", \"pending\")\n\tif !strings.HasPrefix(f, prefix) {\n\t\treturn fmt.Errorf(\"pending key %q does not have prefix %q\", f, prefix)\n\t}\n\tgetStorage().Call(\"removeItem\", f)\n\treturn nil\n}",
"func (s *shares) Delete(shareID int) error {\n\t_, err := s.c.baseRequest(http.MethodDelete, routes.shares, nil, strconv.Itoa(shareID))\n\treturn err\n}",
"func (o *get) Get(ctx context.Context, w http.ResponseWriter, resourceName string) error {\n\tlog.Printf(\"Get for %q\", resourceName)\n\n\tfileshare, ok := o.repository.GetAndDelete(resourceName)\n\tif !ok {\n\t\treturn &NotFoundError{Err: fmt.Errorf(\"resource %q is not found\", resourceName)}\n\t}\n\tdefer fileshare.Conn.Close()\n\n\tdownloaderConn, readerWriter, err := hijackConnection(w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer downloaderConn.Close()\n\n\t_, err = readerWriter.Write(httpPreludeForFileDownload(fileshare.FileName))\n\tif err != nil {\n\t\treturn &LogOnlyError{Err: fmt.Errorf(\"could not send HTTP prelude for file download: %w\", err)}\n\t}\n\n\t// Some bytes may are still present in the uploader's Reader buffer, we need to transmit them.\n\tn, err := transferBufferedBytes(fileshare.Reader, readerWriter.Writer, fileshare.FileSize)\n\tif err != nil {\n\t\treturn &LogOnlyError{Err: err}\n\t}\n\n\tbytesLeft := fileshare.FileSize - n\n\n\tif bytesLeft > 0 {\n\t\t// NOTE: On linux, CopyN will use the \"splice\" syscall which allows very efficient data transfer between conns.\n\t\t// Using the bufio.Reader and bufio.Writer directly would prevent this optimization.\n\t\t_, err = io.CopyN(downloaderConn, fileshare.Conn, bytesLeft)\n\t\tif err != nil {\n\t\t\treturn &LogOnlyError{Err: fmt.Errorf(\"could not copy data: %v\", err)}\n\t\t}\n\t}\n\n\t_, err = fileshare.Writer.Write(httpPayloadForSuccessfulUpload)\n\tif err != nil {\n\t\treturn &LogOnlyError{Err: fmt.Errorf(\"could not send success response to uploader: %v\", err)}\n\t}\n\n\tif err := fileshare.Writer.Flush(); err != nil {\n\t\treturn &LogOnlyError{Err: fmt.Errorf(\"could not flush uploader response: %w\", err)}\n\t}\n\n\treturn nil\n}",
"func (s *ShareListener) Delete(ctx context.Context, in *pb.Reference) (empty *googleprotobuf.Empty, err error) {\n\tempty = &googleprotobuf.Empty{}\n\tif s == nil {\n\t\treturn empty, status.Errorf(codes.FailedPrecondition, fail.InvalidInstanceError().Message())\n\t}\n\tif in == nil {\n\t\treturn empty, status.Errorf(codes.InvalidArgument, fail.InvalidParameterError(\"in\", \"cannot be nil\").Message())\n\t}\n\tshareName := in.GetName()\n\t// FIXME: validate parameters\n\n\ttracer := debug.NewTracer(nil, fmt.Sprintf(\"('%s')\", shareName), true).WithStopwatch().GoingIn()\n\tdefer tracer.OnExitTrace()()\n\tdefer fail.OnExitLogError(tracer.TraceMessage(\"\"), &err)()\n\n\tctx, cancelFunc := context.WithCancel(ctx)\n\tif err := srvutils.JobRegister(ctx, cancelFunc, \"Delete share \"+in.GetName()); err == nil {\n\t\tdefer srvutils.JobDeregister(ctx)\n\t}\n\n\ttenant := GetCurrentTenant()\n\tif tenant == nil {\n\t\tlog.Info(\"Can't delete share: no tenant set\")\n\t\treturn empty, status.Errorf(codes.FailedPrecondition, \"cannot delete share: no tenant set\")\n\t}\n\n\thandler := ShareHandler(tenant.Service)\n\t_, _, _, err = handler.Inspect(ctx, shareName)\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase fail.ErrNotFound:\n\t\t\treturn empty, status.Errorf(codes.NotFound, getUserMessage(err))\n\t\tdefault:\n\t\t\treturn empty, status.Errorf(\n\t\t\t\tcodes.Internal,\n\t\t\t\tfail.Wrap(err, fmt.Sprintf(\"cannot delete share '%s'\", shareName)+adaptedUserMessage(err)).Message(),\n\t\t\t)\n\t\t}\n\t}\n\n\terr = handler.Delete(ctx, shareName)\n\tif err != nil {\n\t\treturn empty, status.Errorf(\n\t\t\tcodes.Internal,\n\t\t\tfail.Wrap(err, fmt.Sprintf(\"cannot delete share '%s'\", shareName)+adaptedUserMessage(err)).Message(),\n\t\t)\n\t}\n\treturn empty, nil\n}",
"func (c *WaitCache) Delete(ctx context.Context, id digest.Digest) error {\n\tpanic(\"delete is not implemented in WaitCache\")\n}",
"func (g *GistFile) Delete(id string) (*http.Response, error) {\n\turll := fmt.Sprintf(\"/gists/%s\", id)\n\treq, err := http.NewRequest(http.MethodDelete, urll, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := auth.Session.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}",
"func (a *NASApiService) CifsShareDelete(ctx context.Context, svmUuid string, name string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/protocols/cifs/shares/{svm.uuid}/{name}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"svm.uuid\"+\"}\", fmt.Sprintf(\"%v\", svmUuid), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/hal+json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/hal+json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, 
err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ErrorResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarHttpResponse, newErr\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}",
"func (a *GoogleAuth) GetAndDelete(state string) bool {\n\ta.mu.Lock()\n\tdefer a.mu.Unlock()\n\n\t_, ok := a.states[state]\n\tif ok {\n\t\tdelete(a.states, state)\n\t\treturn true\n\t}\n\treturn false\n}",
"func (s *gcBlobTaskStore) Delete(ctx context.Context, b *models.GCBlobTask) error {\n\tdefer metrics.InstrumentQuery(\"gc_blob_task_delete\")()\n\n\tq := \"DELETE FROM gc_blob_review_queue WHERE digest = decode($1, 'hex')\"\n\tdgst, err := NewDigest(b.Digest)\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := s.db.ExecContext(ctx, q, dgst)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"deleting GC blob task: %w\", err)\n\t}\n\tcount, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"deleting GC blob task: %w\", err)\n\t}\n\tif count == 0 {\n\t\treturn fmt.Errorf(\"GC blob task not found\")\n\t}\n\n\treturn nil\n}",
"func (r *repository) Delete(dgst digest.Digest) error {\n\tms, err := r.Repository.Manifests(r.ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ms.Delete(dgst)\n}",
"func (c *PendingKeyImpl) GetAndRemove(t token.Token) (util.MsgSectionSender, bool) {\n\tif val, present := c.tokenMap.Get(t.String()); present {\n\t\tc.tokenMap.Remove(t.String())\n\t\tc.counter.Dec()\n\t\treturn val.(pkcValue).mss, true\n\t}\n\treturn util.MsgSectionSender{}, false\n}",
"func (gc *GalleryContext) DeleteAndConfirm() uiauto.Action {\n\tdeleteButtonFinder := nodewith.Role(role.Button).Name(\"Delete\").Ancestor(RootFinder)\n\tconfirmButtonFinder := nodewith.Role(role.Button).Name(\"Delete\").Ancestor(DialogFinder)\n\treturn uiauto.Combine(\"remove current opened media file\",\n\t\tgc.ui.WithTimeout(30*time.Second).WithInterval(1*time.Second).LeftClickUntil(\n\t\t\tdeleteButtonFinder, gc.ui.WithTimeout(3*time.Second).WaitUntilExists(confirmButtonFinder)),\n\t\tgc.ui.LeftClick(confirmButtonFinder),\n\t)\n}",
"func TestDelete(t *testing.T) {\n\tth.SetupHTTP()\n\tdefer th.TeardownHTTP()\n\n\tMockDeleteResponse(t)\n\tres := sharetypes.Delete(client.ServiceClient(), \"shareTypeID\")\n\tth.AssertNoErr(t, res.Err)\n}",
"func (m *Manager) Get(id string) *Transfer {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\treturn m.transfers[id]\n}",
"func (ss *redisStore) GetAndDelete(key string) (*storage.Secret, error) {\n\tbb, err := ss.rdb.GetDel(ctx, key).Bytes()\n\tif err == redis.Nil {\n\t\treturn nil, storage.ErrNoRecord\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar s storage.Secret\n\terr = json.Unmarshal(bb, &s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &s, nil\n}",
"func (c *client) deleteShared(org, team, path string) error {\n\treturn c.delete(fmt.Sprintf(\"%s/shared/%s/%s/%s\", c.config.Prefix, org, team, path))\n}",
"func (l *Locker) LoadAndDelete(key Flags) (interface{}, bool) {\n\treturn l.data.LoadAndDelete(key)\n}",
"func (api *bucketAPI) SyncDelete(obj *objstore.Bucket) error {\n\tvar writeErr error\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, writeErr = apicl.ObjstoreV1().Bucket().Delete(context.Background(), &obj.ObjectMeta)\n\t}\n\n\tif writeErr == nil {\n\t\tapi.ct.handleBucketEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Deleted})\n\t}\n\n\treturn writeErr\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set adds a new PendingFileshare in the repository. If the key already exists, it returns false, otherwise it returns true.
|
func (r *inMemoryFileshareRepository) Set(key string, fd PendingFileshare) bool {
r.Lock()
defer r.Unlock()
if _, ok := r.pendingFileshares[key]; ok {
return false // Key already exists.
}
if r.pendingFileshares == nil {
r.pendingFileshares = make(map[string]PendingFileshare)
}
r.pendingFileshares[key] = fd
return true
}
|
[
"func (f *Filter) Set(key string) (bool, error) {\n\tcmd := \"s \" + f.Name + \" \" + f.getKey(key)\n\tresp, err := f.Conn.SendAndReceive(cmd)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp == \"Yes\" || resp == \"No\" {\n\t\treturn resp == \"Yes\", nil\n\t}\n\treturn false, errInvalidResponse(resp)\n}",
"func (r *inMemoryFileshareRepository) GetAndDelete(key string) (PendingFileshare, bool) {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tfileshare, ok := r.pendingFileshares[key]\n\tif !ok {\n\t\treturn PendingFileshare{}, false\n\t}\n\n\tdelete(r.pendingFileshares, key)\n\treturn fileshare, true\n}",
"func (n *nullCache) Set(_ string, _ any) bool {\n\treturn false\n}",
"func (s *SyncStorage) SetIfNotExists(ns string, key string, data interface{}) (bool, error) {\n\treturn s.getDbBackend(ns).SetNX(getNsPrefix(ns)+key, data, 0)\n}",
"func (fs *FSCache) Set(key string, content []byte) error {\n\treturn ioutil.WriteFile(\n\t\tpath.Join(fs.Root, key),\n\t\tcontent,\n\t\t0600,\n\t)\n}",
"func (f *NaiveMap) Set(key string, value interface{}) bool {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tif f.capacity <= f.count {\n\t\tlog.Print(\"At maximum capacity!\")\n\t\treturn false\n\t}\n\n\tif key == \"\" {\n\t\tlog.Print(\"Invalid input.\")\n\t\treturn false\n\t}\n\t// Check if they key has been set, if so, put value with key.\n\tfor i, _ := range f.keys {\n\t\tif f.keys[i] == key {\n\t\t\tf.values[i] = value\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Make sure there's an an empty spot, then add.\n\tfor i, _ := range f.keys {\n\t\tif f.keys[i] == \"\" {\n\t\t\tf.keys[i] = key\n\t\t\tf.values[i] = value\n\t\t\tf.count++\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (storage *PaymentChannelStorage) PutIfAbsent(key *PaymentChannelKey, state *PaymentChannelData) (ok bool, err error) {\n\treturn storage.delegate.PutIfAbsent(key, state)\n}",
"func (d *Downloader) containsOrMarkStarted(uri string) bool {\n\td.downloadsMutex.Lock()\n\tdefer d.downloadsMutex.Unlock()\n\n\t_, ok := d.downloads[uri]\n\tif ok {\n\t\treturn true\n\t}\n\n\td.downloads[uri] = true\n\treturn false\n}",
"func (s *SharemeService) Add(c *gae.Context, session *Session, key string) (stat Share) {\n\tstat = s.Stat(c, key)\n\tif stat.IsError() {\n\t\treturn\n\t}\n\tsession.Set(fmt.Sprintf(\"%s%s\", KeySessionPrefix, key), stat.Name)\n\treturn\n}",
"func (n *namespace) Set(ctx context.Context, key string, value []byte) error {\n\terr := n.canonical.Set(ctx, key, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// best effort to set the cache\n\tcloned := append(value[:0:0], value...)\n\t_ = n.cache.Set(ctx, key, cloned)\n\treturn nil\n}",
"func (s *SyncStorage) SetIfNotExistsAndPublish(ns string, channelsAndEvents []string, key string, data interface{}) (bool, error) {\n\tnsPrefix := getNsPrefix(ns)\n\tif len(channelsAndEvents) == 0 {\n\t\treturn s.getDbBackend(ns).SetNX(nsPrefix+key, data, 0)\n\t}\n\tif err := s.checkChannelsAndEvents(\"SetIfNotExistsAndPublish\", channelsAndEvents); err != nil {\n\t\treturn false, err\n\t}\n\tchannelsAndEventsPrepared := s.prepareChannelsAndEvents(nsPrefix, channelsAndEvents)\n\treturn s.getDbBackend(ns).SetNXPub(channelsAndEventsPrepared, nsPrefix+key, data)\n}",
"func (m *mSignatureKeyHolderMockEquals) Set(f func(p SignatureKeyHolder) (r bool)) *SignatureKeyHolderMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.EqualsFunc = f\n\treturn m.mock\n}",
"func (s *Store) Set(key string, d StoreData) bool {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\t_, ok := s.data[key]\n\tdd, _ := d.Init()\n\ts.data[key] = dd\n\ts.changed = true\n\ts.Log.Printf(\"debug: in set: %+v\", s)\n\treturn ok\n\n}",
"func (c *BlockCache) Set(key string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tkey = strings.ToLower(key)\n\tc.m[key] = true\n}",
"func (s *Store) Put(key string, value []byte) bool {\n\n\tif _, ok := s.objects.Load(key); !ok {\n\t\ts.objects.Store(key, value)\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (t *SBF) Add(hashes []uint64) bool {\n // Check if the key is in SBF already\n t.mutex.RLock()\n for _, pf := range t.plainFilters {\n if pf.Has(hashes) {\n t.mutex.RUnlock()\n // Has the key already.\n return true\n }\n }\n t.mutex.RUnlock()\n\n // Add the key to SBF\n t.mutex.Lock()\n defer t.mutex.Unlock()\n pf := t.plainFilters[len(t.plainFilters)-1]\n\n if t.keys == t.capacity {\n // SBF is full. Expand it by attaching another plainFilter\n pf := plainFilter.NewPlainFilter(scale_size*pf.Capacity, r*pf.Probability)\n t.plainFilters = append(t.plainFilters, pf)\n atomic.AddUint64(&t.capacity, pf.Capacity)\n }\n\n // In most cases added is false. Since we checked the key is not in the filter in the\n // top half of this function. But there is a tiny chance there is a context switch happens\n // between the RWLock and we could add the same key twice. So double check added here.\n added := pf.Add(hashes)\n if !added {\n atomic.AddUint64(&t.keys, 1)\n }\n \n return added\n}",
"func (b *Bcache) Set(key, val string, expiredTimestamp int64) {\n\tb.peer.Set(key, val, expiredTimestamp)\n}",
"func (rf *Filter) Set(ctx context.Context, cli *bloomd.Client, k bloomd.Key) (bool, error) {\n\tcurrUnit := rf.currUnit()\n\tf := cli.GetFilter(rf.nameForUnit(currUnit))\n\treturn f.Set(k)\n}",
"func (b *SharingKeys) Add(name string, key *[32]byte) (*SharingKey, error) {\n\tif name == \"\" {\n\t\treturn nil, ErrSharingKeyNameInvalid\n\t}\n\tn := []byte(name)\n\tif v := b.b.Get(n); v != nil {\n\t\treturn nil, ErrSharingKeyExist\n\t}\n\tif err := b.b.Put([]byte(name), key[:]); err != nil {\n\t\treturn nil, err\n\t}\n\ts := &SharingKey{\n\t\tb: b,\n\t\tname: n,\n\t\tsecret: key[:],\n\t}\n\treturn s, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Env returns Environment associated with Expect instance. Tests can use it to store arbitrary data. Example: e := httpexpect.Default(t, " e.Env().Put("key", "value") value := e.Env().GetString("key")
|
func (e *Expect) Env() *Environment {
return e.chain.env()
}
|
[
"func (c *CommandGo) Env() (result *EnvInfo, err error) {\n\tcmdArgs := []string{\"env\", \"-json\"}\n\tvar envInfo EnvInfo\n\tif err = c.runWithJSONDecode(cmdArgs, &envInfo); nil != err {\n\t\treturn\n\t}\n\tresult = &envInfo\n\treturn\n}",
"func (envManager *TestEnvManager) GetEnv() TestEnv {\n\treturn envManager.testEnv\n}",
"func (suite *Suite[Env]) Env() *Env {\n\tif suite.env == nil || !suite.isUpdateEnvCalledInThisTest {\n\t\tsuite.UpdateEnv(suite.defaultStackDef)\n\t}\n\treturn suite.env\n}",
"func (e *EnvSet) Env(envType Type) (*cel.Env, error) {\n\tswitch envType {\n\tcase NewExpressions:\n\t\treturn e.newExpressions, nil\n\tcase StoredExpressions:\n\t\treturn e.storedExpressions, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported environment type: %v\", envType)\n\t}\n}",
"func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {\n\texperimentDetails.ExperimentName = Getenv(\"EXPERIMENT_NAME\", \"\")\n\texperimentDetails.AppNS = Getenv(\"APP_NS\", \"\")\n\texperimentDetails.TargetContainer = Getenv(\"APP_CONTAINER\", \"\")\n\texperimentDetails.TargetPods = Getenv(\"APP_POD\", \"\")\n\texperimentDetails.AppLabel = Getenv(\"APP_LABEL\", \"\")\n\texperimentDetails.ChaosDuration, _ = strconv.Atoi(Getenv(\"TOTAL_CHAOS_DURATION\", \"30\"))\n\texperimentDetails.ChaosNamespace = Getenv(\"CHAOS_NAMESPACE\", \"litmus\")\n\texperimentDetails.EngineName = Getenv(\"CHAOS_ENGINE\", \"\")\n\texperimentDetails.ChaosUID = clientTypes.UID(Getenv(\"CHAOS_UID\", \"\"))\n\texperimentDetails.ChaosPodName = Getenv(\"POD_NAME\", \"\")\n\texperimentDetails.ContainerRuntime = Getenv(\"CONTAINER_RUNTIME\", \"\")\n\texperimentDetails.NetworkInterface = Getenv(\"NETWORK_INTERFACE\", \"eth0\")\n\texperimentDetails.TargetIPs = Getenv(\"TARGET_IPs\", \"\")\n}",
"func (cmd Cmd) Env(env *Env) Cmd {\n\tcmd.Environment = env\n\treturn cmd\n}",
"func getTestEnv() *Env {\n\tdb := createMockDB()\n\n\toauthConf := &oauth2.Config{\n\t\tClientID: \"abcdef0123abcdef4567\",\n\t\tClientSecret: \"abcdef0123abcdef4567abcdef8901abcdef2345\",\n\t\tScopes: []string{\"user:email\"},\n\t\tEndpoint: githuboauth.Endpoint,\n\t}\n\n\tenv := &Env{\n\t\tdb: db,\n\t\tjwtSecretKey: \"keyForTesting\",\n\t\toauthConf: oauthConf,\n\t\toauthState: \"nonRandomStateString\",\n\t}\n\treturn env\n}",
"func (s RunSpec) GetEnv() (env []string) {\n\tif s.Environment == nil {\n\t\ts.Environment = make(map[string]string)\n\t}\n\n\ts.Environment[\"RANNA_HOSTDIR\"] = s.HostDir\n\n\tenv = make([]string, len(s.Environment))\n\ti := 0\n\tfor k, v := range s.Environment {\n\t\tenv[i] = fmt.Sprintf(`%s=%s`, k, v)\n\t\ti++\n\t}\n\n\treturn\n}",
"func Env(key, val string) string {\n\treturn fmt.Sprintf(\"%s=%s\", key, val)\n}",
"func (ci MrbCallInfo) Env() REnv {\n\treturn REnv{C.mrb_vm_ci_env(ci.p), nil}\n}",
"func (f *EnvTestFixture) NewEnv() *hermit.Env {\n\tenvDir, err := ioutil.TempDir(\"\", \"\")\n\trequire.NoError(f.t, err)\n\tlog, _ := ui.NewForTesting()\n\terr = hermit.Init(log, envDir, \"\", f.State.Root(), hermit.Config{})\n\trequire.NoError(f.t, err)\n\tenv, err := hermit.OpenEnv(envDir, f.State, envars.Envars{}, f.Server.Client())\n\trequire.NoError(f.t, err)\n\treturn env\n}",
"func (p RProc) Env() REnv {\n\tif !p.HasEnv() {\n\t\treturn REnv{nil, p.mrb}\n\t}\n\treturn REnv{C._MRB_PROC_ENV(p.p), p.mrb}\n}",
"func TestEnv(t *testing.T) {\n\tt.Parallel()\n\tctx := context.Background()\n\n\tf := newFixtureWithoutDiskBasedLogging(t)\n\n\tse, err := f.ssh.clt.NewSession(ctx)\n\trequire.NoError(t, err)\n\tdefer se.Close()\n\n\trequire.NoError(t, se.Setenv(ctx, \"HOME_TEST\", \"/test\"))\n\toutput, err := se.Output(ctx, \"env\")\n\trequire.NoError(t, err)\n\trequire.Contains(t, string(output), \"HOME_TEST=/test\")\n}",
"func NewEnv() *Env {\n\treturn &Env{}\n}",
"func InjectEnv() env.Env {\n\twire.Build(\n\t\twire.Bind(new(env.Env), new(env.GoDotEnv)),\n\t\tenv.NewGoDotEnv,\n\t)\n\treturn env.GoDotEnv{}\n}",
"func NewExpectWithEnv(name string, args []string, env []string, serverProcessConfigName string) (ep *ExpectProcess, err error) {\n\tep = &ExpectProcess{\n\t\tcfg: expectConfig{\n\t\t\tname: serverProcessConfigName,\n\t\t\tcmd: name,\n\t\t\targs: args,\n\t\t\tenv: env,\n\t\t},\n\t\treadCloseCh: make(chan struct{}),\n\t}\n\tep.cmd = commandFromConfig(ep.cfg)\n\n\tif ep.fpty, err = pty.Start(ep.cmd); err != nil {\n\t\treturn nil, err\n\t}\n\n\tep.wg.Add(2)\n\tgo ep.read()\n\tgo ep.waitSaveExitErr()\n\treturn ep, nil\n}",
"func (ed *EnvironmentDefinition) GetEnv(inherit bool) map[string]string {\n\tlookupEnv := os.LookupEnv\n\tif !inherit {\n\t\tlookupEnv = func(_ string) (string, bool) { return \"\", false }\n\t}\n\tres, err := ed.GetEnvBasedOn(lookupEnv)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not inherit OS environment variable: %v\", err))\n\t}\n\treturn res\n}",
"func TestEnviron(t *testing.T) {\n\tenv := []string{\n\t\t\"FOO=foo\",\n\t\t\"BAR=baz\",\n\t}\n\tcmd := scriptHelper(\"TestWithEnvironSet\", env)\n\tresult, err := cmd.Output()\n\tif err != nil {\n\t\tt.Fatalf(\"failed running helper: %s\", err)\n\t}\n\n\tdata := [][]byte{\n\t\t[]byte(\"FOO=foo\"),\n\t\t[]byte(\"BAR=bar\"),\n\t\t[]byte(\"BAZ=baz\"),\n\t}\n\n\tfor _, expected := range data {\n\t\tif !bytes.Contains(result, expected) {\n\t\t\tt.Errorf(\"missing %s from environment\", expected)\n\t\t}\n\t}\n}",
"func Env(name string) string {\n\treturn os.Getenv(name)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Request returns a new Request instance. Arguments are similar to NewRequest. After creating request, all builders attached to Expect instance are invoked. See Builder.
|
func (e *Expect) Request(method, path string, pathargs ...interface{}) *Request {
opChain := e.chain.enter("Request(%q)", method)
defer opChain.leave()
req := newRequest(opChain, e.config, method, path, pathargs...)
for _, builder := range e.builders {
builder(req)
}
for _, matcher := range e.matchers {
req.WithMatcher(matcher)
}
return req
}
|
[
"func (f Factory) Request(method string, arguments interface{}) (request Request) {\n\trequest.Token = f.Token\n\trequest.Method = method\n\tif arguments != nil {\n\t\tvar err error\n\t\trequest.Values, err = query.Values(arguments)\n\t\tmust(err)\n\t}\n\treturn request\n}",
"func NewRequest(requestName string, params rata.Params, header http.Header, query url.Values, body ...io.Reader) Request {\n\tif header == nil {\n\t\theader = http.Header{}\n\t}\n\theader.Set(\"Accept\", \"application/json\")\n\n\trequest := Request{\n\t\tRequestName: requestName,\n\t\tParams: params,\n\t\tHeader: header,\n\t\tQuery: query,\n\t}\n\n\tif len(body) == 1 {\n\t\trequest.Body = body[0]\n\t}\n\n\treturn request\n}",
"func NewRequest() *Request {\n\treturn &Request{\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(0, 0, 1),\n\t\tBitSize: defaultBitSize,\n\t}\n}",
"func (s APIv1) NewRequest(ctx context.Context, method, path string, data interface{}) (req *http.Request, err error) {\n\t// Resolve the URL reference from the path\n\tendpoint := s.endpoint.ResolveReference(&url.URL{Path: path})\n\n\tvar body io.ReadWriter\n\tif data != nil {\n\t\tbody = &bytes.Buffer{}\n\t\tif err = json.NewEncoder(body).Encode(data); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not serialize request data: %s\", err)\n\t\t}\n\t} else {\n\t\tbody = nil\n\t}\n\n\t// Create the http request\n\tif req, err = http.NewRequestWithContext(ctx, method, endpoint.String(), body); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create request: %s\", err)\n\t}\n\n\t// Set the headers on the request\n\treq.Header.Add(\"User-Agent\", \"Whisper/1.0\")\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Add(\"Accept-Language\", \"en-US,en\")\n\treq.Header.Add(\"Accept-Encoding\", \"gzip, deflate, br\")\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\treturn req, nil\n}",
"func NewRequest(typ string, data []byte) *Request {\n\treturn &Request{\n\t\tType: typ,\n\t\tBody: data,\n\t}\n}",
"func NewRequest(params interface{}, atta map[string]interface{}) *DubboRequest {\n\tif atta == nil {\n\t\tatta = make(map[string]interface{})\n\t}\n\treturn &DubboRequest{\n\t\tParams: params,\n\t\tAttachments: atta,\n\t}\n}",
"func New(method, url string) *Request {\n\treturn &Request{\n\t\tmethod: method,\n\t\turl: url,\n\t\theaders: make(map[string]string),\n\t\tqueryParams: make(map[string]string),\n\t\tpathParams: make([]string, 0),\n\t}\n}",
"func NewRequest(r *http.Request) (Request, error) {\n\tif r == nil {\n\t\treturn nil, ErrWrappedNil\n\t}\n\n\treturn &request{raw: r, ctx: map[interface{}]interface{}{}}, nil\n}",
"func New() *Request {\n\tr := &Request{}\n\tr.curl = false\n\tr.curlHeader = false\n\tr.timeout = 30 * time.Second\n\tr.skipRedirects = false\n\n\treturn r\n}",
"func NewRequest(session *Session, path string) *Request {\n\trequest := new(Request)\n\trequest.UnderlyingRequest = api.NewRequest(session.underlyingSession, path)\n\trequest.session = session\n\treturn request\n}",
"func NewRequest(frm Frame, respID uint32) *Request {\n\treturn &Request{\n\t\tFrame: frm,\n\t\tFilterFunc: sdoFilter(frm, respID),\n\t}\n}",
"func (s *Nap) Request() (*http.Request, error) {\n\treqURL, err := url.Parse(s.rawURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = buildQueryParamUrl(reqURL, s.queryStructs, s.queryParams)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar body io.Reader\n\tif s.bodyProvider != nil {\n\t\tbody, err = s.bodyProvider.Body()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treq, err := http.NewRequestWithContext(s.Context(), s.method, reqURL.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddHeaders(req, s.header)\n\treturn req, err\n}",
"func NewRequest(m Manager) *Request {\n\treturn &Request{\n\t\tm: m,\n\t\trLocks: make(map[string]struct{}),\n\t\twLocks: make(map[string]struct{}),\n\t}\n}",
"func NewRequest(path string, mode xrdfs.OpenMode, options xrdfs.OpenOptions) *Request {\n\treturn &Request{Mode: mode, Options: options, Path: path}\n}",
"func NewRequest(session *Session, path string) *Request {\n\tr := new(Request)\n\tr.path = path\n\tr.session = session\n\tr.queryValues = make(url.Values)\n\treturn r\n}",
"func NewRequest(domain string, resolveName, includeIP bool, except tools.Exceptions) *Request {\n\tdr := new(Request)\n\tdr.topic.domain = strings.ToLower(dns.Fqdn(domain))\n\tdr.topic.followAlias = resolveName\n\tdr.topic.includeIP = includeIP\n\tdr.topic.depth = 0\n\tdr.topic.except = except\n\tdr.resultChan = make(chan *result, 1)\n\tdr.context = make(map[RequestTopic]bool)\n\treturn dr\n}",
"func NewRequest(t Type, body io.WriterTo) *Request {\n\treq := &Request{\n\t\tBody: ©Reader{WriterTo: body},\n\t\tProto: \"OFP/1.3\",\n\t\tProtoMajor: 1, ProtoMinor: 3,\n\t}\n\n\treq.Header.Version = uint8(req.ProtoMajor + req.ProtoMinor)\n\treq.Header.Type = t\n\n\treturn req\n}",
"func (c *baseClient) Request() (*http.Request, error) {\n\treqURL, err := url.Parse(c.url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.queryStruct != nil {\n\t\terr = addQueryStruct(reqURL, c.queryStruct)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tbody := &bytes.Buffer{}\n\tif c.body != nil {\n\t\tif err := json.NewEncoder(body).Encode(c.body); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(c.method, reqURL.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Add headers to request\n\tfor k, vs := range c.header {\n\t\tfor _, v := range vs {\n\t\t\treq.Header.Add(k, v)\n\t\t}\n\t}\n\n\treturn req, nil\n}",
"func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {\n\treq, err := http.NewRequest(method, urlStr, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Make a context for this request.\n\tc := &context{\n\t\treq: req,\n\t\tsession: newSessionID(),\n\t\tinstance: i,\n\t}\n\n\t// Associate this request.\n\trelease := appengine_internal.RegisterTestContext(req, c)\n\ti.relFuncs = append(i.relFuncs, release)\n\n\treturn req, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
OPTIONS is a shorthand for e.Request("OPTIONS", path, pathargs...).
|
func (e *Expect) OPTIONS(path string, pathargs ...interface{}) *Request {
return e.Request(http.MethodOptions, path, pathargs...)
}
|
[
"func (e *Expect) OPTIONS(url string, args ...interface{}) *Request {\n\treturn e.Request(\"OPTIONS\", url, args...)\n}",
"func Options(url string, data ...interface{}) (*ClientResponse, error) {\n\treturn DoRequest(\"OPTIONS\", url, data...)\n}",
"func (g *Group) OPTIONS(path string, handler Handler, middleware ...Middleware) *Group {\n\treturn g.Add(http.MethodOptions, path, handler, middleware...)\n}",
"func (r *Request) Options(path string, params ...url.Values) {\n\tcontentType := \"text/html\"\n\n\tif len(params) == 0 {\n\t\tr.Send(\"OPTIONS\", path, contentType)\n\t} else {\n\t\tr.Send(\"OPTIONS\", path, contentType, params[0])\n\t}\n}",
"func (cg *ContextGroup) OPTIONS(path string, handler http.HandlerFunc) {\n\tcg.Handle(\"OPTIONS\", path, handler)\n}",
"func (serv *Server) OPTIONS(url string, handlers ...Handler) {\n\tserv.Handle(\"OPTIONS\", url, handlers...)\n}",
"func (c *SCGIClient) Options(p map[string]string) (resp *http.Response, err error) {\n\n\tp[\"REQUEST_METHOD\"] = \"OPTIONS\"\n\tp[\"CONTENT_LENGTH\"] = \"0\"\n\n\treturn c.Request(p, nil)\n}",
"func (r *Router) OPTIONS(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodOptions, url, viewFn)\n}",
"func (c *Client) Options(url string, headers, queryParams map[string][]string) (response *http.Response, err error) {\n\treturn c.makeRequest(url, http.MethodOptions, headers, queryParams, nil)\n}",
"func (e *Engine) OPTIONS(path string, handler Handler) {\n\te.registerRoute(http.MethodOptions, path, handler)\n}",
"func Options(callURL string, headers ...Header) (*Response, error) {\n\treturn performRequest(http.MethodOptions, callURL, \"\", getHeadersMap(headers))\n}",
"func (f *Fastglue) OPTIONS(path string, h FastRequestHandler) {\n\tf.Router.OPTIONS(path, f.handler(h))\n}",
"func (r *Router) OPTIONS(path string, handle HandlerFunc, middleware ...MiddlewareFunc) {\n\tr.Handle(\"OPTIONS\", path, handle, middleware...)\n}",
"func (r *Router) OPTIONS(path string, handler fasthttp.RequestHandler) {\n\tr.Handle(fasthttp.MethodOptions, path, handler)\n}",
"func (g *Group) OPTIONS(path string, handler xhandler.HandlerC) {\n\tg.HandleC(\"OPTIONS\", path, handler)\n}",
"func Options(c *gin.Context) {\n\tif c.Request.Method != \"OPTIONS\" {\n\t\tc.Next()\n\t} else {\n\t\tc.Header(\"Access-Control-Allow-Origin\", \"*\")\n\t\tc.Header(\"Access-Control-Allow-Methods\", \"GET,POST,PUT,PATCH,DELETE,OPTIONS\")\n\t\tc.Header(\"Access-Control-Allow-Headers\", \"authorization,origin,content-type,accept\")\n\t\tc.Header(\"Allow\", \"HEAD,GET,POST,PUT,PATCH,DELETE,OPTIONS\")\n\t\tc.Header(\"Content-Type\", \"application/json\")\n\t\tc.AbortWithStatus(200)\n\t}\n}",
"func (router *Router) Options(path string, handler http.Handler) {\n\trouter.Handle(\"OPTIONS\", path, handler)\n}",
"func (rg *RouteGroup) OPTIONS(path string, handlers ...Handler) *Route {\n\treturn rg.add(\"OPTIONS\", path, handlers)\n}",
"func (r *Router) OPTIONS(path string, handle Handle) {\n\tr.Handle(http.MethodOptions, path, handle)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
HEAD is a shorthand for e.Request("HEAD", path, pathargs...).
|
func (e *Expect) HEAD(path string, pathargs ...interface{}) *Request {
return e.Request(http.MethodHead, path, pathargs...)
}
|
[
"func (e *Expect) HEAD(url string, args ...interface{}) *Request {\n\treturn e.Request(\"HEAD\", url, args...)\n}",
"func (r *irequest) Head(url string, options ...interface{}) Request {\n\toptions = append([]interface{}{url, nil}, options...)\n\tr.SetMethod(Method_HEAD, url)\n\n\treturn r\n}",
"func (r *Request) Head(path string, params ...url.Values) {\n\tcontentType := \"text/html\"\n\n\tif len(params) == 0 {\n\t\tr.Send(\"HEAD\", path, contentType)\n\t} else {\n\t\tr.Send(\"HEAD\", path, contentType, params[0])\n\t}\n}",
"func (c *SCGIClient) Head(p map[string]string) (resp *http.Response, err error) {\n\n\tp[\"REQUEST_METHOD\"] = \"HEAD\"\n\tp[\"CONTENT_LENGTH\"] = \"0\"\n\n\treturn c.Request(p, nil)\n}",
"func (c *Client) Head(path string) *Request {\n\treturn c.Request(http.MethodHead, path)\n}",
"func (r *Router) HEAD(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodHead, url, viewFn)\n}",
"func (F *Frisby) Head(url string) *Frisby {\n\tF.Method = \"HEAD\"\n\tF.Url = url\n\treturn F\n}",
"func (c *Client) Head(url string) (*http.Response, error) {\n\treturn c.DoRaw(\"HEAD\", url, \"\", nil, nil)\n}",
"func NewHead(url string) *Request { return NewRequest(\"HEAD\", url) }",
"func (r *Router) HEAD(path string, handler fasthttp.RequestHandler) {\n\tr.Handle(fasthttp.MethodHead, path, handler)\n}",
"func (bow *Browser) httpHEAD(u *url.URL, ref *url.URL) error {\n\treq, err := bow.buildRequest(\"HEAD\", u.String(), ref, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn bow.httpRequest(req)\n}",
"func (f *Fastglue) HEAD(path string, h FastRequestHandler) {\n\tf.Router.HEAD(path, f.handler(h))\n}",
"func (serv *Server) HEAD(url string, handlers ...Handler) {\n\tserv.Handle(\"HEAD\", url, handlers...)\n}",
"func (g *Group) HEAD(path string, handler Handler, middleware ...Middleware) *Group {\n\treturn g.Add(http.MethodHead, path, handler, middleware...)\n}",
"func (cg *ContextGroup) HEAD(path string, handler http.HandlerFunc) {\n\tcg.Handle(\"HEAD\", path, handler)\n}",
"func (r *Router) HEAD(path string, handle Handle) {\n\tr.Handle(http.MethodHead, path, handle)\n}",
"func (e *Engine) HEAD(path string, handler Handler) {\n\te.registerRoute(http.MethodHead, path, handler)\n}",
"func (c *Client) Head(ctx context.Context, hosts []string, path string) (http.Header, int, error) {\n\tresp, err := c.executeRequest(ctx, http.MethodHead, hosts, path, nil)\n\tif err != nil {\n\t\treturn nil, 0, errors.WithStack(err)\n\t}\n\tdefer resp.Body.Close()\n\treturn resp.Header, resp.StatusCode, nil\n}",
"func (r *Router) HEAD(path string, handle HandlerFunc, middleware ...MiddlewareFunc) {\n\tr.Handle(\"HEAD\", path, handle, middleware...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GET is a shorthand for e.Request("GET", path, pathargs...).
|
func (e *Expect) GET(path string, pathargs ...interface{}) *Request {
return e.Request(http.MethodGet, path, pathargs...)
}
|
[
"func (e *Expect) GET(url string, args ...interface{}) *Request {\n\treturn e.Request(\"GET\", url, args...)\n}",
"func (r *Request) Get(path string) *Request {\n\treturn r.method(\"GET\", path)\n}",
"func (c *Client) Get(urlPath ...string) *Req {\n\treturn c.Req(http.MethodGet, nil, urlPath...)\n}",
"func (c *Client) Get(path string) *Request {\n\treturn c.Request(http.MethodGet, path)\n}",
"func (r *Request) Get(url string) *Request {\n\tr.method = http.MethodGet\n\tr.url = url\n\treturn r\n}",
"func Get (url string, args map[string]string) (*http.Response, error) {\n\t// create a client\n\tclient, req, _ := GetHttpClient(url)\n\t// build the query\n\tif len(args) > 0 {\n\t\treq = buildQuery(req, args)\n\t}\n\t// execute the request\n\t//fmt.Println(req.URL.String())\n\treturn client.Do(req)\n}",
"func (r *Router) GET(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodGet, url, viewFn)\n}",
"func GET(path string, f func(c *Context)) {\n\thandlers = append(handlers, handlerPath{\"GET\", path, f})\n}",
"func (c *baseClient) Get(path string) *baseClient {\n\tc.method = \"GET\"\n\treturn c.Path(path)\n}",
"func (r *Request) Get(path string, params ...url.Values) {\n\tcontentType := \"text/html\"\n\n\tif len(params) == 0 {\n\t\tr.Send(\"GET\", path, contentType)\n\t} else {\n\t\tr.Send(\"GET\", path, contentType, params[0])\n\t}\n}",
"func (req *Request) GET() (*http.Request, error) {\n\treturn http.NewRequest(\"GET\", req.SourceURL, http.NoBody)\n}",
"func (c *Case) GET(p string) *RequestBuilder {\n\treturn &RequestBuilder{\n\t\tmethod: http.MethodGet,\n\t\tpath: p,\n\t\tcas: c,\n\t\tfail: c.fail,\n\t}\n}",
"func (api *Api) Get(path string, endpoint http.HandlerFunc, queries ...string) {\n\tapi.Router.HandleFunc(path, endpoint).Methods(\"GET\").Queries(queries...)\n}",
"func (c Client) get(path string, params url.Values, holder interface{}) error {\n\treturn c.request(\"GET\", path, params, &holder)\n}",
"func (e *Engine) GET(path string, handler Handler) {\n\te.registerRoute(http.MethodGet, path, handler)\n}",
"func NewGet(url string) *Request { return NewRequest(\"GET\", url) }",
"func (c *Client) Get(url string, headers, queryParams map[string][]string) (response *http.Response, err error) {\n\treturn c.makeRequest(url, http.MethodGet, headers, queryParams, nil)\n}",
"func Get(opts ...Option) ([]byte, error) {\n\treturn request(\"GET\", opts...)\n}",
"func (r *Router) Get(args ...interface{}) {\n\tr.addHandle(\"GET\", args...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
POST is a shorthand for e.Request("POST", path, pathargs...).
|
func (e *Expect) POST(path string, pathargs ...interface{}) *Request {
return e.Request(http.MethodPost, path, pathargs...)
}
|
[
"func (c *Client) Post(path string) *Request {\n\treturn c.Request(http.MethodPost, path)\n}",
"func (e *Expect) POST(url string, args ...interface{}) *Request {\n\treturn e.Request(\"POST\", url, args...)\n}",
"func (r *Request) Post(path, contentType string, data ...interface{}) {\n\tr.Send(\"POST\", path, contentType, data...)\n}",
"func (r *Router) POST(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodPost, url, viewFn)\n}",
"func (r *Request) Post(url string) *Request {\n\tr.method = http.MethodPost\n\tr.url = url\n\treturn r\n}",
"func (s *Nap) Post(pathURL string) *Nap {\n\ts.method = MethodPost\n\treturn s.Path(pathURL)\n}",
"func (e *Engine) POST(path string, handler Handler) {\n\te.registerRoute(http.MethodPost, path, handler)\n}",
"func (r *irequest) Post(url string, options ...interface{}) Request {\n\toptions = append([]interface{}{url}, options...)\n\tr.SetMethod(Method_POST, options...)\n\n\treturn r\n}",
"func (c *baseClient) Post(path string) *baseClient {\n\tc.method = \"POST\"\n\treturn c.Path(path)\n}",
"func (g *Group) POST(path string, handler Handler, middleware ...Middleware) *Group {\n\treturn g.Add(http.MethodPost, path, handler, middleware...)\n}",
"func (router *Router) POST(relativePath string, handler Handler, decorators ...Decorator) {\n\trouter.createRouter(http.MethodPost, relativePath, handler, \"\", decorators...)\n}",
"func (app *App) Post(path string, endpoint http.HandlerFunc, queries ...string) {\r\n\tapp.Router.HandleFunc(path, endpoint).Methods(\"POST\").Queries(queries...)\r\n}",
"func (h *Handler) POST(relativePath string, f ActionFunc) {\n\th.pushAction(POST, relativePath, f, false)\n}",
"func (api *Api) Post(path string, endpoint http.HandlerFunc, queries ...string) {\n\tapi.Router.HandleFunc(path, endpoint).Methods(\"POST\").Queries(queries...)\n}",
"func (f *Fastglue) POST(path string, h FastRequestHandler) {\n\tf.Router.POST(path, f.handler(h))\n}",
"func (r *Router) POST(path string, handler fasthttp.RequestHandler) {\n\tr.Handle(fasthttp.MethodPost, path, handler)\n}",
"func (c Client) post(path string, params url.Values, holder interface{}) error {\n\treturn c.request(\"POST\", path, params, &holder)\n}",
"func (r *Router) POST(path string, handler Handle) {\n\tr.Handle(\"POST\", path, handler)\n}",
"func (c *Case) POST(p string) *RequestBuilder {\n\treturn &RequestBuilder{\n\t\tmethod: http.MethodPost,\n\t\tpath: p,\n\t\tcas: c,\n\t\tfail: c.fail,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PUT is a shorthand for e.Request("PUT", path, pathargs...).
|
func (e *Expect) PUT(path string, pathargs ...interface{}) *Request {
return e.Request(http.MethodPut, path, pathargs...)
}
|
[
"func (e *Expect) PUT(url string, args ...interface{}) *Request {\n\treturn e.Request(\"PUT\", url, args...)\n}",
"func (r *Request) Put(path, contentType string, data ...interface{}) {\n\tr.Send(\"PUT\", path, contentType, data...)\n}",
"func (c *Client) Put(path string) *Request {\n\treturn c.Request(http.MethodPut, path)\n}",
"func (api *Api) Put(path string, endpoint http.HandlerFunc, queries ...string) {\n\tapi.Router.HandleFunc(path, endpoint).Methods(\"PUT\").Queries(queries...)\n}",
"func (r *Router) PUT(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodPut, url, viewFn)\n}",
"func (g *Group) PUT(path string, handler Handler, middleware ...Middleware) *Group {\n\treturn g.Add(http.MethodPut, path, handler, middleware...)\n}",
"func (c Client) put(path string, params url.Values, holder interface{}) error {\n\treturn c.request(\"PUT\", path, params, &holder)\n}",
"func (r Requester) Update(path string, payload interface{}) Requester {\n\tb, err := json.Marshal(payload)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbody := bytes.NewReader(b)\n\tr.httpRequest, err = http.NewRequest(http.MethodPut, r.url, body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn r\n}",
"func (e *Engine) PUT(path string, handler Handler) {\n\te.registerRoute(http.MethodPut, path, handler)\n}",
"func (router *Router) PUT(relativePath string, handler Handler, decorators ...Decorator) {\n\trouter.createRouter(http.MethodPut, relativePath, handler, \"\", decorators...)\n}",
"func (cl *Client) Put(ctx context.Context, path string, qps url.Values, body, into interface{}) error {\n\treturn cl.do(ctx, http.MethodPut, path, qps, body, into)\n}",
"func (r *Request) Put(url string) *Request {\n\tr.method = http.MethodPut\n\tr.url = url\n\treturn r\n}",
"func (r *Router) PUT(path string, handler fasthttp.RequestHandler) {\n\tr.Handle(fasthttp.MethodPut, path, handler)\n}",
"func (c *Client) Put(path string, data string) {\n\turl := c.UrlFor(path)\n\tres, err := c.http.PutJson(url, data)\n\tc.Resp = res\n\tc.Err = err\n\tc.parseResponse()\n}",
"func (f *Fastglue) PUT(path string, h FastRequestHandler) {\n\tf.Router.PUT(path, f.handler(h))\n}",
"func (r *RouterGroup) PUT(path string, fn Controller) {\n\tr.gin.PUT(path, adapt(fn))\n}",
"func Put(url string, data ...interface{}) (*ClientResponse, error) {\n\treturn DoRequest(\"PUT\", url, data...)\n}",
"func (cg *ContextGroup) PUT(path string, handler http.HandlerFunc) {\n\tcg.Handle(\"PUT\", path, handler)\n}",
"func Put(path string, handler http.Handler) Route {\n\treturn NewRoute(\"PUT\", path, handler)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PATCH is a shorthand for e.Request("PATCH", path, pathargs...).
|
func (e *Expect) PATCH(path string, pathargs ...interface{}) *Request {
return e.Request(http.MethodPatch, path, pathargs...)
}
|
[
"func (e *Expect) PATCH(url string, args ...interface{}) *Request {\n\treturn e.Request(\"PATCH\", url, args...)\n}",
"func (c *Client) Patch(path string) *Request {\n\treturn c.Request(http.MethodPatch, path)\n}",
"func (r *Router) PATCH(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodPatch, url, viewFn)\n}",
"func (r *Request) Patch(path, contentType string, data ...interface{}) {\n\tr.Send(\"PATCH\", path, contentType, data...)\n}",
"func (wok *Wok) PATCH(path string, mws ...noodle.Middleware) RouteClosure {\n\treturn wok.Handle(\"PATCH\", path, mws...)\n}",
"func (c *Client) Patch(bFn BodyFn, urlPath ...string) *Req {\n\treturn c.Req(http.MethodPatch, bFn, urlPath...)\n}",
"func (cg *ContextGroup) PATCH(path string, handler http.HandlerFunc) {\n\tcg.Handle(\"PATCH\", path, handler)\n}",
"func (r *Router) PATCH(path string, handle HandlerFunc, middleware ...MiddlewareFunc) {\n\tr.Handle(\"PATCH\", path, handle, middleware...)\n}",
"func (r *Router) PATCH(path string, handler fasthttp.RequestHandler) {\n\tr.Handle(fasthttp.MethodPatch, path, handler)\n}",
"func (r *Router) PATCH(path string, handle Handle) {\n\tr.Handle(\"PATCH\", path, handle)\n}",
"func (e *Engine) PATCH(path string, fn Controller) {\n\te.gin.PATCH(path, adapt(fn))\n}",
"func (e *Engine) PATCH(path string, handler Handler) {\n\te.registerRoute(http.MethodPatch, path, handler)\n}",
"func (r *RouterGroup) PATCH(path string, fn Controller) {\n\tr.gin.PATCH(path, adapt(fn))\n}",
"func (rg *RouteGroup) PATCH(path string, handlers ...Handler) *Route {\n\treturn rg.add(\"PATCH\", path, handlers)\n}",
"func PATCH(c *httputil.Client, data DataMultipartWriter, v interface{}, url string) error {\n\treturn Do(c, \"PATCH\", data, v, url)\n}",
"func (r *RouterPublic) PATCH(path string, handle httprouter.Handle) {\n\tr.Handle(\"PATCH\", path, NoCacheHandler(handle))\n}",
"func (router *Router) PATCH(relativePath string, handler Handler, decorators ...Decorator) {\n\trouter.createRouter(http.MethodPatch, relativePath, handler, \"\", decorators...)\n}",
"func (r *Mux) PATCH(path string, handler http.HandlerFunc) {\n\tr.HandleFunc(\"PATCH\", path, handler)\n}",
"func (api *Api) Patch(path string, endpoint http.HandlerFunc, queries ...string) {\n\tapi.Router.HandleFunc(path, endpoint).Methods(\"PATCH\").Queries(queries...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DELETE is a shorthand for e.Request("DELETE", path, pathargs...).
|
func (e *Expect) DELETE(path string, pathargs ...interface{}) *Request {
return e.Request(http.MethodDelete, path, pathargs...)
}
|
[
"func (e *Expect) DELETE(url string, args ...interface{}) *Request {\n\treturn e.Request(\"DELETE\", url, args...)\n}",
"func (c *Client) Delete(urlPath ...string) *Req {\n\treturn c.Req(http.MethodDelete, nil, urlPath...)\n}",
"func (c *Client) Delete(path string) *Request {\n\treturn c.Request(http.MethodDelete, path)\n}",
"func (r Requester) Delete(path string) Requester {\n\treq, err := http.NewRequest(http.MethodDelete, r.url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tr.httpRequest = req\n\treturn r\n}",
"func (r *Request) Delete(path, contentType string, data ...interface{}) {\n\tr.Send(\"DELETE\", path, contentType, data...)\n}",
"func (c *Client) Delete(url string, headers, queryParams map[string][]string, data interface{}) (response *http.Response, err error) {\n\treturn c.makeRequest(url, http.MethodDelete, headers, queryParams, data)\n}",
"func Delete(url string, data ...interface{}) (*ClientResponse, error) {\n\treturn DoRequest(\"DELETE\", url, data...)\n}",
"func DELETE(mux *http.ServeMux, path string, f func(w http.ResponseWriter, r *http.Request)) {\n\tmux.HandleFunc(path, Chain(f, Method(http.MethodDelete)))\n}",
"func (r *Request) Delete() (*Response, error) {\n\n\t// set the HTTP method\n\tr.httpMethod = common.HTTPMethodDelete\n\n\t// get the transporter to do the work\n\treturn r.session.transporter.MakeRequest(r)\n}",
"func (c Client) delete(path string, params url.Values, holder interface{}) error {\n\treturn c.request(\"DELETE\", path, params, &holder)\n}",
"func (r *Router) DELETE(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodDelete, url, viewFn)\n}",
"func (g *Group) DELETE(path string, handler Handler, middleware ...Middleware) *Group {\n\treturn g.Add(http.MethodDelete, path, handler, middleware...)\n}",
"func (cl *Client) Delete(ctx context.Context, path string, qps url.Values, into interface{}) error {\n\treturn cl.do(ctx, http.MethodDelete, path, qps, nil, into)\n}",
"func (c *Client) Delete(path string, headers map[string]string) error {\n\t_, err := c.DoWithStatus(NewRequest(http.MethodDelete, path, headers, nil),\n\t\thttp.StatusOK)\n\treturn err\n}",
"func (c Client) Delete(uri string) (*http.Response, error) {\n\treturn c.do(\"DELETE\", uri, nil)\n}",
"func (c *Client) Delete() *Request {\n\treturn NewRequest(c.httpClient, c.base, \"DELETE\", c.version, c.authstring, c.userAgent)\n}",
"func (self *Client) Delete(dst interface{}, path string, data url.Values) error {\n\tvar addr *url.URL\n\tvar err error\n\tvar body *strings.Reader\n\n\tif addr, err = url.Parse(self.Prefix + strings.TrimLeft(path, \"/\")); err != nil {\n\t\treturn err\n\t}\n\n\tif data != nil {\n\t\tbody = strings.NewReader(data.Encode())\n\t}\n\n\treturn self.newRequest(dst, \"DELETE\", addr, body)\n}",
"func (c *baseClient) Delete(path string) *baseClient {\n\tc.method = \"DELETE\"\n\treturn c.Path(path)\n}",
"func (APIResourceBase) Delete(session *Session, url string, queries url.Values, body io.Reader) (APIStatus, interface{}) {\n\treturn FailSimple(http.StatusMethodNotAllowed), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetDialect gets the name of the dialect that defines the Message
|
func (m *Heartbeat) GetDialect() string {
return "common"
}
|
[
"func GetDialect() SQLDialect { return def.GetDialect() }",
"func DbDialect() string {\n\treturn Db().Dialect().GetName()\n}",
"func DbDialect() string {\n\treturn dbDialectStr\n}",
"func (m *StorageInformation) GetDialect() string {\n\treturn \"common\"\n}",
"func (in *Instance) GetDialect() SQLDialect {\n\treturn in.dialect\n}",
"func (cfg DBConfig) Dialect() string {\n\treturn \"mysql\"\n}",
"func GetDialect() Dialect {\n\treturn GetDialectByDriver(config.GetDatabases().GetDefault().Driver)\n}",
"func (tx *txDriver) Dialect() string { return tx.drv.Dialect() }",
"func (db *Database) GetDatabaseDialect() string {\n\treturn \"dynamodb\"\n}",
"func Dialect() streamer.Dialect {\n\treturn pcoreDialectSingleton\n}",
"func PickDialect(name string) Dialect {\n\tfor _, d := range AllDialects {\n\t\tif strings.EqualFold(name, d.String()) || strings.EqualFold(name, d.Alias()) {\n\t\t\treturn d\n\t\t}\n\t}\n\treturn nil\n}",
"func PickDialect(name string) Dialect {\n\tfor _, d := range AllDialects {\n\t\tif strings.EqualFold(name, d.Name()) || strings.EqualFold(name, d.Alias()) {\n\t\t\treturn d\n\t\t}\n\t}\n\treturn nil\n}",
"func SetDialect(d string) error { return def.SetDialect(d) }",
"func GetDialectByDriver(driver string) Dialect {\n\tswitch driver {\n\tcase \"mysql\":\n\t\treturn mysql{\n\t\t\tcommonDialect: commonDialect{delimiter: \"`\", delimiter2: \"`\"},\n\t\t}\n\tcase \"mssql\":\n\t\treturn mssql{\n\t\t\tcommonDialect: commonDialect{delimiter: \"[\", delimiter2: \"]\"},\n\t\t}\n\tcase \"postgresql\":\n\t\treturn postgresql{\n\t\t\tcommonDialect: commonDialect{delimiter: `\"`, delimiter2: `\"`},\n\t\t}\n\tcase \"sqlite\":\n\t\treturn sqlite{\n\t\t\tcommonDialect: commonDialect{delimiter: \"`\", delimiter2: \"`\"},\n\t\t}\n\tdefault:\n\t\treturn commonDialect{delimiter: \"`\", delimiter2: \"`\"}\n\t}\n}",
"func (o BackupOutput) DatabaseDialect() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Backup) pulumi.StringOutput { return v.DatabaseDialect }).(pulumi.StringOutput)\n}",
"func GetDatastoreDialect(driver string) DatastoreDialect {\n\tif result, ok := datastoreDialectableRegistry[driver]; ok {\n\t\treturn result\n\t}\n\tif isSQLDatabase(driver) {\n\t\tRegisterDatastoreDialect(driver, newAnsiSQLDialect())\n\t\treturn datastoreDialectableRegistry[driver]\n\t}\n\tpanic(\"failed to lookup datastore dialect: \" + driver)\n}",
"func (in *Instance) SetDialect(d string) error {\n\tswitch d {\n\tcase \"postgres\":\n\t\tin.dialect = &PostgresDialect{tableName: in.TableName}\n\tcase \"mysql\":\n\t\tin.dialect = &MySQLDialect{tableName: in.TableName}\n\tcase \"sqlite3\":\n\t\tin.dialect = &Sqlite3Dialect{tableName: in.TableName}\n\tcase \"mssql\":\n\t\tin.dialect = &SqlServerDialect{tableName: in.TableName}\n\tcase \"redshift\":\n\t\tin.dialect = &RedshiftDialect{tableName: in.TableName}\n\tcase \"tidb\":\n\t\tin.dialect = &TiDBDialect{tableName: in.TableName}\n\tdefault:\n\t\treturn fmt.Errorf(\"%q: unknown dialect\", d)\n\t}\n\n\treturn nil\n}",
"func (d *Dialect) Name() string {\n\treturn \"mysql\"\n}",
"func For(name string) Dialect {\n\tif name == \"\" {\n\t\tdrivers := sql.Drivers()\n\t\tif len(drivers) > 0 {\n\t\t\tname = drivers[0]\n\t\t}\n\t}\n\tname = strings.TrimSpace(strings.ToLower(name))\n\n\td := dialects[name]\n\tif d == nil {\n\t\td = defaultDialect\n\t}\n\n\treturn d\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
HasExtensionFields returns true if the message definition contained extensions; false otherwise
|
func (m *Heartbeat) HasExtensionFields() bool {
return false
}
|
[
"func (m *ComponentInformation) HasExtensionFields() bool {\n\treturn false\n}",
"func (m *StorageInformation) HasExtensionFields() bool {\n\treturn false\n}",
"func (m *UavionixAdsbOutDynamic) HasExtensionFields() bool {\n\treturn false\n}",
"func HasAdaptationFieldExtension(pkt *packet.Packet) bool {\n\treturn pkt[5]&0x01 != 0\n}",
"func (o *Post) HasExtensions() bool {\n\tif o != nil && o.Extensions != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (r *AttachmentPreview) HasExt() bool {\n\treturn r.hasExt\n}",
"func (this *RTPPacket) HasExtension() bool {\n\treturn this.header.extension != 0\n}",
"func (o *ServiceDefinitionV2Dot1) HasExtensions() bool {\n\treturn o != nil && o.Extensions != nil\n}",
"func (f *DialectMessageField) GetIsExtension() bool {\n\treturn f.isExtension\n}",
"func (f *messageFlattener) parseExtensions() (err error) {\n\tdescriptorMessage, ok := f.message.(descriptor.Message)\n\tif !ok {\n\t\treturn fmt.Errorf(\"message [%s] does not implement descriptor.Message\", f.message)\n\t}\n\t_, messageDescriptor := descriptor.ForMessage(descriptorMessage)\n\n\tfor i := range messageDescriptor.Field {\n\t\tfieldDescriptor := messageDescriptor.Field[i]\n\t\tfV := reflect.ValueOf(fieldDescriptor).Elem()\n\t\tfType := fV.Type()\n\t\tfieldName := *fieldDescriptor.Name\n\t\tfor i := 0; i < fV.NumField(); i++ {\n\t\t\tfield := fV.Field(i)\n\t\t\tif fType.Field(i).Name != \"Options\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif proto.HasExtension(field.Interface().(proto.Message), proofspb.E_ExcludeFromTree) {\n\t\t\t\text, err := proto.GetExtension(field.Interface().(proto.Message), proofspb.E_ExcludeFromTree)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tb, _ := ext.(*bool)\n\t\t\t\tif *b {\n\t\t\t\t\tf.excludedFields[fieldName] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (o *ControllerServiceDTO) HasExtensionMissing() bool {\n\tif o != nil && o.ExtensionMissing != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (message *Message) HasAttachments() bool {\n\treturn message.GetInteger(3591) & 0x10 != 0\n}",
"func (d *Device) HasExtension(extension string) bool {\n\tfor _, v := range d.Extensions {\n\t\tif v == extension {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (fi fileInfo) HasExt(exts ...string) bool {\n\tfor _, ext := range exts {\n\t\tif strings.HasSuffix(strings.ToLower(fi.Name), strings.ToLower(ext)) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func isMessageSetExtension(xt pref.ExtensionType) bool {\n\txd := xt.Descriptor()\n\tif xd.Name() != \"message_set_extension\" {\n\t\treturn false\n\t}\n\tmd := xd.Message()\n\tif md == nil {\n\t\treturn false\n\t}\n\tif xd.FullName().Parent() != md.FullName() {\n\t\treturn false\n\t}\n\txmd, ok := xd.ContainingMessage().(interface{ IsMessageSet() bool })\n\treturn ok && xmd.IsMessageSet()\n}",
"func (me TxsdCounterSimpleContentExtensionType) IsMessage() bool { return me.String() == \"message\" }",
"func (me TxsdType) IsExtended() bool { return me.String() == \"extended\" }",
"func FieldHasPBEX(field *protogen.Field) bool {\n\tfop := field.Desc.Options().(*descriptorpb.FieldOptions)\n\tif field.Desc.IsList() || field.Desc.IsMap() {\n\t\tif proto.HasExtension(fop, E_MapRepeatedLenEq) ||\n\t\t\tproto.HasExtension(fop, E_MapRepeatedLenNotEq) ||\n\t\t\tproto.HasExtension(fop, E_MapRepeatedLenGt) ||\n\t\t\tproto.HasExtension(fop, E_MapRepeatedLenGte) ||\n\t\t\tproto.HasExtension(fop, E_MapRepeatedLenLt) ||\n\t\t\tproto.HasExtension(fop, E_MapRepeatedLenLte) {\n\t\t\treturn true\n\t\t}\n\t}\n\tswitch field.Desc.Kind() {\n\tcase protoreflect.BoolKind:\n\t\t//bool\n\t\tif proto.HasExtension(fop, E_BoolEq) {\n\t\t\treturn true\n\t\t}\n\tcase protoreflect.Int32Kind:\n\t\tfallthrough\n\tcase protoreflect.Sint32Kind:\n\t\tfallthrough\n\tcase protoreflect.Sfixed32Kind:\n\t\tfallthrough\n\t\t//int32 or []int32\n\tcase protoreflect.Int64Kind:\n\t\tfallthrough\n\tcase protoreflect.Sint64Kind:\n\t\tfallthrough\n\tcase protoreflect.Sfixed64Kind:\n\t\t//int64 or []int64\n\t\tif proto.HasExtension(fop, E_IntIn) ||\n\t\t\tproto.HasExtension(fop, E_IntNotIn) ||\n\t\t\tproto.HasExtension(fop, E_IntGt) ||\n\t\t\tproto.HasExtension(fop, E_IntGte) ||\n\t\t\tproto.HasExtension(fop, E_IntLt) ||\n\t\t\tproto.HasExtension(fop, E_IntLte) {\n\t\t\treturn true\n\t\t}\n\tcase protoreflect.Uint32Kind:\n\t\tfallthrough\n\tcase protoreflect.Fixed32Kind:\n\t\tfallthrough\n\t\t//uint32 or []uint32\n\tcase protoreflect.Uint64Kind:\n\t\tfallthrough\n\tcase protoreflect.Fixed64Kind:\n\t\t//uint64 or []uint64\n\t\tif proto.HasExtension(fop, E_UintIn) ||\n\t\t\tproto.HasExtension(fop, E_UintNotIn) ||\n\t\t\tproto.HasExtension(fop, E_UintGt) ||\n\t\t\tproto.HasExtension(fop, E_UintGte) ||\n\t\t\tproto.HasExtension(fop, E_UintLt) ||\n\t\t\tproto.HasExtension(fop, E_UintLte) {\n\t\t\treturn true\n\t\t}\n\tcase protoreflect.FloatKind:\n\t\t//float32 or []float32\n\t\tfallthrough\n\tcase protoreflect.DoubleKind:\n\t\t//float64 or []float64\n\t\tif proto.HasExtension(fop, E_FloatIn) 
||\n\t\t\tproto.HasExtension(fop, E_FloatNotIn) ||\n\t\t\tproto.HasExtension(fop, E_FloatGt) ||\n\t\t\tproto.HasExtension(fop, E_FloatGte) ||\n\t\t\tproto.HasExtension(fop, E_FloatLt) ||\n\t\t\tproto.HasExtension(fop, E_FloatLte) {\n\t\t\treturn true\n\t\t}\n\tcase protoreflect.EnumKind:\n\t\t//enum or []enum\n\t\tif proto.HasExtension(fop, E_EnumIn) ||\n\t\t\tproto.HasExtension(fop, E_EnumNotIn) ||\n\t\t\tproto.HasExtension(fop, E_EnumGt) ||\n\t\t\tproto.HasExtension(fop, E_EnumGte) ||\n\t\t\tproto.HasExtension(fop, E_EnumLt) ||\n\t\t\tproto.HasExtension(fop, E_EnumLte) {\n\t\t\treturn true\n\t\t}\n\tcase protoreflect.BytesKind:\n\t\t//[]bytes or [][]bytes\n\t\tfallthrough\n\tcase protoreflect.StringKind:\n\t\t//string or []string\n\t\tif proto.HasExtension(fop, E_StringBytesIn) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesNotIn) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesRegMatch) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesRegNotMatch) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesLenEq) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesLenNotEq) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesLenGt) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesLenGte) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesLenLt) ||\n\t\t\tproto.HasExtension(fop, E_StringBytesLenLte) {\n\t\t\treturn true\n\t\t}\n\tcase protoreflect.MessageKind:\n\t\tif !field.Desc.IsMap() {\n\t\t\t//message or []message\n\t\t\tif proto.HasExtension(fop, E_MessageNotNil) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tkey := field.Message.Fields[0]\n\t\tvalue := field.Message.Fields[1]\n\t\tswitch key.Desc.Kind() {\n\t\tcase protoreflect.Int32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Sint32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Sfixed32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Int64Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Sint64Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Sfixed64Kind:\n\t\t\tif proto.HasExtension(fop, E_MapKeyIntIn) 
||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyIntNotIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyIntGt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyIntGte) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyIntLt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyIntLte) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase protoreflect.Uint32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Fixed32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Uint64Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Fixed64Kind:\n\t\t\tif proto.HasExtension(fop, E_MapKeyUintIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyUintNotIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyUintGt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyUintGte) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyUintLt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyUintLte) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase protoreflect.StringKind:\n\t\t\tif proto.HasExtension(fop, E_MapKeyStringIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringNotIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringRegMatch) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringRegNotMatch) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringLenEq) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringLenNotEq) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringLenGt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringLenGte) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringLenLt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapKeyStringLenLte) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tswitch value.Desc.Kind() {\n\t\tcase protoreflect.EnumKind:\n\t\t\tif proto.HasExtension(fop, E_MapValueEnumIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueEnumNotIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueEnumGt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueEnumGte) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueEnumLt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueEnumLte) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase protoreflect.BoolKind:\n\t\t\tif proto.HasExtension(fop, E_MapValueBoolEq) 
{\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase protoreflect.Int32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Sint32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Sfixed32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Int64Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Sint64Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Sfixed64Kind:\n\t\t\tif proto.HasExtension(fop, E_MapValueIntIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueIntNotIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueIntGt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueIntGte) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueIntLt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueIntLte) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase protoreflect.Uint32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Fixed32Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Uint64Kind:\n\t\t\tfallthrough\n\t\tcase protoreflect.Fixed64Kind:\n\t\t\tif proto.HasExtension(fop, E_MapValueUintIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueUintNotIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueUintGt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueUintGte) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueUintLt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueUintLte) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase protoreflect.FloatKind:\n\t\t\tfallthrough\n\t\tcase protoreflect.DoubleKind:\n\t\t\tif proto.HasExtension(fop, E_MapValueFloatIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueFloatNotIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueFloatGt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueFloatGte) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueFloatLt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueFloatLte) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase protoreflect.BytesKind:\n\t\t\tfallthrough\n\t\tcase protoreflect.StringKind:\n\t\t\tif proto.HasExtension(fop, E_MapValueStringBytesIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesNotIn) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesRegMatch) 
||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesRegNotMatch) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesLenEq) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesLenNotEq) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesLenGt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesLenGte) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesLenLt) ||\n\t\t\t\tproto.HasExtension(fop, E_MapValueStringBytesLenLte) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase protoreflect.MessageKind:\n\t\t\tif proto.HasExtension(fop, E_MapValueMessageNotNil) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (me TxsdType) IsExtended() bool { return me == \"extended\" }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Version return the version string of Go runtime. This works well even though the runtime is devel version.
|
func Version() string {
b, err := exec.Command("go", "version").CombinedOutput()
if err != nil {
panic(err.Error())
}
token := strings.Split(string(b), " ")
if strings.HasPrefix(token[2], "go") {
return token[2]
}
if !strings.HasPrefix(token[2], "devel") {
panic("Can not detect go version")
}
d := strings.Join(token[4:len(token)-1], " ")
t0, err := time.Parse(`Mon Jan _2 15:04:05 2006 -0700`, d)
if err != nil {
panic("Can not detect go version")
}
for i, v := range vers {
t1, err := time.Parse(`2006-01-2 15:04:05 -0700`, v[0])
if err != nil {
continue
}
if t1.After(t0) {
return vers[i-1][1]
}
}
return vers[len(vers)-1][1]
}
|
[
"func Version() string {\n\treturn runtime.Version()\n}",
"func GoVersionString() string {\n\treturn runtime.Version()\n}",
"func Version() (os string, rt string) {\n\tos = runtime.GOOS\n\tvar flavor string\n\tif ver, err := syscall.GetVersion(); err == nil {\n\t\tvar major, minor, build = byte(ver), uint8(ver >> 8), uint16(ver >> 16)\n\t\tswitch {\n\t\tcase major == 4:\n\t\t\tswitch minor {\n\t\t\tcase 0:\n\t\t\t\tflavor = \"NT\"\n\t\t\tcase 10:\n\t\t\t\tflavor = \"98\"\n\t\t\tcase 90:\n\t\t\t\tflavor = \"Me\"\n\t\t\t}\n\t\tcase major == 5:\n\t\t\tswitch {\n\t\t\tcase minor == 2:\n\t\t\t\tflavor = \"2003\"\n\t\t\tcase minor == 1 && build == 2600:\n\t\t\t\tflavor = \"XP\"\n\t\t\tcase minor == 0:\n\t\t\t\tflavor = \"2000\"\n\t\t\t}\n\t\tcase major == 6:\n\t\t\tswitch minor {\n\t\t\tcase 3:\n\t\t\t\tflavor = \"8.1\"\n\t\t\tcase 2:\n\t\t\t\tflavor = \"8\"\n\t\t\tcase 1:\n\t\t\t\tflavor = \"7\"\n\t\t\tcase 0:\n\t\t\t\tflavor = \"Vista\"\n\t\t\t}\n\t\t}\n\t\tos = fmt.Sprintf(\"%s %s: [Version %d.%d.%d]\",\n\t\t\tstrings.Title(runtime.GOOS), flavor,\n\t\t\tmajor, minor, build)\n\t}\n\trt = fmt.Sprintf(\"%s %s/%s\", runtime.Version(), runtime.GOOS, runtime.GOARCH)\n\treturn os, BOTVERSION + \" (\" + rt + \")\"\n}",
"func Version() (string, error) {\n\tcmd := exec.Command(\"go\", \"version\")\n\tcmd.Env = os.Environ()\n\n\tres, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to 'go version': %w\", err)\n\t}\n\n\treturn strings.TrimSpace(string(res)), nil\n}",
"func Version() string {\n\tif version == \"\" {\n\t\treturn \"devel\"\n\t}\n\n\treturn version\n}",
"func (svc *Compiler) Version() string {\n\tval, err := svc.ctx.RunScript(\"svelte.VERSION\", \"version_call\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn val.String()\n}",
"func GoVersion() string {\n\treturn runtime.Version()[2:]\n}",
"func Version() (v string) {\n\treturn version\n}",
"func Version() string {\n\t//char* IupVersion (void);\n\treturn C.GoString(C.IupVersion())\n}",
"func RuntimeVersion(ctx *gcp.Context, dir string) (string, error) {\n\tif v := os.Getenv(env.Runtime); v != \"\" && !strings.HasPrefix(v, \"python\") {\n\t\treturn \"*\", nil\n\t}\n\n\tif v := os.Getenv(versionEnv); v != \"\" {\n\t\tctx.Logf(\"Using Python version from %s: %s\", versionEnv, v)\n\t\treturn v, nil\n\t}\n\tif v := os.Getenv(env.RuntimeVersion); v != \"\" {\n\t\tctx.Logf(\"Using Python version from %s: %s\", env.RuntimeVersion, v)\n\t\treturn v, nil\n\t}\n\tv, err := versionFromFile(ctx, dir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif v != \"\" {\n\t\treturn v, nil\n\t}\n\n\t// This will use the highest listed at https://dl.google.com/runtimes/python/version.json.\n\tctx.Logf(\"Python version not specified, using the latest available version.\")\n\treturn \"*\", nil\n}",
"func (info *SystemInformation) getRuntimeVersion() string {\n\treturn runtime.Version()\n}",
"func (p *Processor) Version() string {\n\treturn p.str(0x10)\n}",
"func Version() string {\r\n\tonce.Do(func() {\r\n\t\tsemver := fmt.Sprintf(\"%d.%d.%d\", major, minor, patch)\r\n\t\tverBuilder := bytes.NewBufferString(semver)\r\n\t\tif tag != \"\" && tag != \"-\" {\r\n\t\t\tupdated := strings.TrimPrefix(tag, \"-\")\r\n\t\t\t_, err := verBuilder.WriteString(\"-\" + updated)\r\n\t\t\tif err == nil {\r\n\t\t\t\tverBuilder = bytes.NewBufferString(semver)\r\n\t\t\t}\r\n\t\t}\r\n\t\tversion = verBuilder.String()\r\n\t})\r\n\treturn version\r\n}",
"func GetVersion() string {\n\treturn version.VERSIONSTR\n}",
"func Version() string {\n\treturn softwareVersion\n}",
"func goVersion() (string, error) {\n\t// deppy might have been compiled with a different\n\t// version, so we can't just use runtime.Version here.\n\tcmd := exec.Command(\"go\", \"version\")\n\tcmd.Stderr = os.Stderr\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts := strings.TrimSpace(string(out))\n\ts = strings.TrimSuffix(s, \" \"+runtime.GOOS+\"/\"+runtime.GOARCH)\n\ts = strings.TrimPrefix(s, \"go version \")\n\treturn s, nil\n}",
"func (o SpringCloudJavaDeploymentOutput) RuntimeVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *SpringCloudJavaDeployment) pulumi.StringPtrOutput { return v.RuntimeVersion }).(pulumi.StringPtrOutput)\n}",
"func Version() string {\n\tcmd := exec.Command(\"systemctl\", \"--version\")\n\tbuf, err := cmd.Output()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\ti := bytes.Index(buf, []byte(\"\\n\"))\n\tif i == 0 {\n\t\treturn \"\"\n\t}\n\treturn string(buf[:i])\n}",
"func GetVersion() string {\n\tif len(Version) == 0 {\n\t\treturn \"dev\"\n\t}\n\treturn Version\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithMultiStatsHandler returns a DialOption that chains given stats.Handler(s). The first handler will be executed first, and then next afterwards. nil Handlers are ignored.
|
func WithMultiStatsHandler(handlers ...stats.Handler) grpc.DialOption {
mh := &multiStatsHandler{}
for _, h := range handlers {
if h == nil {
continue
}
mh.delegates = append(mh.delegates, h)
}
switch len(mh.delegates) {
case 0:
// Effectively, this unsets the dial option for the stats handler.
return grpc.WithStatsHandler(nil)
case 1:
// Avoid unnecessary delegation layer.
return grpc.WithStatsHandler(mh.delegates[0])
}
return grpc.WithStatsHandler(mh)
}
|
[
"func MultiHandler(handlers ...Handler) Handler {\n\tc := make([]Handler, len(handlers))\n\tcopy(c, handlers)\n\treturn &multiHandler{\n\t\thandlers: c,\n\t}\n}",
"func NewMultipleHandler(hs []JSONHandler) *MultipleHandler {\n\thandlers := make([]JSONHandler, 0, len(hs))\n\tfor _, h := range hs {\n\t\tif h.Method != \"\" && h.PathFmt != \"\" {\n\t\t\thandlers = append(handlers, h)\n\t\t}\n\t}\n\treturn &MultipleHandler{\n\t\thandlers: handlers,\n\t}\n}",
"func NewMultiStatReporter(reporters ...StatsReporter) StatsReporter {\n\treturn multiStatReporter(reporters)\n}",
"func Chain(hs ...juggler.Handler) juggler.Handler {\n\treturn juggler.HandlerFunc(func(ctx context.Context, c *juggler.Conn, m message.Msg) {\n\t\tfor _, h := range hs {\n\t\t\th.Handle(ctx, c, m)\n\t\t}\n\t})\n}",
"func NewStatsMux(s stats.Statter) *bone.Mux {\n\tmux := NewMux()\n\n\tvar h StatsHandler\n\tfor s != nil {\n\t\tif sh, ok := s.(StatsHandler); ok {\n\t\t\th = sh\n\t\t\tbreak\n\t\t}\n\n\t\ts = stats.Unwrap(s)\n\t}\n\n\tif h != nil {\n\t\tmux.Get(DefaultStatsPath, h.Handler())\n\t}\n\n\treturn mux\n}",
"func (c *Client) Stats(stats ...StatType) (statC chan Stat, done func() error, err error) {\n\tif stats == nil {\n\t\tstats = []StatType{\n\t\t\tStatTypeDPIStats,\n\t\t\tStatTypeInterfaces,\n\t\t\tStatTypeSystemStats,\n\t\t}\n\t}\n\n\tdoneC := make(chan struct{})\n\terrC := make(chan error)\n\twg := new(sync.WaitGroup)\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(errC)\n\t\t\twg.Done()\n\t\t}()\n\n\t\tif err := c.keepalive(doneC); err != nil {\n\t\t\terrC <- err\n\t\t}\n\t}()\n\n\tstatC, wsDone, err := c.initWebsocket(stats)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdone = func() error {\n\t\tclose(doneC)\n\t\twg.Wait()\n\n\t\tif err := <-errC; err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := wsDone(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclose(statC)\n\t\treturn nil\n\t}\n\n\treturn statC, done, nil\n}",
"func (ho *HandlerOption) Append(h ...Handler) *HandlerOption {\n\tho.handlers = append(ho.handlers, h...)\n\treturn ho\n}",
"func MultipleMiddleware(h http.HandlerFunc, m ...Middleware) http.HandlerFunc {\n\n\tif len(m) < 1 {\n\t\treturn h\n\t}\n\n\twrapped := h\n\n\t// loop in reverse to preserve middleware orders\n\tfor i := len(m) - 1; i >= 0; i-- {\n\t\twrapped = m[i](wrapped)\n\t}\n\n\treturn wrapped\n\n}",
"func MultipleMiddleware(h http.HandlerFunc, m ...Middleware) http.HandlerFunc {\n\n\tif len(m) < 1 {\n\t\treturn h\n\t}\n\n\twrapped := h\n\tctx := context.TODO()\n\t// loop in reverse to preserve middleware order\n\tfor i := len(m) - 1; i >= 0; i-- {\n\t\twrapped = m[i](&ctx, wrapped)\n\t}\n\n\treturn wrapped\n\n}",
"func Chain(handlers ...interface{}) http.Handler {\n\t// fake handler in order to wrap last handler call \"next\"\n\tvar f http.Handler = http.HandlerFunc(blobHandler)\n\t// apply middleware/handlers from the last to the first one\n\tfor i := len(handlers) - 1; i >= 0; i-- {\n\t\tswitch t := handlers[i].(type) {\n\t\t// build the handler from classic middleware func\n\t\tcase func(http.ResponseWriter, *http.Request, http.Handler):\n\t\t\tf = func(curr MiddlewareFunc, next http.Handler) http.HandlerFunc {\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tcurr(w, r, next)\n\t\t\t\t}\n\t\t\t}(t, f)\n\t\tcase MiddlewareFunc:\n\t\t\tf = func(curr MiddlewareFunc, next http.Handler) http.HandlerFunc {\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tcurr(w, r, next)\n\t\t\t\t}\n\t\t\t}(t, f)\n\t\t// wrap existing handler (or blobHandler) with a Middleware/func\n\t\tcase func(http.Handler) http.Handler:\n\t\t\tf = t(f)\n\t\tcase Middleware:\n\t\t\tf = t(f)\n\t\t// ordinary functions can also be provided as arguments, in such case they\n\t\t// will be called via adapter http.HandlerFunc\n\t\tcase func(w http.ResponseWriter, r *http.Request):\n\t\t\tf = func(curr, next http.Handler) http.HandlerFunc {\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tcurr.ServeHTTP(w, r)\n\t\t\t\t\t// due to the blobHandler next will never be nil\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t}\n\t\t\t}(http.HandlerFunc(t), f)\n\t\t// since http.HandlerFunc implements http.Handler interface we can use type\n\t\t// http.Handler for both of them\n\t\tcase http.Handler:\n\t\t\tf = func(curr, next http.Handler) http.HandlerFunc {\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tcurr.ServeHTTP(w, r)\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t}\n\t\t\t}(t, f)\n\t\tdefault:\n\t\t\t// everything else is not supported\n\t\t\tpanic(fmt.Sprintf(\"unsupported argument type \\\"%T\\\"\", t))\n\t\t}\n\t}\n\treturn 
f\n}",
"func HandlerSeries(handlers ...Handler) Handler {\n\tvar series = DoNothing\n\tfor i := len(handlers) - 1; i >= 0; i-- {\n\t\tif handlers[i] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tseries = doubleHandler{lhs: handlers[i], rhs: series}\n\t}\n\treturn series\n}",
"func HandlerStats(stats stats.Stats) server.HandlerWrapper {\n\t// return a handler wrapper\n\treturn func(h server.HandlerFunc) server.HandlerFunc {\n\t\t// return a function that returns a function\n\t\treturn func(ctx context.Context, req server.Request, rsp interface{}) error {\n\t\t\t// execute the handler\n\t\t\terr := h(ctx, req, rsp)\n\t\t\t// record the stats\n\t\t\tstats.Record(err)\n\t\t\t// return the error\n\t\t\treturn err\n\t\t}\n\t}\n}",
"func Chain(he ...HandlerE) HandlerE {\n\tf := func(w http.ResponseWriter, r *http.Request) error {\n\t\tfor _, h := range he {\n\t\t\tif err := h.ServeHTTPe(w, r); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn HandlerFuncE(f)\n}",
"func NewHandlerMetrics(scope tally.Scope) *HandlerMetrics {\n\thandlerAPIScope := scope.SubScope(\"api\")\n\thandlerSuccessScope := scope.Tagged(map[string]string{\"result\": \"success\"})\n\thandlerFailScope := scope.Tagged(map[string]string{\"result\": \"fail\"})\n\treturn &HandlerMetrics{\n\t\tHead: scope.Gauge(\"head\"),\n\t\tTail: scope.Gauge(\"tail\"),\n\t\tSize: scope.Gauge(\"size\"),\n\t\tCapacity: scope.Gauge(\"capacity\"),\n\t\tUnexpectedClientError: scope.Counter(\"unexpectedClientError\"),\n\t\tPurgeEventError: scope.Counter(\"purgeEventError\"),\n\t\tInvalidStreamIDError: scope.Counter(\"invalidStreamIdError\"),\n\t\tAddEventAPI: handlerAPIScope.Counter(\"addEvent\"),\n\t\tAddEventSuccess: handlerSuccessScope.Counter(\"addEvent\"),\n\t\tAddEventFail: handlerFailScope.Counter(\"addEvent\"),\n\t\tAddEventDeDupe: handlerAPIScope.Counter(\"addEventDeDupe\"),\n\t\tInitStreamAPI: handlerAPIScope.Counter(\"initStream\"),\n\t\tInitStreamSuccess: handlerSuccessScope.Counter(\"initStream\"),\n\t\tInitStreamFail: handlerFailScope.Counter(\"initStream\"),\n\t\tWaitForEventsAPI: handlerAPIScope.Counter(\"waitForEvents\"),\n\t\tWaitForEventsSuccess: handlerSuccessScope.Counter(\"waitForEvents\"),\n\t\tWaitForEventsFailed: handlerFailScope.Counter(\"waitForEvents\"),\n\t}\n}",
"func WrapHandlers(handlers ...HTTPWrapper) Option {\n\treturn func(g *GRPC) {\n\t\tg.allHTTPWrappers = handlers\n\t}\n}",
"func Chain(handler http.Handler, middleware ...Middleware) http.Handler {\n\th := handler\n\tfor _, m := range middleware {\n\t\th = m(h)\n\t}\n\treturn h\n}",
"func Chain(handlers ...http.Handler) http.Handler {\n\treturn &handlerChain{handlers}\n}",
"func (h *Handler) setMultiChannelFanoutHandler(nh *multichannelfanout.Handler) {\n\th.fanout.Store(nh)\n}",
"func Use(h http.Handler, mws ...Middleware) http.Handler {\n\tfor _, m := range mws {\n\t\tif m != nil {\n\t\t\th = m(h)\n\t\t}\n\t}\n\treturn h\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Separated messages The first is message Other is key => value
|
func separatedMessages(values ...interface{}) (string, []interface{}) {
if len(values)%2 == 0 {
return ``, values
}
return values[0].(string), values[1:]
}
|
[
"func splitMessage(msg string, splitLen int) (msgs []string) {\n\t// This is quite short ;-)\n\tif splitLen < 10 {\n\t\tsplitLen = 10\n\t}\n\tfor len(msg) > splitLen {\n\t\tidx := indexFragment(msg[:splitLen])\n\t\tif idx < 0 {\n\t\t\tidx = splitLen\n\t\t}\n\t\tmsgs = append(msgs, msg[:idx] + \"...\")\n\t\tmsg = msg[idx:]\n\t}\n\treturn append(msgs, msg)\n}",
"func SplitMessage(m string, n int) []string {\n\tpart := \"\"\n\tparts := []string{}\n\n\trunes := bytes.Runes([]byte(m))\n\tl := len(runes)\n\tfor i, r := range runes {\n\t\tpart = part + string(r)\n\t\tif (i+1)%n == 0 {\n\t\t\tparts = append(parts, part)\n\t\t\tpart = \"\"\n\t\t} else if (i + 1) == l {\n\t\t\tparts = append(parts, part)\n\t\t}\n\t}\n\n\treturn parts\n}",
"func messageSplitter(input string, msgLen int, r *regexp.Regexp) []string {\n\tif len(input) == 0 {\n\t\treturn nil\n\t}\n\tif len(input) < msgLen {\n\t\treturn []string{input}\n\t}\n\n\tidx := r.FindIndex([]byte(input[msgLen:]))\n\t// If can't find any delimiter...\n\tif len(idx) < 1 {\n\t\treturn []string{input}\n\t}\n\treturn append([]string{input[:msgLen+idx[0]+1]}, messageSplitter(input[msgLen+idx[0]+1:], msgLen, r)...)\n}",
"func Bytes(input []byte) *Message {\n\tlex := lexer{\n\t\tinput: input,\n\t}\n\tlex.next()\n\n\t// firstKeyPos is the position of the first key in the message\n\t//\n\t// consider the following example message:\n\t//\n\t// this is a message key=1 key=2 more message stuff key=3\n\t// ^\n\t// if a message has key=val and then text that |\n\t// does not match key=val, then the key=val is |\n\t// not parsed for example, the first key is here ----+\n\tvar firstKeyPos int\n\n\t// count kv pairs so that we can allocate once only\n\tvar kvCount int\n\n\t// iterate through the message looking for the position\n\t// before which we will not be looking for key/val pairs\n\tfor lex.token != tokEOF {\n\t\tfor lex.notMatch(tokKey, tokQuotedKey, tokEOF) {\n\t\t\tfirstKeyPos = 0\n\t\t\tlex.next()\n\t\t}\n\t\tif lex.token == tokEOF {\n\t\t\tbreak\n\t\t}\n\t\tfirstKeyPos = lex.pos\n\t\tfor lex.match(tokKey, tokQuotedKey) {\n\t\t\tkvCount += 2\n\t\t\tlex.next() // skip past key\n\t\t\tlex.next() // skip past value\n\t\t\tlex.skipWS()\n\t\t}\n\t}\n\n\tlex.rewind()\n\tlex.skipWS()\n\tmessage := newMessage()\n\tunquoteBuf := message.buf[:]\n\tvar unquoted []byte\n\n\tif firstKeyPos == 0 {\n\t\t// there are no key/value pairs\n\t\tmessage.Text = lex.input\n\t} else {\n\t\tif cap(message.List) < kvCount {\n\t\t\tmessage.List = make([][]byte, 0, kvCount)\n\t\t}\n\t\tvar pos int\n\t\tfor lex.pos < firstKeyPos {\n\t\t\tpos = lex.pos\n\t\t\tlex.next()\n\t\t}\n\t\tmessage.Text = lex.input[:pos]\n\t\tfor lex.match(tokKey, tokQuotedKey) {\n\t\t\tif lex.token == tokKey {\n\t\t\t\tmessage.List = append(message.List, lex.lexeme())\n\t\t\t} else {\n\t\t\t\tunquoted, unquoteBuf = unquote(lex.lexeme(), unquoteBuf)\n\t\t\t\tmessage.List = append(message.List, unquoted)\n\t\t\t}\n\t\t\tlex.next()\n\n\t\t\tswitch lex.token {\n\t\t\tcase tokQuoted:\n\t\t\t\tunquoted, unquoteBuf = unquote(lex.lexeme(), unquoteBuf)\n\t\t\t\tmessage.List = append(message.List, unquoted)\n\t\t\tdefault:\n\t\t\t\tmessage.List 
= append(message.List, lex.lexeme())\n\t\t\t}\n\n\t\t\tlex.next()\n\t\t\tlex.skipWS()\n\t\t}\n\t}\n\n\tmessage.Text = bytes.TrimSpace(message.Text)\n\tmessage.used = bufLength - len(unquoteBuf)\n\treturn message\n}",
"func formatMessage(message []string, width int) []string {\n\tvar ret []string\n\tfor _, l := range message {\n\t\tw := runewidth.StringWidth(l)\n\t\tret = append(ret, l+strings.Repeat(\" \", width-w))\n\t}\n\treturn ret\n}",
"func (s MessageState) MessageParts() []string {\n\treturn strings.Split(s.Event.Message.Content, \" \")\n}",
"func determineMsgs(in, sep string) ([]string, string) {\n\tbits := strings.Split(in, sep)\n\n\tif len(bits) == 1 {\n\t\treturn []string{}, in\n\t}\n\n\treturn bits[0 : len(bits)-1], bits[len(bits)-1]\n}",
"func flatten(entry logEntry) (string, error) {\n\tvar msgValue string\n\tvar errorValue error\n\tif len(entry.Values)%2 == 1 {\n\t\treturn \"\", errors.New(\"log entry cannot have odd number off keyAndValues\")\n\t}\n\n\tkeys := make([]string, 0, len(entry.Values)/2)\n\tvalues := make(map[string]interface{}, len(entry.Values)/2)\n\tfor i := 0; i < len(entry.Values); i += 2 {\n\t\tk, ok := entry.Values[i].(string)\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"key is not a string: %s\", entry.Values[i]))\n\t\t}\n\t\tvar v interface{}\n\t\tif i+1 < len(entry.Values) {\n\t\t\tv = entry.Values[i+1]\n\t\t}\n\t\tswitch k {\n\t\tcase \"msg\":\n\t\t\tmsgValue, ok = v.(string)\n\t\t\tif !ok {\n\t\t\t\tpanic(fmt.Sprintf(\"the msg value is not of type string: %s\", v))\n\t\t\t}\n\t\tcase \"error\":\n\t\t\terrorValue, ok = v.(error)\n\t\t\tif !ok {\n\t\t\t\tpanic(fmt.Sprintf(\"the error value is not of type error: %s\", v))\n\t\t\t}\n\t\tdefault:\n\t\t\tif _, ok := values[k]; !ok {\n\t\t\t\tkeys = append(keys, k)\n\t\t\t}\n\t\t\tvalues[k] = v\n\t\t}\n\t}\n\tstr := \"\"\n\tif entry.Prefix != \"\" {\n\t\tstr += fmt.Sprintf(\"[%s] \", entry.Prefix)\n\t}\n\tstr += msgValue\n\tif errorValue != nil {\n\t\tif msgValue != \"\" {\n\t\t\tstr += \": \"\n\t\t}\n\t\tstr += errorValue.Error()\n\t}\n\tfor _, k := range keys {\n\t\tprettyValue, err := pretty(values[k])\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tstr += fmt.Sprintf(\" %s=%s\", k, prettyValue)\n\t}\n\treturn str, nil\n}",
"func group(msg *daemon.Msg) (messages []*emailq.Msg) {\n\thostMap := make(map[string][]string)\n\n\tfor _, to := range msg.To {\n\t\thost := strings.Split(to, \"@\")[1]\n\t\thostMap[host] = append(hostMap[host], to)\n\t}\n\n\tfor k, v := range hostMap {\n\t\tmessages = append(messages, &emailq.Msg{\n\t\t\tFrom: msg.From,\n\t\t\tHost: k,\n\t\t\tTo: v,\n\t\t\tData: msg.Data,\n\t\t})\n\t}\n\n\treturn messages\n}",
"func SplitMessage(message []rune, encoder Encoder, messageLength int) ([]string, error) {\n\tvar messageParts []string\n\tvar messagePart []rune\n\tvar codePoints int\n\tvar lastSplitPoint = -1 // no valid split point\n\n\tfor idx := 0; idx < len(message); idx++ {\n\t\tvar char = message[idx]\n\n\t\t// Some encodings have variable lengthed characters\n\t\tcharPoints, err := encoder.GetCodePoints(char)\n\t\tif err != nil {\n\t\t\treturn nil, ErrNotEncodable\n\t\t}\n\n\t\tcodePoints += charPoints\n\n\t\t// check for split point\n\t\tif canSplitBefore(char) {\n\t\t\tlastSplitPoint = len(messagePart)\n\t\t}\n\n\t\t// if the SMS is full\n\t\tif codePoints > messageLength {\n\t\t\t// if the split is impossible\n\t\t\tif len(messagePart) == 0 {\n\t\t\t\treturn nil, ErrNotSplittable\n\t\t\t}\n\n\t\t\t// split at the last valid point\n\t\t\tif lastSplitPoint == -1 {\n\t\t\t\tlastSplitPoint = len(messagePart)\n\t\t\t}\n\n\t\t\t// recover dropped characters\n\t\t\tidx -= (len(messagePart) - lastSplitPoint)\n\t\t\tmessagePart = messagePart[0:lastSplitPoint]\n\n\t\t\t// save message part\n\t\t\tmessageParts = append(messageParts, string(messagePart))\n\n\t\t\t// reset\n\t\t\tmessagePart = nil\n\t\t\tcodePoints = 0\n\t\t\tlastSplitPoint = -1\n\n\t\t\t// try adding char again with fresh message part\n\t\t\tidx--\n\t\t\tcontinue\n\t\t}\n\n\t\t// add char to message part\n\t\tmessagePart = append(messagePart, char)\n\n\t\t// check for split point\n\t\tif canSplitAfter(char) {\n\t\t\tlastSplitPoint = len(messagePart)\n\t\t}\n\t}\n\n\t// save last message part\n\tmessageParts = append(messageParts, string(messagePart))\n\n\treturn messageParts, nil\n}",
"func SplitMessage(msg string) (subject, message string) {\n\tparts := strings.SplitN(msg, \"\\n\", 2)\n\tsubject = strings.TrimRight(parts[0], \"\\r\") // better safe than sorry\n\tif len(parts) == 1 {\n\t\treturn subject, \"\"\n\t}\n\tmessage = parts[1]\n\treturn\n}",
"func lines2msg(lines []string) (msg *LogMsg, skipped, remlines []string) {\n\tif len(lines) == 0 {\n\t\treturn nil, nil, nil\n\t}\n\tskipped = make([]string, 0)\n\tmsg = &LogMsg{msg: \"\", kind: \"\"}\n\tok, n := false, -1\n\tfor i, line := range lines {\n\t\tif (msg.msg != \"\") && ((n > 0) || strings.HasPrefix(line, \" \")) {\n\t\t\tmsg.msg = msg.msg + \"\\n\" + line // amend with previous line\n\t\t\tn--\n\n\t\t} else if msg.msg != \"\" {\n\t\t\treturn msg, skipped, lines[i:]\n\n\t\t} else {\n\t\t\tmsg.msg = line\n\t\t\tif ok, n = msg.parseHeadLine(); !ok {\n\t\t\t\tmsg.msg = \"\"\n\t\t\t\tif strings.Trim(line, \"\\t \\r\\n\") != \"\" {\n\t\t\t\t\tskipped = append(skipped, line)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn msg, skipped, nil\n}",
"func extractKvp(msg string) ([][]string, error) {\n\ttuples := [][]string{}\n\tlogger.Debugf(\"matching `%s`\", msg)\n\n\tfor i := 0; i < len(msg); {\n\t\tmatch := kvpRegex.FindStringSubmatchIndex(msg[i:])\n\n\t\tif match == nil {\n\t\t\treturn nil, errors.New(\"Parsing error: invalid kvp format\")\n\t\t}\n\n\t\tif match[0] != 0 {\n\t\t\treturn nil, errors.New(\"Parsing error: invalid kvp format\")\n\t\t}\n\n\t\tlogger.Debugf(\"Match? %v, `%s`\", match, msg[i+match[0]:i+match[1]])\n\n\t\ttuples = append(tuples, []string{msg[i+match[2] : i+match[3]], msg[i+match[4] : i+match[5]]})\n\n\t\tlogger.Debugf(\"%+v\", tuples)\n\n\t\ti += match[1]\n\t}\n\n\treturn tuples, nil\n}",
"func RenderMessage(message *arbor.ChatMessage, width int, colorPre string, colorPost string) [][]byte {\n\tconst separator = \": \"\n\tusernameWidth := runewidth.StringWidth(message.Username)\n\tseparatorWidth := runewidth.StringWidth(separator)\n\tfirstLinePrefix := message.Username + separator\n\totherLinePrefix := strings.Repeat(\" \", usernameWidth+separatorWidth)\n\tmessageRenderWidth := width - (usernameWidth + separatorWidth)\n\toutputLines := make([][]byte, 1)\n\twrapper := wrap.NewWrapper()\n\twrapper.StripTrailingNewline = true\n\twrapped := wrapper.Wrap(message.Content, messageRenderWidth)\n\twrappedLines := strings.SplitAfter(wrapped, \"\\n\")\n\t//ensure last line ends with newline\n\tlastLine := wrappedLines[len(wrappedLines)-1]\n\tif (len(lastLine) > 0 && lastLine[len(lastLine)-1] != '\\n') || len(lastLine) == 0 {\n\t\twrappedLines[len(wrappedLines)-1] = lastLine + \"\\n\"\n\t}\n\twrappedLines[0] = colorPre + wrappedLines[0]\n\twrappedLines[len(wrappedLines)-1] += colorPost\n\toutputLines[0] = []byte(firstLinePrefix + wrappedLines[0])\n\tfor i := 1; i < len(wrappedLines); i++ {\n\t\toutputLines = append(outputLines, []byte(otherLinePrefix+wrappedLines[i]))\n\t}\n\treturn outputLines\n}",
"func messageBlocks(text string) map[int]string{\n\n res := textToBinary(text) \n size :=binary(len(text))\n \n t:= \"\"\n for i:=0; i<(512-(len(res)%512))-len(size)-1 ; i++{\n t = \"0\" + t\n }\n\n res = res + \"1\" + t + size\n result := break_512(res)\n\n return result\n}",
"func ParseMessage_KLog(boot_ts time.Time, data string) (syslog5424.Message,error) {\n\tlog.SetFlags(log.Ltime | log.Lshortfile)\n\n\tpart\t:= FieldsFuncN(data, 4, get_klog_tokenizer())\n\n\tif (len(part) < 4){\n\t\tlog.Println(data)\n\t\tfor pi := range part {\n\t\t\tlog.Println(part[pi])\n\t\t}\n\t}\n\n\tswitch len(part) {\n\n\tcase 2:\n\t\tprio, err := strconv.Atoi(part[0])\n\t\tif err != nil {\n\t\t\treturn syslog5424.EmptyMessage(),errors.New(\"Wrong Priority :\"+string(part[0]))\n\t\t}\n\n\t\treturn\tsyslog5424.CreateMessage(\"-\", syslog5424.Priority(prio), part[2]),nil\n\n\tcase 3:\n\t\t// (kern) 0 * 8 + 6 (info)\n\t\treturn\tsyslog5424.CreateMessage(\"-\", syslog5424.Priority(6), part[2]).Delta(boot_ts, part[0], part[1]),nil\n\n\tcase 4:\n\t\tprio, err := strconv.Atoi(part[0])\n\t\tif err != nil {\n\t\t\treturn syslog5424.EmptyMessage(),errors.New(\"Wrong Priority :\"+string(part[0]))\n\t\t}\n\t\treturn\tsyslog5424.CreateMessage(\"-\", syslog5424.Priority(prio), part[3]).Delta(boot_ts, part[1], part[2]),nil\n\n\tdefault:\n\t\t// (kern) 0 * 8 + 6 (info)\n\t\treturn\tsyslog5424.CreateMessage(\"-\", syslog5424.Priority(6), data),nil\n\t}\n}",
"func DeconstructConnMonVal(connmonval []byte) ([]byte,[]byte) {\n b := bytes.SplitN(connmonval,SEP,2)\n return b[0],b[1]\n}",
"func FormatMessage(msg string) string {\n\t_msg := strings.Split(msg, \"\\n\")\n\n\tfor i, m := range _msg {\n\t\t_msg[i] = \" \" + m\n\t}\n\n\t// _msg = append(append([]string{\"\\\"\" + name + \"\\\"\" + \": [\"}, _msg...), \"]\")\n\treturn strings.Join(_msg, \"\\n\")\n}",
"func mapper(key string) (string, string, string, string) {\n\tvals := strings.Split(key, \":\")\n\treturn vals[0], vals[1], vals[2], vals[3]\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
preGenerate pre generate key until 1000
|
func (g *Genkey) preGenerate() {
var (
i int
)
time.Sleep(errorSleep)
for i = 0; i < 10; i++ {
go func() {
var (
key int64
keys []int64
err error
)
for {
if keys, err = g.client.Ids(100); err != nil {
log.Errorf("preGenerate() error(%v) retry", err)
time.Sleep(errorSleep)
continue
}
for _, key = range keys {
g.keys <- key
}
}
}()
}
}
|
[
"func (k *Key) KeyGen() {}",
"func Auto() (pkey []byte, err error) {\n\treturn Generate(defaultBits)\n}",
"func genKey(length int) string {\n\tkey := make([]byte, length)\n\tchars := \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\trand.Seed(time.Now().UnixNano())\n\n\tfor i := range key {\n\t\tkey[i] = chars[rand.Intn(len(chars))]\n\t}\n\n\treturn string(key)\n}",
"func generateKey(length int) (key []byte, err error) {\n\tdefer func(start time.Time) {\n\t\tvalue.RecordDataKeyGeneration(start, err)\n\t}(time.Now())\n\tkey = make([]byte, length)\n\tif _, err = rand.Read(key); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn key, nil\n}",
"func (db *APIKeyDatabase) genKey() string {\n\treturn RandString(apiKeyMinLength, db.rand)\n}",
"func Generate(kt KeyType, pfx string) Key {\n\tswitch kt {\n\tcase RandI64Type:\n\t\treturn Key{fmt.Sprintf(\"%d\", Int64Key(seededRand.Int63()))}\n\tcase RandStrType:\n\t\treturn Key{MakeRandString(pfx, 16)}\n\tcase OrderedStrType:\n\t\torderedStrCount++\n\t\treturn Key{makeOrderedString(pfx, orderedStrCount)}\n\tcase OrderedI64Type:\n\t\tint64Count++\n\t\treturn Key{fmt.Sprintf(\"%d\", Int64Key(int64Count))}\n\t}\n\treturn Key{\"\"}\n}",
"func generatePrivKey() []byte {\n\tpriv := make([]byte, 32)\n\ts := rand.NewSource(time.Now().UnixNano())\n\tr := rand.New(s)\n\tfor i := 0; i < 31; i++ {\n\t\tpriv[i] = byte(r.Intn(256))\n\t}\n\treturn priv\n}",
"func (r *RatchetState) genkeys() {\n\th := hmac.New(crypto.SHA256.New, r.dynamic[:])\n\th.Write(r.static[:])\n\tres := h.Sum(nil)\n\tcopy(r.privateKey[:], res)\n\tcurve25519.ScalarBaseMult(&r.PublicKey, &r.privateKey)\n}",
"func keyGeneration() {\n\tserverKeySet.Kb, _ = rand.Int(rand.Reader, bn256.Order) // Kb: one key for server\n\tuserKeySet.Ku, _ = rand.Int(rand.Reader, bn256.Order) // Ku: one key for user\n\tcaKeySet.X = big.NewInt(0) // X: the key held by CA\n\tcaKeySet.X.Add(serverKeySet.Kb, userKeySet.Ku) // X=Kb+Ku. for all the users, X is the same\n\n\tcaKeySet.gx = new(bn256.G2).ScalarBaseMult(caKeySet.X) // gx: g^x is used for the encryption in user\n\tuserKeySet.gx = new(bn256.G2).ScalarBaseMult(caKeySet.X) // gx: g^x is used for the encryption in user\n\n\tuserKeySet.alpha, _ = rand.Int(rand.Reader, bn256.Order) // alpha: the encryption key for user in encryption\n\tserverKeySet.beta, _ = rand.Int(rand.Reader, bn256.Order) // beta: the encryption key for server in on-chain encryption\n\n\tserverKeySet.S, _ = rand.Int(rand.Reader, bn256.Order) // S: the encryption key for server in on-chain encryption\n\n\tserverKeySet.g1Sbeta = new(bn256.G1).ScalarBaseMult(serverKeySet.S)\n\tserverKeySet.g1Sbeta.ScalarMult(serverKeySet.g1Sbeta, serverKeySet.beta) //g1^{S*beta}: the encryption key for server in on-chain encryption\n}",
"func keygen() (string, string) {\n priv, _ := config.GenerateRandomBytes(32)\n addr := config.PrivateToAddress(priv)\n return \"0x\"+addr, fmt.Sprintf(\"%x\", priv)\n}",
"func (c *HashRing) generateKey(ip string, i int) string {\n\treturn ip + \"#\" + strconv.Itoa(i)\n}",
"func (p *siprng) rekey() {\n\tvar k [16]byte\n\tif _, err := io.ReadFull(rand.Reader, k[:]); err != nil {\n\t\tpanic(err.Error())\n\t}\n\tp.k0 = binary.LittleEndian.Uint64(k[0:8])\n\tp.k1 = binary.LittleEndian.Uint64(k[8:16])\n\tp.ctr = 1\n}",
"func generateKey() string {\n\n\t// Use uniuri to generate random string\n\tkey := uniuri.NewLen(20)\n\tloggy(fmt.Sprintf(\"Generated id is '%s', checking if it's already taken in the database\",\n\t\tkey))\n\n\t// Query database if id exists and if it does call generateName again\n\tvar key_taken string\n\terr := dbHandle.QueryRow(\"select key from \"+configuration.DBAccountsTable+\n\t\t\" where key=\"+configuration.DBPlaceHolder[0], key).\n\t\tScan(&key_taken)\n\n\tswitch {\n\tcase err == sql.ErrNoRows:\n\t\tloggy(fmt.Sprintf(\"Key '%s' is not taken, will use it.\", key))\n\tcase err != nil:\n\t\tdebugLogger.Println(\" Database error : \" + err.Error())\n\t\tos.Exit(1)\n\tdefault:\n\t\tloggy(fmt.Sprintf(\"Key '%s' is taken, generating new key.\", key_taken))\n\t\tgenerateKey()\n\t}\n\n\treturn key\n}",
"func GenerateKey() []byte {\n\tk, _ := GenerateRandomBlock()\n\treturn k\n}",
"func getRepeatingKey(k []byte, ptx []byte) []byte {\n\treturn bytes.Repeat(k, len(ptx))\n}",
"func UnsafeAuto() (pkey []byte, err error) {\n\treturn Generate(128)\n}",
"func generateTwofishKey() (key twofishKey) {\n\tfastrand.Read(key[:])\n\treturn\n}",
"func GenerateKey(n int) (string, error) {\n\tbuf := make([]byte, n)\n\t_, err := rand.Read(buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.URLEncoding.EncodeToString(buf), nil\n\n}",
"func genKeyAndSendCipher(kx *KX, pk *[sntrup4591761.PublicKeySize]byte, ek *[32]byte) (*[32]byte, error) {\n\tc, k, err := sntrup4591761.Encapsulate(rand.Reader, pk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ek != nil {\n\t\terr = kx.writeWithKey(c[:], ek)\n\t} else {\n\t\t_, err = xdr.Marshal(kx.Conn, c)\n\t}\n\treturn k, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Each list of collation elements corresponding to an expansion starts with a header indicating the length of the sequence.
|
func makeExpansionHeader(n int) (uint32, error) {
return uint32(n), nil
}
|
[
"func expansion(s []string) []string {\n\n\treturn []string{s[31], s[0], s[1], s[2], s[3], s[4], s[3], s[4],\n\t\ts[5], s[6], s[7], s[8], s[7], s[8], s[9], s[10],\n\t\ts[11], s[12], s[11], s[12], s[13], s[14], s[15], s[16],\n\t\ts[15], s[16], s[17], s[18], s[19], s[20], s[19], s[20],\n\t\ts[21], s[22], s[23], s[24], s[23], s[24], s[25], s[26],\n\t\ts[27], s[28], s[27], s[28], s[29], s[30], s[31], s[0]}\n}",
"func expandCertificateCertificateDescriptionSlice(c *Client, f []CertificateCertificateDescription, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateCertificateDescription(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {\n\tcompBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))\n\t// Only retain 2 bits\n\tconst mask = 3\n\tlh := uint64(*h & mask)\n\tswitch {\n\tcase compBits <= 10 && inBits <= 10:\n\t\tif !single {\n\t\t\tlh |= 1 << 2\n\t\t}\n\t\tlh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)\n\t\tif debugEncoder {\n\t\t\tconst mmask = (1 << 24) - 1\n\t\t\tn := (lh >> 4) & mmask\n\t\t\tif int(n&1023) != inLen {\n\t\t\t\tpanic(fmt.Sprint(\"regensize:\", int(n&1023), \"!=\", inLen, inBits))\n\t\t\t}\n\t\t\tif int(n>>10) != compLen {\n\t\t\t\tpanic(fmt.Sprint(\"compsize:\", int(n>>10), \"!=\", compLen, compBits))\n\t\t\t}\n\t\t}\n\tcase compBits <= 14 && inBits <= 14:\n\t\tlh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)\n\t\tif single {\n\t\t\tpanic(\"single stream used with more than 10 bits length.\")\n\t\t}\n\tcase compBits <= 18 && inBits <= 18:\n\t\tlh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)\n\t\tif single {\n\t\t\tpanic(\"single stream used with more than 10 bits length.\")\n\t\t}\n\tdefault:\n\t\tpanic(\"internal error: block too big\")\n\t}\n\t*h = literalsHeader(lh)\n}",
"func expandCertificateCertificateDescriptionAuthorityKeyIdSlice(c *Client, f []CertificateCertificateDescriptionAuthorityKeyId, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateCertificateDescriptionAuthorityKeyId(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func (b *Builder) appendExpansion(e *entry) int {\n\tt := b.t\n\ti := len(t.ExpandElem)\n\tce := uint32(len(e.elems))\n\tt.ExpandElem = append(t.ExpandElem, ce)\n\tfor _, w := range e.elems {\n\t\tce, err := makeCE(w)\n\t\tif err != nil {\n\t\t\tb.error(err)\n\t\t\treturn -1\n\t\t}\n\t\tt.ExpandElem = append(t.ExpandElem, ce)\n\t}\n\treturn i\n}",
"func expandCertificateCertificateDescriptionSubjectDescriptionSlice(c *Client, f []CertificateCertificateDescriptionSubjectDescription, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateCertificateDescriptionSubjectDescription(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func expandCertificateCertificateDescriptionX509DescriptionSlice(c *Client, f []CertificateCertificateDescriptionX509Description, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateCertificateDescriptionX509Description(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func expandCertificateCertificateDescriptionSubjectKeyIdSlice(c *Client, f []CertificateCertificateDescriptionSubjectKeyId, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateCertificateDescriptionSubjectKeyId(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func buildAux(aa []sam.Aux, buf *[]byte) {\n\tfor _, a := range aa {\n\t\t// TODO: validate each 'a'\n\t\t*buf = append(*buf, []byte(a)...)\n\t\tswitch a.Type() {\n\t\tcase 'Z', 'H':\n\t\t\t*buf = append(*buf, 0)\n\t\t}\n\t}\n}",
"func buildAux(aa []sam.Aux) (aux []byte) {\n\tfor _, a := range aa {\n\t\t// TODO: validate each 'a'\n\t\taux = append(aux, []byte(a)...)\n\t\tswitch a.Type() {\n\t\tcase 'Z', 'H':\n\t\t\taux = append(aux, 0)\n\t\t}\n\t}\n\treturn\n}",
"func expandCertificateRevocationDetailsSlice(c *Client, f []CertificateRevocationDetails, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateRevocationDetails(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func expandCertificateCertificateDescriptionX509DescriptionCaOptionsSlice(c *Client, f []CertificateCertificateDescriptionX509DescriptionCaOptions, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateCertificateDescriptionX509DescriptionCaOptions(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func expandCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameSlice(c *Client, f []CertificateCertificateDescriptionSubjectDescriptionSubjectAltName, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateCertificateDescriptionSubjectDescriptionSubjectAltName(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node {\n\tn.Ptr = walkExpr(n.Ptr, init)\n\tn.Len = walkExpr(n.Len, init)\n\tn.Cap = walkExpr(n.Cap, init)\n\treturn n\n}",
"func expandCertificateCertificateDescriptionSubjectDescriptionSubjectSlice(c *Client, f []CertificateCertificateDescriptionSubjectDescriptionSubject, res *Certificate) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandCertificateCertificateDescriptionSubjectDescriptionSubject(c, &item, res)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func (m *ASCIICoolness) populate() {\n\tfor i := 0; i < m.Length; i++ {\n\t\tresultitem := asciiresult{ASCIINumber: i, ASCIICharter: string(i)}\n\t\tm.ASCIIResult = append(m.ASCIIResult, resultitem)\n\t}\n}",
"func hkdfExpand(hash func() hash.Hash, prk, info []byte, length int) []byte {\n\thashSize := hash().Size()\n\tif length > 255*hashSize {\n\t\tpanic(\"hkdfExpand: length too long\")\n\t}\n\tif len(prk) < hashSize {\n\t\tpanic(\"hkdfExpand: prk too short\")\n\t}\n\tvar lastBlock []byte\n\tcounter := byte(0)\n\tokm := make([]byte, length)\n\thmac := hmac.New(hash, prk)\n\tfor length > 0 {\n\t\thmac.Reset()\n\t\tcounter++\n\t\thmac.Write(lastBlock)\n\t\thmac.Write(info)\n\t\thmac.Write([]byte{counter})\n\t\tblock := hmac.Sum(nil)\n\t\tlastBlock = block\n\t\tcopy(okm[(int(counter)-1)*hashSize:], block)\n\t\tlength -= hashSize\n\t}\n\treturn okm\n}",
"func (ns Seq) Expand() []byte {\n\ts := make([]byte, ns.Length)\n\tfor i := range s {\n\t\tif i&1 == 0 {\n\t\t\ts[i] = n16TableRev[ns.Seq[i>>1]>>4]\n\t\t} else {\n\t\t\ts[i] = n16TableRev[ns.Seq[i>>1]&0xf]\n\t\t}\n\t}\n\n\treturn s\n}",
"func (o *ordering) genColElems(str string) []rawCE {\n\telems := []rawCE{}\n\tfor _, r := range []rune(str) {\n\t\tfor _, ce := range o.find(string(r)).elems {\n\t\t\tif ce.w[0] != 0 || ce.w[1] != 0 || ce.w[2] != 0 {\n\t\t\t\telems = append(elems, ce)\n\t\t\t}\n\t\t}\n\t}\n\treturn elems\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
implicitPrimary returns the primary weight for the a rune for which there is no entry for the rune in the collation table. We take a different approach from the one specified in but preserve the resulting relative ordering of the runes.
|
func implicitPrimary(r rune) int {
if unicode.Is(unicode.Ideographic, r) {
if r >= minUnified && r <= maxUnified {
// The most common case for CJK.
return int(r) + commonUnifiedOffset
}
if r >= minCompatibility && r <= maxCompatibility {
// This will typically not hit. The DUCET explicitly specifies mappings
// for all characters that do not decompose.
return int(r) + commonUnifiedOffset
}
return int(r) + rareUnifiedOffset
}
return int(r) + otherOffset
}
|
[
"func Latin1_bin_RuneWeight(r rune) int32 {\n\tweight, ok := latin1_bin_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 0 && r <= 127 {\n\t\treturn r + 0\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func Dec8_bin_RuneWeight(r rune) int32 {\n\tweight, ok := dec8_bin_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 0 && r <= 163 {\n\t\treturn r + 0\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func Geostd8_bin_RuneWeight(r rune) int32 {\n\tweight, ok := geostd8_bin_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 0 && r <= 127 {\n\t\treturn r + 0\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func Swe7_swedish_ci_RuneWeight(r rune) int32 {\n\tweight, ok := swe7_swedish_ci_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func Utf8mb3_slovak_ci_RuneWeight(r rune) int32 {\n\tweight, ok := common_utf_slovak_ci_Weights()[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 9003 && r <= 9168 {\n\t\treturn r - 8070\n\t} else if r >= 9475 && r <= 9632 {\n\t\treturn r - 8323\n\t} else if r >= 10496 && r <= 10626 {\n\t\treturn r - 8775\n\t} else if r >= 10765 && r <= 10867 {\n\t\treturn r - 8800\n\t} else if r >= 10872 && r <= 10971 {\n\t\treturn r - 8803\n\t} else if r >= 10240 && r <= 10495 {\n\t\treturn r - 8022\n\t} else if r >= 5121 && r <= 5499 {\n\t\treturn r + 552\n\t} else if r >= 5543 && r <= 5740 {\n\t\treturn r + 560\n\t} else if r >= 40960 && r <= 42124 {\n\t\treturn r - 34149\n\t} else if r >= 20241 && r <= 20358 {\n\t\treturn r - 11992\n\t} else if r >= 20416 && r <= 20523 {\n\t\treturn r - 11992\n\t} else if r >= 20524 && r <= 20698 {\n\t\treturn r - 11992\n\t} else if r >= 21571 && r <= 21693 {\n\t\treturn r - 11992\n\t} else if r >= 21694 && r <= 21895 {\n\t\treturn r - 11992\n\t} else if r >= 22121 && r <= 22230 {\n\t\treturn r - 11992\n\t} else if r >= 22320 && r <= 22592 {\n\t\treturn r - 11992\n\t} else if r >= 22900 && r <= 23375 {\n\t\treturn r - 11991\n\t} else if r >= 23665 && r <= 23833 {\n\t\treturn r - 11991\n\t} else if r >= 23889 && r <= 23994 {\n\t\treturn r - 11991\n\t} else if r >= 24062 && r <= 24177 {\n\t\treturn r - 11991\n\t} else if r >= 24605 && r <= 24724 {\n\t\treturn r - 11990\n\t} else if r >= 25164 && r <= 25289 {\n\t\treturn r - 11990\n\t} else if r >= 25343 && r <= 25467 {\n\t\treturn r - 11990\n\t} else if r >= 25505 && r <= 25754 {\n\t\treturn r - 11990\n\t} else if r >= 25797 && r <= 25902 {\n\t\treturn r - 11990\n\t} else if r >= 26793 && r <= 27138 {\n\t\treturn r - 11987\n\t} else if r >= 27156 && r <= 27347 {\n\t\treturn r - 11987\n\t} else if r >= 28187 && r <= 28316 {\n\t\treturn r - 11987\n\t} else if r >= 28452 && r <= 28651 {\n\t\treturn r - 11987\n\t} else if r >= 28671 && r <= 28778 {\n\t\treturn r - 11987\n\t} else if r >= 28890 
&& r <= 29001 {\n\t\treturn r - 11987\n\t} else if r >= 30466 && r <= 30682 {\n\t\treturn r - 11987\n\t} else if r >= 30707 && r <= 30827 {\n\t\treturn r - 11987\n\t} else if r >= 31521 && r <= 31680 {\n\t\treturn r - 11987\n\t} else if r >= 31681 && r <= 31806 {\n\t\treturn r - 11987\n\t} else if r >= 32048 && r <= 32160 {\n\t\treturn r - 11987\n\t} else if r >= 32415 && r <= 32565 {\n\t\treturn r - 11987\n\t} else if r >= 32908 && r <= 33240 {\n\t\treturn r - 11987\n\t} else if r >= 33402 && r <= 33509 {\n\t\treturn r - 11987\n\t} else if r >= 33591 && r <= 33737 {\n\t\treturn r - 11987\n\t} else if r >= 33880 && r <= 34030 {\n\t\treturn r - 11987\n\t} else if r >= 34045 && r <= 34253 {\n\t\treturn r - 11987\n\t} else if r >= 34411 && r <= 34746 {\n\t\treturn r - 11987\n\t} else if r >= 34747 && r <= 34847 {\n\t\treturn r - 11987\n\t} else if r >= 35328 && r <= 35498 {\n\t\treturn r - 11987\n\t} else if r >= 35744 && r <= 35894 {\n\t\treturn r - 11987\n\t} else if r >= 36336 && r <= 36522 {\n\t\treturn r - 11987\n\t} else if r >= 36791 && r <= 36899 {\n\t\treturn r - 11987\n\t} else if r >= 37429 && r <= 37636 {\n\t\treturn r - 11987\n\t} else if r >= 37707 && r <= 38020 {\n\t\treturn r - 11987\n\t} else if r >= 38021 && r <= 38262 {\n\t\treturn r - 11987\n\t} else if r >= 39410 && r <= 39530 {\n\t\treturn r - 11987\n\t} else if r >= 39792 && r <= 40023 {\n\t\treturn r - 11987\n\t} else if r >= 40060 && r <= 40164 {\n\t\treturn r - 11987\n\t} else if r >= 40165 && r <= 40372 {\n\t\treturn r - 11987\n\t} else if r >= 13312 && r <= 19893 {\n\t\treturn r + 15583\n\t} else if r >= 1970 && r <= 2304 {\n\t\treturn r + 33723\n\t} else if r >= 6517 && r <= 6623 {\n\t\treturn r + 30534\n\t} else if r >= 6657 && r <= 7423 {\n\t\treturn r + 30502\n\t} else if r >= 7533 && r <= 7679 {\n\t\treturn r + 30394\n\t} else if r >= 11022 && r <= 11903 {\n\t\treturn r + 27432\n\t} else if r >= 42183 && r <= 55295 {\n\t\treturn r - 2617\n\t} else if r >= 57345 && r <= 63743 
{\n\t\treturn r - 4665\n\t} else if r >= 64107 && r <= 64255 {\n\t\treturn r - 5026\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func makeRootOrdering() ordering {\n\tconst max = unicode.MaxRune\n\to := ordering{\n\t\tentryMap: make(map[string]*entry),\n\t}\n\tinsert := func(typ logicalAnchor, s string, ce []int) {\n\t\te := &entry{\n\t\t\telems: []rawCE{{w: ce}},\n\t\t\tstr: s,\n\t\t\texclude: true,\n\t\t\tlogical: typ,\n\t\t}\n\t\to.insert(e)\n\t}\n\tinsert(firstAnchor, \"first tertiary ignorable\", []int{0, 0, 0, 0})\n\tinsert(lastAnchor, \"last tertiary ignorable\", []int{0, 0, 0, max})\n\tinsert(lastAnchor, \"last primary ignorable\", []int{0, defaultSecondary, defaultTertiary, max})\n\tinsert(lastAnchor, \"last non ignorable\", []int{maxPrimary, defaultSecondary, defaultTertiary, max})\n\tinsert(lastAnchor, \"__END__\", []int{1 << maxPrimaryBits, defaultSecondary, defaultTertiary, max})\n\treturn o\n}",
"func nextWeight(level colltab.Level, elems []rawCE) []rawCE {\n\tif level == colltab.Identity {\n\t\tnext := make([]rawCE, len(elems))\n\t\tcopy(next, elems)\n\t\treturn next\n\t}\n\tnext := []rawCE{makeRawCE(elems[0].w, elems[0].ccc)}\n\tnext[0].w[level]++\n\tif level < colltab.Secondary {\n\t\tnext[0].w[colltab.Secondary] = defaultSecondary\n\t}\n\tif level < colltab.Tertiary {\n\t\tnext[0].w[colltab.Tertiary] = defaultTertiary\n\t}\n\t// Filter entries that cannot influence ordering.\n\tfor _, ce := range elems[1:] {\n\t\tskip := true\n\t\tfor i := colltab.Primary; i < level; i++ {\n\t\t\tskip = skip && ce.w[i] == 0\n\t\t}\n\t\tif !skip {\n\t\t\tnext = append(next, ce)\n\t\t}\n\t}\n\treturn next\n}",
"func applyRulesW1to3(r rune, clz bidi.Class, current scrap) bidi.Class {\n\tcurrclz := current.bidiclz\n\tswitch clz { // do some pre-processing\n\tcase bidi.NSM: // rule W1, handle accents\n\t\tswitch currclz {\n\t\tcase bidi.LRI:\n\t\t\treturn bidi.L\n\t\tcase bidi.RLI:\n\t\t\treturn bidi.R\n\t\tcase bidi.PDI:\n\t\t\treturn cNI\n\t\t}\n\t\treturn currclz\n\tcase bidi.EN: // rule W2\n\t\tif current.context.IsAL() {\n\t\t\ttracing.Errorf(\"========= context: %v\", current.context)\n\t\t\treturn bidi.AN\n\t\t}\n\tcase bidi.AL: // rule W3\n\t\treturn bidi.R\n\tcase bidi.S, bidi.WS, bidi.ON:\n\t\treturn cNI\n\t\t//if sc.bidiclz == NI {\n\t\t// return NI\n\t\t//}\n\t}\n\treturn clz\n}",
"func (o SysbenchSpecPodSchedulingAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionOutput) Weight() pulumi.IntOutput {\n\treturn o.ApplyT(func(v SysbenchSpecPodSchedulingAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution) int {\n\t\treturn v.Weight\n\t}).(pulumi.IntOutput)\n}",
"func (o SysbenchSpecPodSchedulingAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionOutput) Weight() pulumi.IntOutput {\n\treturn o.ApplyT(func(v SysbenchSpecPodSchedulingAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution) int {\n\t\treturn v.Weight\n\t}).(pulumi.IntOutput)\n}",
"func PrimaryScramblingCode(v string) predicate.SurveyCellScan {\n\treturn predicate.SurveyCellScan(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPrimaryScramblingCode), v))\n\t},\n\t)\n}",
"func (it *ColumnDefinition) AsPrimary() *ColumnDefinition {\n\tlastElementPos := len(it.Columns) - 1\n\n\t// ignore if no element set yet\n\tif lastElementPos < 0 {\n\t\treturn it\n\t}\n\n\tlastElement := it.Columns[lastElementPos]\n\tlastElement.IsPrimary = true\n\n\t// need to put it back\n\tit.Columns = append(it.Columns[:lastElementPos], lastElement)\n\n\treturn it\n}",
"func Utf8mb3_general_mysql500_ci_RuneWeight(r rune) int32 {\n\tweight, ok := utf8mb3_general_mysql500_ci_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 659 && r <= 836 {\n\t\treturn r - 341\n\t} else if r >= 1415 && r <= 7679 {\n\t\treturn r - 585\n\t} else if r >= 8189 && r <= 8544 {\n\t\treturn r - 1039\n\t} else if r >= 8575 && r <= 9398 {\n\t\treturn r - 1055\n\t} else if r >= 9449 && r <= 55295 {\n\t\treturn r - 1081\n\t} else if r >= 57345 && r <= 65313 {\n\t\treturn r - 3129\n\t} else if r >= 65371 && r <= 65535 {\n\t\treturn r - 3155\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func compareWeights(a, b []rawCE) (result int, level colltab.Level) {\n\tfor level := colltab.Primary; level < colltab.Identity; level++ {\n\t\tvar va, vb int\n\t\tfor ia, ib := 0, 0; ia < len(a) || ib < len(b); ia, ib = ia+1, ib+1 {\n\t\t\tia, va = nextVal(a, ia, level)\n\t\t\tib, vb = nextVal(b, ib, level)\n\t\t\tif va != vb {\n\t\t\t\tif va < vb {\n\t\t\t\t\treturn -1, level\n\t\t\t\t} else {\n\t\t\t\t\treturn 1, level\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, colltab.Identity\n}",
"func DefaultStrictUpdatePrimaryStringType(ctx context.Context, in *PrimaryStringType, db *gorm.DB) (*PrimaryStringType, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdatePrimaryStringType\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar count int64\n\tlockedRow := &PrimaryStringTypeORM{}\n\tcount = db.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow).RowsAffected\n\tif hook, ok := interface{}(&ormObj).(PrimaryStringTypeORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tfilterChild := ExternalChildORM{}\n\tif ormObj.Id == \"\" {\n\t\treturn nil, errors.EmptyIdError\n\t}\n\tfilterChild.PrimaryStringTypeId = new(string)\n\t*filterChild.PrimaryStringTypeId = ormObj.Id\n\tif err = db.Where(filterChild).Delete(ExternalChildORM{}).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(PrimaryStringTypeORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Omit().Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(PrimaryStringTypeORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif count == 0 {\n\t\terr = gateway.SetCreated(ctx, \"\")\n\t}\n\treturn &pbResponse, err\n}",
"func (o ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionOutput) Weight() pulumi.IntOutput {\n\treturn o.ApplyT(func(v ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution) int {\n\t\treturn v.Weight\n\t}).(pulumi.IntOutput)\n}",
"func DefaultApplyFieldMaskPrimaryStringType(ctx context.Context, patchee *PrimaryStringType, patcher *PrimaryStringType, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*PrimaryStringType, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tvar updatedChild bool\n\tfor i, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedChild && strings.HasPrefix(f, prefix+\"Child.\") {\n\t\t\tupdatedChild = true\n\t\t\tif patcher.Child == nil {\n\t\t\t\tpatchee.Child = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.Child == nil {\n\t\t\t\tpatchee.Child = &ExternalChild{}\n\t\t\t}\n\t\t\tif o, err := DefaultApplyFieldMaskExternalChild(ctx, patchee.Child, patcher.Child, &field_mask.FieldMask{Paths: updateMask.Paths[i:]}, prefix+\"Child.\", db); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tpatchee.Child = o\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Child\" {\n\t\t\tupdatedChild = true\n\t\t\tpatchee.Child = patcher.Child\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}",
"func (t PostgresTyper) MostWeight(left, right string) string {\n\tif left == right {\n\t\treturn left\n\t}\n\n\tif typeWeight[left] > typeWeight[right] {\n\t\treturn left\n\t}\n\n\treturn right\n}",
"func convertLargeWeights(elems []rawCE) (res []rawCE, err error) {\n\tconst (\n\t\tcjkPrimaryStart = 0xFB40\n\t\trarePrimaryStart = 0xFB80\n\t\totherPrimaryStart = 0xFBC0\n\t\tillegalPrimary = 0xFFFE\n\t\thighBitsMask = 0x3F\n\t\tlowBitsMask = 0x7FFF\n\t\tlowBitsFlag = 0x8000\n\t\tshiftBits = 15\n\t)\n\tfor i := 0; i < len(elems); i++ {\n\t\tce := elems[i].w\n\t\tp := ce[0]\n\t\tif p < cjkPrimaryStart {\n\t\t\tcontinue\n\t\t}\n\t\tif p > 0xFFFF {\n\t\t\treturn elems, fmt.Errorf(\"found primary weight %X; should be <= 0xFFFF\", p)\n\t\t}\n\t\tif p >= illegalPrimary {\n\t\t\tce[0] = illegalOffset + p - illegalPrimary\n\t\t} else {\n\t\t\tif i+1 >= len(elems) {\n\t\t\t\treturn elems, fmt.Errorf(\"second part of double primary weight missing: %v\", elems)\n\t\t\t}\n\t\t\tif elems[i+1].w[0]&lowBitsFlag == 0 {\n\t\t\t\treturn elems, fmt.Errorf(\"malformed second part of double primary weight: %v\", elems)\n\t\t\t}\n\t\t\tnp := ((p & highBitsMask) << shiftBits) + elems[i+1].w[0]&lowBitsMask\n\t\t\tswitch {\n\t\t\tcase p < rarePrimaryStart:\n\t\t\t\tnp += commonUnifiedOffset\n\t\t\tcase p < otherPrimaryStart:\n\t\t\t\tnp += rareUnifiedOffset\n\t\t\tdefault:\n\t\t\t\tp += otherOffset\n\t\t\t}\n\t\t\tce[0] = np\n\t\t\tfor j := i + 1; j+1 < len(elems); j++ {\n\t\t\t\telems[j] = elems[j+1]\n\t\t\t}\n\t\t\telems = elems[:len(elems)-1]\n\t\t}\n\t}\n\treturn elems, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convertLargeWeights converts collation elements with large primaries (either double primaries or for illegal runes) to our own representation. A CJK character C is represented in the DUCET as [.FBxx.0020.0002.C][.BBBB.0000.0000.C] We will rewrite these characters to a single CE. We assume the CJK values start at 0x8000. See
|
func convertLargeWeights(elems []rawCE) (res []rawCE, err error) {
const (
cjkPrimaryStart = 0xFB40
rarePrimaryStart = 0xFB80
otherPrimaryStart = 0xFBC0
illegalPrimary = 0xFFFE
highBitsMask = 0x3F
lowBitsMask = 0x7FFF
lowBitsFlag = 0x8000
shiftBits = 15
)
for i := 0; i < len(elems); i++ {
ce := elems[i].w
p := ce[0]
if p < cjkPrimaryStart {
continue
}
if p > 0xFFFF {
return elems, fmt.Errorf("found primary weight %X; should be <= 0xFFFF", p)
}
if p >= illegalPrimary {
ce[0] = illegalOffset + p - illegalPrimary
} else {
if i+1 >= len(elems) {
return elems, fmt.Errorf("second part of double primary weight missing: %v", elems)
}
if elems[i+1].w[0]&lowBitsFlag == 0 {
return elems, fmt.Errorf("malformed second part of double primary weight: %v", elems)
}
np := ((p & highBitsMask) << shiftBits) + elems[i+1].w[0]&lowBitsMask
switch {
case p < rarePrimaryStart:
np += commonUnifiedOffset
case p < otherPrimaryStart:
np += rareUnifiedOffset
default:
p += otherOffset
}
ce[0] = np
for j := i + 1; j+1 < len(elems); j++ {
elems[j] = elems[j+1]
}
elems = elems[:len(elems)-1]
}
}
return elems, nil
}
|
[
"func Utf8mb3_slovak_ci_RuneWeight(r rune) int32 {\n\tweight, ok := common_utf_slovak_ci_Weights()[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 9003 && r <= 9168 {\n\t\treturn r - 8070\n\t} else if r >= 9475 && r <= 9632 {\n\t\treturn r - 8323\n\t} else if r >= 10496 && r <= 10626 {\n\t\treturn r - 8775\n\t} else if r >= 10765 && r <= 10867 {\n\t\treturn r - 8800\n\t} else if r >= 10872 && r <= 10971 {\n\t\treturn r - 8803\n\t} else if r >= 10240 && r <= 10495 {\n\t\treturn r - 8022\n\t} else if r >= 5121 && r <= 5499 {\n\t\treturn r + 552\n\t} else if r >= 5543 && r <= 5740 {\n\t\treturn r + 560\n\t} else if r >= 40960 && r <= 42124 {\n\t\treturn r - 34149\n\t} else if r >= 20241 && r <= 20358 {\n\t\treturn r - 11992\n\t} else if r >= 20416 && r <= 20523 {\n\t\treturn r - 11992\n\t} else if r >= 20524 && r <= 20698 {\n\t\treturn r - 11992\n\t} else if r >= 21571 && r <= 21693 {\n\t\treturn r - 11992\n\t} else if r >= 21694 && r <= 21895 {\n\t\treturn r - 11992\n\t} else if r >= 22121 && r <= 22230 {\n\t\treturn r - 11992\n\t} else if r >= 22320 && r <= 22592 {\n\t\treturn r - 11992\n\t} else if r >= 22900 && r <= 23375 {\n\t\treturn r - 11991\n\t} else if r >= 23665 && r <= 23833 {\n\t\treturn r - 11991\n\t} else if r >= 23889 && r <= 23994 {\n\t\treturn r - 11991\n\t} else if r >= 24062 && r <= 24177 {\n\t\treturn r - 11991\n\t} else if r >= 24605 && r <= 24724 {\n\t\treturn r - 11990\n\t} else if r >= 25164 && r <= 25289 {\n\t\treturn r - 11990\n\t} else if r >= 25343 && r <= 25467 {\n\t\treturn r - 11990\n\t} else if r >= 25505 && r <= 25754 {\n\t\treturn r - 11990\n\t} else if r >= 25797 && r <= 25902 {\n\t\treturn r - 11990\n\t} else if r >= 26793 && r <= 27138 {\n\t\treturn r - 11987\n\t} else if r >= 27156 && r <= 27347 {\n\t\treturn r - 11987\n\t} else if r >= 28187 && r <= 28316 {\n\t\treturn r - 11987\n\t} else if r >= 28452 && r <= 28651 {\n\t\treturn r - 11987\n\t} else if r >= 28671 && r <= 28778 {\n\t\treturn r - 11987\n\t} else if r >= 28890 
&& r <= 29001 {\n\t\treturn r - 11987\n\t} else if r >= 30466 && r <= 30682 {\n\t\treturn r - 11987\n\t} else if r >= 30707 && r <= 30827 {\n\t\treturn r - 11987\n\t} else if r >= 31521 && r <= 31680 {\n\t\treturn r - 11987\n\t} else if r >= 31681 && r <= 31806 {\n\t\treturn r - 11987\n\t} else if r >= 32048 && r <= 32160 {\n\t\treturn r - 11987\n\t} else if r >= 32415 && r <= 32565 {\n\t\treturn r - 11987\n\t} else if r >= 32908 && r <= 33240 {\n\t\treturn r - 11987\n\t} else if r >= 33402 && r <= 33509 {\n\t\treturn r - 11987\n\t} else if r >= 33591 && r <= 33737 {\n\t\treturn r - 11987\n\t} else if r >= 33880 && r <= 34030 {\n\t\treturn r - 11987\n\t} else if r >= 34045 && r <= 34253 {\n\t\treturn r - 11987\n\t} else if r >= 34411 && r <= 34746 {\n\t\treturn r - 11987\n\t} else if r >= 34747 && r <= 34847 {\n\t\treturn r - 11987\n\t} else if r >= 35328 && r <= 35498 {\n\t\treturn r - 11987\n\t} else if r >= 35744 && r <= 35894 {\n\t\treturn r - 11987\n\t} else if r >= 36336 && r <= 36522 {\n\t\treturn r - 11987\n\t} else if r >= 36791 && r <= 36899 {\n\t\treturn r - 11987\n\t} else if r >= 37429 && r <= 37636 {\n\t\treturn r - 11987\n\t} else if r >= 37707 && r <= 38020 {\n\t\treturn r - 11987\n\t} else if r >= 38021 && r <= 38262 {\n\t\treturn r - 11987\n\t} else if r >= 39410 && r <= 39530 {\n\t\treturn r - 11987\n\t} else if r >= 39792 && r <= 40023 {\n\t\treturn r - 11987\n\t} else if r >= 40060 && r <= 40164 {\n\t\treturn r - 11987\n\t} else if r >= 40165 && r <= 40372 {\n\t\treturn r - 11987\n\t} else if r >= 13312 && r <= 19893 {\n\t\treturn r + 15583\n\t} else if r >= 1970 && r <= 2304 {\n\t\treturn r + 33723\n\t} else if r >= 6517 && r <= 6623 {\n\t\treturn r + 30534\n\t} else if r >= 6657 && r <= 7423 {\n\t\treturn r + 30502\n\t} else if r >= 7533 && r <= 7679 {\n\t\treturn r + 30394\n\t} else if r >= 11022 && r <= 11903 {\n\t\treturn r + 27432\n\t} else if r >= 42183 && r <= 55295 {\n\t\treturn r - 2617\n\t} else if r >= 57345 && r <= 63743 
{\n\t\treturn r - 4665\n\t} else if r >= 64107 && r <= 64255 {\n\t\treturn r - 5026\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func Utf8mb3_general_mysql500_ci_RuneWeight(r rune) int32 {\n\tweight, ok := utf8mb3_general_mysql500_ci_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 659 && r <= 836 {\n\t\treturn r - 341\n\t} else if r >= 1415 && r <= 7679 {\n\t\treturn r - 585\n\t} else if r >= 8189 && r <= 8544 {\n\t\treturn r - 1039\n\t} else if r >= 8575 && r <= 9398 {\n\t\treturn r - 1055\n\t} else if r >= 9449 && r <= 55295 {\n\t\treturn r - 1081\n\t} else if r >= 57345 && r <= 65313 {\n\t\treturn r - 3129\n\t} else if r >= 65371 && r <= 65535 {\n\t\treturn r - 3155\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func wideToMB(codePage C.UINT, wide []C.wchar_t) (s string, e error) {\r\n\te = ErrInvalidNarrow\r\n\r\n\tif numOfMB := C.WideCharToMultiByte(codePage, 0 /*C.WC_ERR_INVALID_CHARS*/, (*C.WCHAR)(&wide[0]), -1, nil, 0, nil, nil); numOfMB > 0 {\r\n\t\tmbStr := make([]C.char, numOfMB)\r\n\t\tif rc := C.WideCharToMultiByte(codePage, 0 /*C.WC_ERR_INVALID_CHARS*/, (*C.WCHAR)(&wide[0]), -1, (*C.CHAR)(&mbStr[0]), numOfMB, nil, nil); rc > 0 {\r\n\t\t\tptr := (*C.char)(unsafe.Pointer(&mbStr[0])) // #nosec\r\n\t\t\ts, e = C.GoString(ptr), nil\r\n\t\t}\r\n\t}\r\n\r\n\treturn\r\n}",
"func Swe7_swedish_ci_RuneWeight(r rune) int32 {\n\tweight, ok := swe7_swedish_ci_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func Dec8_bin_RuneWeight(r rune) int32 {\n\tweight, ok := dec8_bin_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 0 && r <= 163 {\n\t\treturn r + 0\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func CConv(s uint8) string",
"func kiloToChinese(number int, maybeZero, lastZero bool) (string, bool, bool) {\n\tvar buffer bytes.Buffer\n\n\tfor i, str := range Reverse(Number2) {\n\t\tn := int(math.Pow10(3 - i))\n\t\tswitch {\n\t\tcase number >= n:\n\t\t\tbuffer.WriteString(Number1[number/n] + str)\n\t\t\tnumber = number % n\n\t\t\tmaybeZero = true\n\t\t\tlastZero = false\n\t\tcase maybeZero:\n\t\t\tif !lastZero && number != 0 {\n\t\t\t\tbuffer.WriteString(\"ιΆ\")\n\t\t\t\tlastZero = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn buffer.String(), maybeZero, lastZero\n}",
"func mbToWide(codePage C.UINT, mb *C.char) (s []C.wchar_t, e error) {\r\n\te = ErrInvalidWide\r\n\r\n\tif numOfWC := C.MultiByteToWideChar(codePage, C.MB_ERR_INVALID_CHARS, (*C.CHAR)(mb), -1, nil, 0); numOfWC > 0 {\r\n\t\twideStr := make([]C.wchar_t, numOfWC)\r\n\t\tif rc := C.MultiByteToWideChar(codePage, C.MB_ERR_INVALID_CHARS, (*C.CHAR)(mb), -1, (*C.WCHAR)(&wideStr[0]), numOfWC); rc > 0 {\r\n\t\t\t/* for _, ch := range wideStr {\r\n\t\t\t\tif ch == 0xFFFD {\r\n\t\t\t\t\treturn wideStr, ErrInvalidWide\r\n\t\t\t\t}\r\n\t\t\t} */\r\n\t\t\ts, e = wideStr, nil\r\n\t\t}\r\n\t}\r\n\r\n\treturn\r\n}",
"func (x nat) string(charset string) string {\n\tb := Word(len(charset))\n\n\t// special cases\n\tswitch {\n\tcase b < 2 || b > 256:\n\t\tpanic(\"illegal base\")\n\tcase len(x) == 0:\n\t\treturn string(charset[0])\n\t}\n\n\t// allocate buffer for conversion\n\ti := x.bitLen()/log2(b) + 1 // +1: round up\n\ts := make([]byte, i)\n\n\t// special case: power of two bases can avoid divisions completely\n\tif b == b&-b {\n\t\t// shift is base-b digit size in bits\n\t\tshift := uint(trailingZeroBits(b)) // shift > 0 because b >= 2\n\t\tmask := Word(1)<<shift - 1\n\t\tw := x[0]\n\t\tnbits := uint(_W) // number of unprocessed bits in w\n\n\t\t// convert less-significant words\n\t\tfor k := 1; k < len(x); k++ {\n\t\t\t// convert full digits\n\t\t\tfor nbits >= shift {\n\t\t\t\ti--\n\t\t\t\ts[i] = charset[w&mask]\n\t\t\t\tw >>= shift\n\t\t\t\tnbits -= shift\n\t\t\t}\n\n\t\t\t// convert any partial leading digit and advance to next word\n\t\t\tif nbits == 0 {\n\t\t\t\t// no partial digit remaining, just advance\n\t\t\t\tw = x[k]\n\t\t\t\tnbits = _W\n\t\t\t} else {\n\t\t\t\t// partial digit in current (k-1) and next (k) word\n\t\t\t\tw |= x[k] << nbits\n\t\t\t\ti--\n\t\t\t\ts[i] = charset[w&mask]\n\n\t\t\t\t// advance\n\t\t\t\tw = x[k] >> (shift - nbits)\n\t\t\t\tnbits = _W - (shift - nbits)\n\t\t\t}\n\t\t}\n\n\t\t// convert digits of most-significant word (omit leading zeros)\n\t\tfor nbits >= 0 && w != 0 {\n\t\t\ti--\n\t\t\ts[i] = charset[w&mask]\n\t\t\tw >>= shift\n\t\t\tnbits -= shift\n\t\t}\n\n\t\treturn string(s[i:])\n\t}\n\n\t// general case: extract groups of digits by multiprecision division\n\n\t// maximize ndigits where b**ndigits < 2^_W; bb (big base) is b**ndigits\n\tbb := Word(1)\n\tndigits := 0\n\tfor max := Word(_M / b); bb <= max; bb *= b {\n\t\tndigits++\n\t}\n\n\t// preserve x, create local copy for use in repeated divisions\n\tq := nat(nil).set(x)\n\tvar r Word\n\n\t// convert\n\tif b == 10 { // hard-coding for 10 here speeds this up by 1.25x\n\t\tfor len(q) 
> 0 {\n\t\t\t// extract least significant, base bb \"digit\"\n\t\t\tq, r = q.divW(q, bb) // N.B. >82% of time is here. Optimize divW\n\t\t\tif len(q) == 0 {\n\t\t\t\t// skip leading zeros in most-significant group of digits\n\t\t\t\tfor j := 0; j < ndigits && r != 0; j++ {\n\t\t\t\t\ti--\n\t\t\t\t\ts[i] = charset[r%10]\n\t\t\t\t\tr /= 10\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor j := 0; j < ndigits; j++ {\n\t\t\t\t\ti--\n\t\t\t\t\ts[i] = charset[r%10]\n\t\t\t\t\tr /= 10\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor len(q) > 0 {\n\t\t\t// extract least significant group of digits\n\t\t\tq, r = q.divW(q, bb) // N.B. >82% of time is here. Optimize divW\n\t\t\tif len(q) == 0 {\n\t\t\t\t// skip leading zeros in most-significant group of digits\n\t\t\t\tfor j := 0; j < ndigits && r != 0; j++ {\n\t\t\t\t\ti--\n\t\t\t\t\ts[i] = charset[r%b]\n\t\t\t\t\tr /= b\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor j := 0; j < ndigits; j++ {\n\t\t\t\t\ti--\n\t\t\t\t\ts[i] = charset[r%b]\n\t\t\t\t\tr /= b\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(s[i:])\n}",
"func To3x3Char(numb int) Matrix {\n\tswitch numb {\n\tcase 1:\n\t\treturn Matrix{\n\t\t\t[]rune{b, lv, b},\n\t\t\t[]rune{b, v, b},\n\t\t\t[]rune{b, hv, b},\n\t\t}\n\tcase 2:\n\t\treturn Matrix{\n\t\t\t[]rune{rh, h, tr},\n\t\t\t[]rune{tl, h, br},\n\t\t\t[]rune{bl, h, lh},\n\t\t}\n\tcase 3:\n\t\treturn Matrix{\n\t\t\t[]rune{h, h, tr},\n\t\t\t[]rune{rh, h, teeL},\n\t\t\t[]rune{h, h, br},\n\t\t}\n\tcase 4:\n\t\treturn Matrix{\n\t\t\t[]rune{lv, b, lv},\n\t\t\t[]rune{bl, h, teeL},\n\t\t\t[]rune{b, b, hv},\n\t\t}\n\tcase 5:\n\t\treturn Matrix{\n\t\t\t[]rune{tl, h, lh},\n\t\t\t[]rune{bl, h, tr},\n\t\t\t[]rune{rh, h, br},\n\t\t}\n\tcase 6:\n\t\treturn Matrix{\n\t\t\t[]rune{tl, h, lh},\n\t\t\t[]rune{teeR, h, tr},\n\t\t\t[]rune{bl, h, br},\n\t\t}\n\tcase 7:\n\t\treturn Matrix{\n\t\t\t[]rune{h, h, tr},\n\t\t\t[]rune{b, b, v},\n\t\t\t[]rune{b, b, hv},\n\t\t}\n\tcase 8:\n\t\treturn Matrix{\n\t\t\t[]rune{tl, h, tr},\n\t\t\t[]rune{teeR, h, teeL},\n\t\t\t[]rune{bl, h, br},\n\t\t}\n\tcase 9:\n\t\treturn Matrix{\n\t\t\t[]rune{tl, h, tr},\n\t\t\t[]rune{bl, h, teeL},\n\t\t\t[]rune{rh, h, br},\n\t\t}\n\tdefault:\n\t\treturn Matrix{\n\t\t\t[]rune{tl, h, tr},\n\t\t\t[]rune{v, b, v},\n\t\t\t[]rune{bl, h, br},\n\t\t}\n\t}\n}",
"func zzUnpackCMap(packed []int) []rune {\n\tm := make([]rune, 0x110000)\n\tj := 0 // index in unpacked array\n\tassert(len(packed) == 4122)\n\n\tfor i := 0; i < 4122; i += 2 {\n\t\tcount, value := packed[i], packed[i+1]\n\t\tm[j] = rune(value)\n\t\tj++\n\t\tcount--\n\t\tfor count > 0 {\n\t\t\tm[j] = rune(value)\n\t\t\tj++\n\t\t\tcount--\n\t\t}\n\t}\n\treturn m\n}",
"func Latin1_bin_RuneWeight(r rune) int32 {\n\tweight, ok := latin1_bin_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 0 && r <= 127 {\n\t\treturn r + 0\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func Geostd8_bin_RuneWeight(r rune) int32 {\n\tweight, ok := geostd8_bin_Weights[r]\n\tif ok {\n\t\treturn weight\n\t} else if r >= 0 && r <= 127 {\n\t\treturn r + 0\n\t} else {\n\t\treturn 2147483647\n\t}\n}",
"func (g *Generator) convertCyrillicToLatin() {\n\tfor _, runeValue := range g.lowerCased {\n\t\tg.converted = g.converted + librarian.Dictionary[string(runeValue)]\n\t}\n}",
"func applyRulesW1to3(r rune, clz bidi.Class, current scrap) bidi.Class {\n\tcurrclz := current.bidiclz\n\tswitch clz { // do some pre-processing\n\tcase bidi.NSM: // rule W1, handle accents\n\t\tswitch currclz {\n\t\tcase bidi.LRI:\n\t\t\treturn bidi.L\n\t\tcase bidi.RLI:\n\t\t\treturn bidi.R\n\t\tcase bidi.PDI:\n\t\t\treturn cNI\n\t\t}\n\t\treturn currclz\n\tcase bidi.EN: // rule W2\n\t\tif current.context.IsAL() {\n\t\t\ttracing.Errorf(\"========= context: %v\", current.context)\n\t\t\treturn bidi.AN\n\t\t}\n\tcase bidi.AL: // rule W3\n\t\treturn bidi.R\n\tcase bidi.S, bidi.WS, bidi.ON:\n\t\treturn cNI\n\t\t//if sc.bidiclz == NI {\n\t\t// return NI\n\t\t//}\n\t}\n\treturn clz\n}",
"func convert(s string, numRows int) string {\n\t// Sample 8ms\n\tif numRows < 2 {\n\t\treturn s\n\t}\n\tn := len(s)\n\tout := make([]byte, n)\n\tidx := 0\n\tfor i := 0; i < numRows; i++ {\n\t\tzig := (numRows - i - 1) * 2\n\t\tzag := i * 2\n\t\tif zig == 0 {\n\t\t\tzig = zag\n\t\t}\n\t\tif zag == 0 {\n\t\t\tzag = zig\n\t\t}\n\t\tfor j, k := i, 0; j < n; idx, k = idx+1, k+1 {\n\t\t\tout[idx] = s[j]\n\t\t\tif k%2 == 0 {\n\t\t\t\tj += zig\n\t\t\t} else {\n\t\t\t\tj += zag\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(out)\n\n\t/* My Solution 24ms\n\tif numRows < 2 {\n\t\treturn s\n\t}\n\tvar ans []rune\n\tr := make([][]rune, numRows)\n\tplen := numRows*2 - 2\n\tfor i, c := range s {\n\t\tidx := i % plen\n\t\tif idx >= numRows {\n\t\t\tidx = plen - idx\n\t\t}\n\t\tr[idx] = append(r[idx], c)\n\t}\n\tfor _, row := range r {\n\t\tans = append(ans, row...)\n\t}\n\treturn string(ans)\n\t*/\n}",
"func ToKlingon(value string) (string, error) {\n\n\t// Return error for empty string\n\tif value == \"\" {\n\t\treturn \"\", errors.New(\"Not translatable\")\n\t}\n\n\ttranslated := []string{}\n\n\t// Total length of the string\n\tlength := len(value)\n\n\tcursor := 0\n\n\t// Loop until all values are validated\n\tfor cursor < length {\n\n\t\t// Search for 2 characters\n\t\t// expected : ch - gh - ng\n\t\tif cursor+1 < length {\n\n\t\t\ttranslatedChar, ok := dictionary[strings.ToLower(value[cursor:cursor+2])]\n\n\t\t\t// Matched!\n\t\t\tif ok {\n\t\t\t\t// Save translated characters to array\n\t\t\t\ttranslated = append(translated, translatedChar)\n\n\t\t\t\t// Add 2 to cursor\n\t\t\t\tcursor += 2\n\n\t\t\t\t// Skip the remaining process\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Search for 3 characters\n\t\t// expected : tlh\n\t\tif cursor+2 < length {\n\t\t\ttranslatedChar, ok := dictionary[strings.ToLower(value[cursor:cursor+3])]\n\n\t\t\t// Matched!\n\t\t\tif ok {\n\t\t\t\t// Save translated characters to array\n\t\t\t\ttranslated = append(translated, translatedChar)\n\n\t\t\t\t// Add 3 to cursor\n\t\t\t\tcursor += 3\n\n\t\t\t\t// Skip the remaining process\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Convert []byte to string\n\t\tcurrentChar := string(value[cursor])\n\n\t\t// Search the dictionary\n\t\ttranslatedChar, ok := dictionary[currentChar]\n\n\t\t// If failed, search for the lowercase\n\t\tif !ok {\n\n\t\t\ttranslatedChar, ok = dictionary[strings.ToLower(currentChar)]\n\n\t\t\t// The character isn't translatable -- return error\n\t\t\tif !ok {\n\t\t\t\treturn \"\", errors.New(\"Not translatable\")\n\t\t\t}\n\t\t}\n\n\t\t// Save translated characters to array\n\t\ttranslated = append(translated, translatedChar)\n\n\t\t// Add 1 to cursor\n\t\tcursor++\n\n\t}\n\n\t// Separate each character with space\n\treturn strings.Join(translated, \" \"), nil\n\n}",
"func ConvertSizeToBytes(s string) (string, error) {\n\ts = strings.TrimSpace(strings.ToLower(s))\n\n\t// spin until we find a match, if no match return original string\n\tfor _, k := range units {\n\t\tvar y int = lookupTable[k]\n\t\tif strings.HasSuffix(s, k) {\n\t\t\ts = s[:len(s)-len(k)]\n\t\t\ti, err := strconv.Atoi(s)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\ti = i * Pow(1024, y)\n\t\t\ts = strconv.Itoa(i)\n\t\t\treturn s, nil\n\t\t}\n\t}\n\n\treturn s, nil\n}",
"func (arg1 *UConverter) GetMaxCharSize() int"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
nextWeight computes the first possible collation weights following elems for the given level.
|
func nextWeight(level colltab.Level, elems []rawCE) []rawCE {
if level == colltab.Identity {
next := make([]rawCE, len(elems))
copy(next, elems)
return next
}
next := []rawCE{makeRawCE(elems[0].w, elems[0].ccc)}
next[0].w[level]++
if level < colltab.Secondary {
next[0].w[colltab.Secondary] = defaultSecondary
}
if level < colltab.Tertiary {
next[0].w[colltab.Tertiary] = defaultTertiary
}
// Filter entries that cannot influence ordering.
for _, ce := range elems[1:] {
skip := true
for i := colltab.Primary; i < level; i++ {
skip = skip && ce.w[i] == 0
}
if !skip {
next = append(next, ce)
}
}
return next
}
|
[
"func Weight(numCounters uint32) float64 {\n\tif numCounters < 16 {\n\t\treturn 0.673\n\t} else if numCounters < 32 {\n\t\treturn 0.697\n\t} else if numCounters < 64 {\n\t\treturn 0.709\n\t}\n\treturn (0.7213 * float64(numCounters)) / (float64(numCounters) + 1.079)\n}",
"func (e WeightedEdgePair) Weight() float64 { return e.W }",
"func (sw *SW) Next() interface{} {\n\tsw.mtx.RLock()\n\tdefer sw.mtx.RUnlock()\n\n\tif sw.elems == nil || len(sw.elems) == 0 {\n\t\treturn nil\n\t}\n\n\tvar total int\n\tnext := &smoothElem{}\n\tfor _, elem := range sw.elems {\n\t\ttotal += elem.effectiveWeight\n\t\telem.currentWeight += elem.effectiveWeight\n\n\t\tif elem.effectiveWeight < elem.weight { // automatic recovery\n\t\t\telem.effectiveWeight++\n\t\t}\n\n\t\tif next == nil || next.currentWeight < elem.currentWeight {\n\t\t\tnext = elem\n\t\t}\n\t}\n\n\tnext.currentWeight -= total\n\treturn next.elem\n}",
"func (fields List) Weight() int {\n\tif fields.p == nil {\n\t\treturn 0\n\t}\n\tx, n := uvarint(*(*[]byte)(unsafe.Pointer(&bytes{fields.p, 10, 10})))\n\treturn x + n\n}",
"func weightedRand(weights []int) int {\n\t// Calculate the total weight\n\ttotal := 0\n\tfor _, weight := range weights {\n\t\ttotal += weight\n\t}\n\n\t// Select a random value between 0 and the total weight\n\tvalue := rand.Intn(total)\n\n\t// Find the corresponding index\n\tfor i, weight := range weights {\n\t\tif value < weight {\n\t\t\treturn i\n\t\t}\n\t\tvalue -= weight\n\t}\n\treturn len(weights) - 1\n}",
"func Weighted(w0 float64, v0 byte, w1 float64, v1 byte) byte {\n\treturn byte(math.Round((w0*float64(v0) + w1*float64(v1)) / (w0 + w1)))\n}",
"func compareWeights(a, b []rawCE) (result int, level colltab.Level) {\n\tfor level := colltab.Primary; level < colltab.Identity; level++ {\n\t\tvar va, vb int\n\t\tfor ia, ib := 0, 0; ia < len(a) || ib < len(b); ia, ib = ia+1, ib+1 {\n\t\t\tia, va = nextVal(a, ia, level)\n\t\t\tib, vb = nextVal(b, ib, level)\n\t\t\tif va != vb {\n\t\t\t\tif va < vb {\n\t\t\t\t\treturn -1, level\n\t\t\t\t} else {\n\t\t\t\t\treturn 1, level\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, colltab.Identity\n}",
"func Weighted(choiceWeight []int) int {\n\n\t// sum and number of choices\n\tsumWeight := core.Sum(choiceWeight[:])\n\tnumChoices := len(choiceWeight)\n\n\t// get random number from sum of all weights\n\tvar rnd int = rand.Intn(sumWeight)\n\n\t// search for randomized index\n\tvar index int\n\tfor index = 0; index < numChoices; index++ {\n\t\tif rnd < choiceWeight[index] {\n\t\t\tbreak\n\t\t}\n\n\t\trnd -= choiceWeight[index]\n\t}\n\n\treturn index\n}",
"func next(g *game, x, y int, adj []*player, z float64) *player {\n\t// Most likely gonna stay the same\n\tf := rand.Float64()\n\tif f >= 0 && f < 0.90 {\n\t\treturn adj[CENTER]\n\t}\n\n\tnearPlayers := make(map[*player]bool)\n\tfor _, p := range g.players {\n\t\tif p.dist(x, y) < 8 {\n\t\t\tnearPlayers[p] = true\n\t\t}\n\t}\n\n\tweight := make(map[*player]float64)\n\tfor _, p := range adj {\n\t\t// 1 point of representation for each adjacent player. Treat unoccupied like a player.\n\t\tweight[p] += 1\n\n\t\t// increase the influence of occupied cells when their player is near\n\t\tif nearPlayers[p] {\n\t\t\tweight[p] += 7\n\t\t}\n\t}\n\n\t// reduce the influence of unoccupied\n\tweight[nil] *= 0.05\n\n\t// Normalize all weights so they add to 1.0\n\ttotalWeight := 0.0\n\tfor _, w := range weight {\n\t\ttotalWeight += w\n\t}\n\tfor p, w := range weight {\n\t\tweight[p] = w / totalWeight\n\t}\n\n\t// Translate weights into ranges that span [0,1) and use z to determine the outcome\n\tl := 0.0\n\tr := 0.0\n\t// TODO there's probably a smarter O(1) way to do this\n\tfor i := 0; i < len(g.players); i++ {\n\t\tp := g.players[i]\n\t\tr = l + weight[p]\n\t\tif z >= l && z < r {\n\t\t\treturn p\n\t\t}\n\t\tl = r\n\t}\n\treturn nil // unowned\n}",
"func Weighted(weights ...int) func() int {\n\trepartition := []int{}\n\tfor i, weight := range weights {\n\t\tfor j := 0; j < weight; j++ {\n\t\t\trepartition = append(repartition, i)\n\t\t}\n\t}\n\tlimit := int64(len(repartition))\n\treturn func() int {\n\t\treturn repartition[int(src.Int63()%limit)]\n\t}\n}",
"func (v *Variations) NextWeightedRand() string {\n\trand.Seed(time.Now().UnixNano())\n\n\t// Get a sum total of the probabities of all variations.\n\tvar total int64\n\tfor _, i := range *v {\n\t\ttotal += i\n\t}\n\n\t// Pick a random int between 0 and the total sum.\n\tr := rand.Int63n(total)\n\n\t// Range through the possible variations and subtract the probability\n\t// weight from the random number. If r goes below zero, select the key.\n\tvar k string\n\tvar i int64\n\tfor k, i = range *v {\n\t\tr -= i\n\t\tif r < 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn k\n}",
"func (*Add) Weight() int {\n\treturn 1\n}",
"func WeightedChoice(choices []Choice) (Choice, error) {\n\t// Based on this algorithm:\n\t// http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/\n\tvar ret Choice\n\tsum := 0\n\tfor _, c := range choices {\n\t\tsum += c.Weight\n\t}\n\tr, err := IntRange(0, sum)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tfor _, c := range choices {\n\t\tr -= c.Weight\n\t\tif r < 0 {\n\t\t\treturn c, nil\n\t\t}\n\t}\n\terr = errors.New(\"Internal error - code should not reach this point\")\n\treturn ret, err\n}",
"func (p *ColorPalette) Weight(c color.Color) float64 {\n\tweight, _ := p.colorWeights[c]\n\treturn weight\n}",
"func (g *graph) weight(e Edge) float64 {\n\tw := e.GetWeight()\n\tv := e.GetDestination()\n\tfor g.Parent[v] != nil {\n\t\tw += g.Const[e.GetDestination()]\n\t\tv = *g.Parent[v]\n\t}\n\treturn w\n}",
"func (this *NodeWeight) Weight() float32 {\n\tavgSpeed := float32(0.0)\n\tfor _, s := range this.speed {\n\t\tavgSpeed += s\n\t}\n\tavgSpeed = avgSpeed / float32(len(this.speed))\n\n\tavgInterval := float32(0.0)\n\tnow := time.Now().UnixNano() / int64(time.Millisecond)\n\tfor _, t := range this.reqTime {\n\t\tavgInterval += float32(now - t)\n\t}\n\tavgInterval = avgInterval / float32(len(this.reqTime))\n\tw := avgSpeed + avgInterval\n\treturn w\n}",
"func Weight(v int) predicate.Relationship {\n\treturn predicate.Relationship(sql.FieldEQ(FieldWeight, v))\n}",
"func (np *neighborPair) Weight() int {\n\treturn np.cost\n}",
"func packweight() int {\n\tk := c[GOLD] / 1000\n\tj := 25\n\tfor iven[j] == 0 && j > 0 {\n\t\tj--\n\t}\n\tfor i := 0; i <= j; i++ {\n\t\tswitch iven[i] {\n\t\tcase 0:\n\t\tcase OSSPLATE, OPLATEARMOR:\n\t\t\tk += 40\n\t\tcase OPLATE:\n\t\t\tk += 35\n\t\tcase OHAMMER:\n\t\t\tk += 30\n\t\tcase OSPLINT:\n\t\t\tk += 26\n\t\tcase OSWORDofSLASHING, OCHAIN, OBATTLEAXE, O2SWORD:\n\t\t\tk += 23\n\t\tcase OLONGSWORD, OSWORD, ORING, OFLAIL:\n\t\t\tk += 20\n\t\tcase OLANCE, OSTUDLEATHER:\n\t\t\tk += 15\n\t\tcase OLEATHER, OSPEAR:\n\t\t\tk += 8\n\t\tcase OORBOFDRAGON, OBELT:\n\t\t\tk += 4\n\t\tcase OSHIELD:\n\t\t\tk += 7\n\t\tcase OCHEST:\n\t\t\tk += 30 + ivenarg[i]\n\t\tdefault:\n\t\t\tk++\n\t\t}\n\t}\n\treturn k\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
compareWeights returns 1 if a b, or 0 otherwise. It also returns the collation level at which the difference is found.
|
func compareWeights(a, b []rawCE) (result int, level colltab.Level) {
for level := colltab.Primary; level < colltab.Identity; level++ {
var va, vb int
for ia, ib := 0, 0; ia < len(a) || ib < len(b); ia, ib = ia+1, ib+1 {
ia, va = nextVal(a, ia, level)
ib, vb = nextVal(b, ib, level)
if va != vb {
if va < vb {
return -1, level
} else {
return 1, level
}
}
}
}
return 0, colltab.Identity
}
|
[
"func CMPW(amr, imr operand.Op) { ctx.CMPW(amr, imr) }",
"func compare(a, b *TrieKey, prematchedBits uint) (a_match, b_match, reversed bool, common, child uint) {\n\t// Figure out which is the longer prefix and reverse them if b is shorter\n\treversed = b.Length < a.Length\n\tif reversed {\n\t\tb_match, a_match, common, child = contains(b, a, prematchedBits)\n\t} else {\n\t\ta_match, b_match, common, child = contains(a, b, prematchedBits)\n\t}\n\treturn\n}",
"func Weight(numCounters uint32) float64 {\n\tif numCounters < 16 {\n\t\treturn 0.673\n\t} else if numCounters < 32 {\n\t\treturn 0.697\n\t} else if numCounters < 64 {\n\t\treturn 0.709\n\t}\n\treturn (0.7213 * float64(numCounters)) / (float64(numCounters) + 1.079)\n}",
"func calculateWeightStatus(ro *v1alpha1.Rollout, canaryHash, stableHash string, desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) (bool, *v1alpha1.TrafficWeights) {\n\tweights := v1alpha1.TrafficWeights{\n\t\tCanary: v1alpha1.WeightDestination{\n\t\t\tWeight: desiredWeight,\n\t\t\tPodTemplateHash: canaryHash,\n\t\t\tServiceName: ro.Spec.Strategy.Canary.CanaryService,\n\t\t},\n\t}\n\tstableWeight := 100 - desiredWeight\n\tfor _, weightDest := range weightDestinations {\n\t\tweights.Additional = append(weights.Additional, weightDest)\n\t\tstableWeight -= weightDest.Weight\n\t}\n\tweights.Stable.Weight = stableWeight\n\tweights.Stable.PodTemplateHash = stableHash\n\tweights.Stable.ServiceName = ro.Spec.Strategy.Canary.StableService\n\n\tprevWeights := ro.Status.Canary.Weights\n\tmodified := prevWeights == nil ||\n\t\tprevWeights.Canary != weights.Canary ||\n\t\tprevWeights.Stable != weights.Stable ||\n\t\t!reflect.DeepEqual(prevWeights.Additional, weights.Additional)\n\treturn modified, &weights\n}",
"func TestWeightedOperations(t *testing.T) {\n\n\tapp, ctx := createTestApp(false)\n\n\tctx.WithChainID(\"test-chain\")\n\n\tcdc := app.AppCodec()\n\tappParams := make(simtypes.AppParams)\n\n\tweightesOps := simulation.WeightedOperations(appParams, cdc, app.AccountKeeper,\n\t\tapp.BankKeeper, app.StakingKeeper,\n\t)\n\n\ts := rand.NewSource(1)\n\tr := rand.New(s)\n\taccs := simtypes.RandomAccounts(r, 3)\n\n\texpected := []struct {\n\t\tweight int\n\t\topMsgRoute string\n\t\topMsgName string\n\t}{{simappparams.DefaultWeightMsgCreatePool, types.ModuleName, types.TypeMsgCreatePool},\n\t\t{simappparams.DefaultWeightMsgAddPledge, types.ModuleName, types.TypeMsgAddPledge},\n\t\t{simappparams.DefaultWeightMsgRedeemPledge, types.ModuleName, types.TypeMsgRedeemPledge},\n\t\t{simappparams.DefaultWeightMsgPalceOrder, types.ModuleName, types.TypeMsgPlaceOrder},\n\t\t{simappparams.DefaultWeightMsgRevokeOrder, types.ModuleName, types.TypeMsgRevokeOrder},\n\t\t{simappparams.DefaultWeightMsgAgreeOrder, types.ModuleName, types.TypeMsgAgreeOrder},\n\t}\n\n\tfor i, w := range weightesOps {\n\t\toperationMsg, _, _ := w.Op()(r, app.BaseApp, ctx, accs, ctx.ChainID())\n\t\t// the following checks are very much dependent from the ordering of the output given\n\t\t// by WeightedOperations. if the ordering in WeightedOperations changes some tests\n\t\t// will fail\n\t\trequire.Equal(t, expected[i].weight, w.Weight(), \"weight should be the same\")\n\t\trequire.Equal(t, expected[i].opMsgRoute, operationMsg.Route, \"route should be the same\")\n\t\trequire.Equal(t, expected[i].opMsgName, operationMsg.Name, \"operation Msg name should be the same\")\n\t}\n}",
"func (a *AlgorithmDefault) WeightingRelation(\n\tword1ID int,\n\tword2ID int,\n\trank *Rank,\n) float32 {\n\trelationQty := rank.Relation.Node[word1ID][word2ID].Qty\n\n\treturn float32(relationQty)\n}",
"func (e *Election) cmp(a, b int) int {\n\tcnt := 0\n\n\tfor _, v := range e.V {\n\t\tfor i := 0; i < len(v.C); i++ {\n\t\t\tx := v.C[strconv.Itoa(i)]\n\t\t\tif x == a {\n\t\t\t\tcnt -= v.W\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif x == b {\n\t\t\t\tcnt += v.W\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn cnt\n}",
"func distcmp(target, a, b common.Hash) int {\n\tfor i := range target {\n\t\tda := a[i] ^ target[i]\n\t\tdb := b[i] ^ target[i]\n\t\tif da > db {\n\t\t\treturn 1\n\t\t} else if da < db {\n\t\t\treturn -1\n\t\t}\n\t}\n\treturn 0\n}",
"func distcmp(target, a, b bgmcommon.Hash) int {\n\tfor i := range target {\n\t\tda := a[i] ^ target[i]\n\t\tdb := b[i] ^ target[i]\n\t\tif da > db {\n\t\t\treturn 1\n\t\t} else if da < db {\n\t\t\treturn -1\n\t\t}\n\t}\n\treturn 0\n}",
"func compareReplicationStates(s1, s2 string) (int, error) {\n\tw1, err := ParseReplicationState(s1, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tw2, err := ParseReplicationState(s2, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif w1.ClusterID != w2.ClusterID {\n\t\treturn 0, fmt.Errorf(\"can't compare replication states with different ClusterIDs\")\n\t}\n\n\tswitch {\n\tcase w1.LocalIndex >= w2.LocalIndex && w1.ReplicatedIndex >= w2.ReplicatedIndex:\n\t\treturn 1, nil\n\t// We've already handled the case where both are equal above, so really we're\n\t// asking here if one or both are lesser.\n\tcase w1.LocalIndex <= w2.LocalIndex && w1.ReplicatedIndex <= w2.ReplicatedIndex:\n\t\treturn -1, nil\n\t}\n\n\treturn 0, nil\n}",
"func (g *testExpectedGraph) Weight(xid, yid int64) (w float64, ok bool) {\n\tif xid == yid {\n\t\treturn self, true\n\t}\n\tif to, ok := g.from[xid]; ok {\n\t\tif e, ok := to[yid]; ok {\n\t\t\treturn e.Weight(), true\n\t\t}\n\t}\n\treturn absent, false\n}",
"func (uc *unicodeCICollator) Compare(a, b string) int {\n\ta = truncateTailingSpace(a)\n\tb = truncateTailingSpace(b)\n\t// weight of a, b. weight in unicode_ci may has 8 uint16s. xn indicate first 4 u16s, xs indicate last 4 u16s\n\tan, bn := uint64(0), uint64(0)\n\tas, bs := uint64(0), uint64(0)\n\t// rune of a, b\n\tar, br := rune(0), rune(0)\n\t// decode index of a, b\n\tai, bi := 0, 0\n\tfor {\n\t\tif an == 0 {\n\t\t\tif as == 0 {\n\t\t\t\tfor an == 0 && ai < len(a) {\n\t\t\t\t\tar, ai = decodeRune(a, ai)\n\t\t\t\t\tan, as = convertRuneUnicodeCI(ar)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tan = as\n\t\t\t\tas = 0\n\t\t\t}\n\t\t}\n\n\t\tif bn == 0 {\n\t\t\tif bs == 0 {\n\t\t\t\tfor bn == 0 && bi < len(b) {\n\t\t\t\t\tbr, bi = decodeRune(b, bi)\n\t\t\t\t\tbn, bs = convertRuneUnicodeCI(br)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbn = bs\n\t\t\t\tbs = 0\n\t\t\t}\n\t\t}\n\n\t\tif an == 0 || bn == 0 {\n\t\t\treturn sign(int(an) - int(bn))\n\t\t}\n\n\t\tif an == bn {\n\t\t\tan, bn = 0, 0\n\t\t\tcontinue\n\t\t}\n\n\t\tfor an != 0 && bn != 0 {\n\t\t\tif (an^bn)&0xFFFF == 0 {\n\t\t\t\tan >>= 16\n\t\t\t\tbn >>= 16\n\t\t\t} else {\n\t\t\t\treturn sign(int(an&0xFFFF) - int(bn&0xFFFF))\n\t\t\t}\n\t\t}\n\t}\n}",
"func (e WeightedEdgePair) Weight() float64 { return e.W }",
"func DifferentIndex(childrenWeights []int) (int, int) {\n\tif len(childrenWeights) == 0 {\n\t\treturn -1, 0\n\t}\n\tsortedWeights := make([]int, len(childrenWeights))\n\tcopy(sortedWeights, childrenWeights)\n\tsort.Ints(sortedWeights)\n\tcorrectValue := sortedWeights[len(sortedWeights)/2]\n\tfor i, v := range childrenWeights {\n\t\tif v != correctValue {\n\t\t\treturn i, correctValue\n\t\t}\n\t}\n\treturn -1, correctValue\n}",
"func (a *AlgorithmChain) WeightingRelation(\n\tword1ID int,\n\tword2ID int,\n\trank *Rank,\n) float32 {\n\trelationQty := rank.Relation.Node[word1ID][word2ID].Qty\n\tword1Qty := rank.Words[word1ID].Qty\n\tword2Qty := rank.Words[word2ID].Qty\n\n\tqDiff := float32(math.Abs(float64(word1Qty)-float64(word2Qty))) / 100\n\tweight := float32(relationQty) + qDiff\n\n\treturn weight\n}",
"func (t PostgresTyper) MostWeight(left, right string) string {\n\tif left == right {\n\t\treturn left\n\t}\n\n\tif typeWeight[left] > typeWeight[right] {\n\t\treturn left\n\t}\n\n\treturn right\n}",
"func (this Graph) getWeightBetween(v1, v2 graph.VertexInterface) float64 {\n for _, side := range v1.GetEdgesFast() {\n for _, edge := range side() {\n if edge.GetOtherVertex(v1) == v2 {\n return edge.GetWeight()\n }\n }\n }\n return -1\n}",
"func compare(s1, s2 uint16) int {\n\tif s1 == s2 {\n\t\treturn 0\n\t}\n\tif ((s2 - s1) & 0x8000) != 0 {\n\t\treturn 1\n\t}\n\treturn -1\n}",
"func (etf *Etf) computeWeightDirection(x, y gocv.Vecf) float32 {\n\treturn float32(math.Abs(float64(etf.computeDot(x, y))))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Anagrams returns all anagrams of the input string using words from the lineseparated word list found at dpath
|
func Anagrams(s string, dpath string) ([]string, error) {
d, err := readDict(dpath)
if err != nil {
panic(err)
}
l := strings.ReplaceAll(s, " ", "")
l = util.SortString(l)
res := []string{}
return *findAnagrams(l, &res, d, s), nil
}
|
[
"func Anagrams(word string) []string {\n\tif len(word) <= 1 {\n\t\treturn []string{word}\n\t}\n\n\toutput := []string{}\n\tvar letter byte\n\tvar pre []byte\n\tvar post []byte\n\tvar joined []byte\n\tfor x := 0; x < len(word); x++ {\n\t\tworkingWord := make([]byte, len(word))\n\t\tcopy(workingWord, []byte(word))\n\t\tletter = workingWord[x]\n\t\tpre = workingWord[0:x]\n\t\tpost = workingWord[x+1 : len(word)]\n\t\tjoined = append(pre, post...)\n\t\tfor _, subResult := range Anagrams(string(joined)) {\n\t\t\toutput = append(output, string(letter)+subResult)\n\t\t}\n\t}\n\treturn output\n}",
"func ProblemDay6() {\n\n\ttest1 := []string{\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"}\n\n\tfmt.Println(groupAnagrams(test1))\n\n}",
"func Anagrams(words []string) int {\n\tm := make(map[string]int)\n\tfor _, w := range words {\n\t\tsorted := SortString(w)\n\t\tm[sorted]++\n\t}\n\tcount := 0\n\tfor _, i := range m {\n\t\tif i == 1 {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}",
"func anagramList(words []string) map[string]map[string]bool {\n\tanagrams := make(map[string]map[string]bool)\n\tfor _, w := range words {\n\t\tw = strings.ToLower(w)\n\t\twordKey := sortWord(w)\n\t\tif anagrams[wordKey] != nil {\n\t\t\tanagrams[wordKey][w] = true\n\t\t} else {\n\t\t\tanagrams[wordKey] = make(map[string]bool)\n\t\t\tanagrams[wordKey][w] = true\n\t\t}\n\t}\n\treturn anagrams\n}",
"func groupAnagrams(strs []string) [][]string {\n\tvar set = make(map[[math.MaxUint8]int][]string)\n\tfor _, str := range strs {\n\t\tvar charSet = [math.MaxUint8]int{}\n\t\tfor _, letter := range []byte(str) {\n\t\t\tcharSet[letter]++\n\t\t}\n\t\tif _, ok := set[charSet]; !ok {\n\t\t\tset[charSet] = []string{str}\n\t\t} else {\n\t\t\tset[charSet] = append(set[charSet], str)\n\t\t}\n\t}\n\tres := make([][]string, 0, len(set))\n\tfor _, v := range set {\n\t\tres = append(res, v)\n\t}\n\treturn res\n}",
"func findAnagrams(s string, p string) []int {\n\trequired := [26]int{} // Strings consists of lowercase English letters only\n\tfor _, c := range p {\n\t\trequired[c-'a']++\n\t}\n\tlength := len(p)\n\tleft := 0\n\tresults := []int{}\n\tfor _, c := range s {\n\t\tb := c - 'a'\n\t\trequired[b]--\n\t\tlength--\n\t\tif required[b] < 0 {\n\t\t\tfor {\n\t\t\t\tl := left\n\t\t\t\tleft++\n\t\t\t\trequired[s[l]-'a']++\n\t\t\t\tlength++\n\t\t\t\tif s[l] == byte(c) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif length == 0 {\n\t\t\tresults = append(results, left)\n\t\t\trequired[s[left]-'a']++\n\t\t\tlength++\n\t\t\tleft++\n\t\t}\n\t}\n\treturn results\n}",
"func AnagramsReader(r io.Reader) int {\n\tbufR := bufio.NewReader(r)\n\tn := ReadConstraintN(bufR)\n\twords := make([]string, n)\n\tfor i := 0; i < n; i++ {\n\t\tline, err := bufR.ReadBytes('\\n')\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\twords[i] = string(line)\n\t}\n\treturn Anagrams(words)\n}",
"func AlienDictonary(words []string) {\n\tdict := make(map[string][]string)\n\tfor i := 0; i < len(words); i++ {\n\t\tcurrent := string(words[i])\n\t\tfor j := 0; j < len(current); j++ {\n\t\t\t_, found := dict[string(current[j])]\n\t\t\tif !found {\n\t\t\t\tdict[string(current[j])] = []string{}\n\t\t\t}\n\n\t\t}\n\t}\n\tinEdges := make(map[string]int)\n\tfor key := range dict {\n\t\tinEdges[key] = 0\n\t}\n\tfor i := 1; i < len(words); i++ {\n\t\tfirst := words[i-1]\n\t\tsecond := words[i]\n\t\tl := int(math.Min(float64(len(first)), float64(len(second))))\n\t\tfor j := 0; j < l; j++ {\n\t\t\tif first[j] != second[j] {\n\t\t\t\tdict[string(first[j])] = append(dict[string(first[j])], string(second[j]))\n\t\t\t\tinEdges[string(second[j])]++\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tq := []string{}\n\tfor key := range inEdges {\n\t\tif inEdges[key] == 0 {\n\t\t\tq = append(q, key)\n\t\t}\n\t}\n\tans := \"\"\n\tfor len(q) > 0 {\n\t\tremoved := q[0]\n\t\tq = q[1:]\n\t\tans += removed\n\t\tcurrent := dict[removed]\n\t\tfor i := 0; i < len(current); i++ {\n\t\t\tinEdges[string(current[i])]--\n\t\t\tif inEdges[string(current[i])] == 0 {\n\t\t\t\tq = append(q, string(current[i]))\n\t\t\t}\n\t\t}\n\n\t}\n\tfmt.Println(ans)\n}",
"func anagrams(s1 string, s2 string) bool {\n\tr1 := []rune(s1)\n\tr2 := []rune(s2)\n\tsort.Sort(sortableRunes(r1))\n\tsort.Sort(sortableRunes(r2))\n\n\tif string(r1) == string(r2) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func findAnagrams(s string, a string) []int {\n\tvar result []int\n\tm := make(map[rune]int)\n\tfor i := 0; (i + len(a)) <= len(s); i++ {\n\t\tisAnagram := true\n\t\tfor _, c := range a {\n\t\t\tm[c] += 1\n\t\t}\n\t\tfor _, c := range s[i : i+len(a)] {\n\t\t\tm[c] -= 1\n\t\t}\n\t\tfor _, c := range a {\n\t\t\tif m[c] != 0 {\n\t\t\t\tisAnagram = false\n\t\t\t}\n\t\t\tm[c] = 0\n\t\t}\n\t\tif isAnagram {\n\t\t\tresult = append(result, i)\n\t\t}\n\t}\n\treturn result\n}",
"func Anagram(a string) []string {\n\tn := len(a)\n\treturn printAnagramUtil([]rune(a), n, n)\n}",
"func anagrams(s1 string, s2 string) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\n\tm := make(map[rune]int)\n\n\tfor _, v := range s1 {\n\t\tm[v]++\n\t}\n\n\tfor _, v := range s2 {\n\t\tm[v]--\n\t}\n\n\tfor _, v := range m {\n\t\tif v != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (a *AnagramMap) AnagramSentence(sent []string) []string {\n\tvar ret []string\n\tfmt.Println(sent)\n\tfor _, s := range sent {\n\t\tfmt.Println(s)\n\t\tret = append(ret, a.AnagramOfWord(s))\n\t}\n\treturn ret\n}",
"func Detect(word string, candidates []string) []string {\n\tanagrams := []string{}\n\twordFrequencies := Frequencies(word)\n\tfor _, candidate := range candidates {\n\t\tif !strings.EqualFold(word, candidate) &&\n\t\t\treflect.DeepEqual(Frequencies(candidate), wordFrequencies) {\n\t\t\tanagrams = append(anagrams, candidate)\n\t\t}\n\t}\n\treturn anagrams\n}",
"func SplitIntoNGrams(nGram int, query string) []Term {\n\trunes := []rune(query)\n\n\tif len(runes) < nGram {\n\t\treturn []Term{}\n\t}\n\n\tresult := make([]Term, 0, len(runes)-nGram+1)\n\n\tfor i := 0; i < len(runes)-nGram+1; i++ {\n\t\tresult = appendUnique(result, string(runes[i:i+nGram]))\n\t}\n\n\treturn result\n}",
"func Detect(subject string, candidates []string) (anagrams []string) {\n\n\tsubject = strings.ToLower(subject)\n\n\tfor _, c := range candidates {\n\t\tif isAnagram(subject, strings.ToLower(c)) {\n\t\t\tanagrams = append(anagrams, c)\n\t\t}\n\t}\n\treturn anagrams\n}",
"func (a *AnagramMap) AnagramOfWord(word string) string {\n\tword = strings.ToLower(word)\n\twordKey := sortWord(word)\n\twordMap := a.Mapping[wordKey]\n\tif len(wordMap) <= 1 {\n\t\treturn word\n\t} else {\n\t\tuniqueWords := make([]string, 0)\n\t\tfor k, _ := range wordMap {\n\t\t\tif k != word {\n\t\t\t\tuniqueWords = append(uniqueWords, k)\n\t\t\t}\n\t\t}\n\n\t\tt := time.Now()\n\t\tr := rand.New(rand.NewSource(t.UnixNano()))\n\t\tw := uniqueWords[r.Intn(len(uniqueWords))]\n\t\treturn w\n\t}\n}",
"func AnagramKey(s string) string {\n\tchars := map[rune]int{}\n\trunes := []rune(s)\n\n\tfor _, rune := range s {\n\t\tchars[rune]++\n\t}\n\n\tsort.Sort(utils.SortedRunes(runes))\n\n\tout := []byte{}\n\tfor _, rune := range runes {\n\t\tout = strconv.AppendInt(out, int64(chars[rune]), 10)\n\t\tout = append(out, byte(rune))\n\t}\n\n\treturn string(out)\n}",
"func FindAnagramOrNot(str1, str2 string) {\n\n\tif len(strings.Replace(str1, \" \", \"\", -1)) != len(strings.Replace(str2, \" \", \"\", -1)) {\n\t\tfmt.Println(\"Not an anagram\")\n\t}\n\n\tfor _, first := range str1 {\n\t\tflag := false\n\t\tfor _, second := range str2 {\n\t\t\tif strings.ToLower(string(first)) == strings.ToLower(string(second)) {\n\t\t\t\tflag = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !flag {\n\t\t\tfmt.Println(\"strings are not anagrams\")\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tfmt.Println(\"Given strings are Anagrams\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Int promises that the zero value is a 0, but in gmp the zero value is a crash. To bridge the gap, the init bool says whether this is a valid gmp value. doinit initializes f.i if it needs it. This is not inherent to FFI, just a mismatch between Go's convention of making zero values useful and gmp's decision not to.
|
func (f *Float) doinit() {
if f.init {
return
}
if f.prec != 0 {
C.mpf_init2(&f.i[0], C.mp_bitcnt_t(f.prec))
} else {
C.mpf_init(&f.i[0])
}
f.init = true
}
|
[
"func (z *Int) doinit() {\n\tif z.init {\n\t\treturn\n\t}\n\tz.init = true\n\tC.mpz_init(&z.i[0])\n}",
"func gmpInit(ctx phpv.Context, args []*phpv.ZVal) (*phpv.ZVal, error) {\n\tvar num *phpv.ZVal\n\tvar base *phpv.ZInt\n\n\t_, err := core.Expand(ctx, args, &num, &base)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar i *big.Int\n\n\tswitch num.GetType() {\n\tcase phpv.ZtNull, phpv.ZtBool, phpv.ZtInt, phpv.ZtFloat:\n\t\tnum, err = num.As(ctx, phpv.ZtInt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti = big.NewInt(int64(num.Value().(phpv.ZInt)))\n\tdefault:\n\t\tnum, err = num.As(ctx, phpv.ZtString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti = &big.Int{}\n\t\tif base == nil {\n\t\t\t_, ok := i.SetString(string(num.AsString(ctx)), 0)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"failed to parse integer\")\n\t\t\t}\n\t\t} else {\n\t\t\t_, ok := i.SetString(string(num.AsString(ctx)), int(*base))\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"failed to parse integer\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn returnInt(ctx, i)\n}",
"func Int128_Init__0(v int) (out Int128) {\n\treturn Int128_Cast__2(int64(v))\n}",
"func TestZero(t *testing.T) {\n\tf := new(fieldVal).SetInt(2)\n\tf.Zero()\n\tfor idx, rawInt := range f.n {\n\t\tif rawInt != 0 {\n\t\t\tt.Errorf(\"internal field integer at index #%d is not \"+\n\t\t\t\t\"zero - got %d\", idx, rawInt)\n\t\t}\n\t}\n}",
"func (i *Int64) Initialize(val int64) {\n\t*i.get() = val\n}",
"func TestNilInt(t *testing.T) {\n\ty := GetIntFlag(\"example\").Value()\n\tassert.Nil(t, y)\n}",
"func (n Noop) Init(_ int) error {\n\treturn nil\n}",
"func (v asmInt) FitsIn(bytes uint) bool {\r\n\t// In fact, 64-bit declarations in JWasm don't limit the value at all.\r\n\tif bytes >= 8 {\r\n\t\treturn true\r\n\t}\r\n\treturn v.n >= -int64(1<<(bytes*8)) &&\r\n\t\tv.n <= int64((1<<(bytes*8)-1))\r\n}",
"func Init[T any, I initialiser[T]](ptr *T, init I) T {\n\tif ptr == nil {\n\t\tpanic(fmt.Sprintf(\"nil pointer exception with pointers.Init for %T\", *new(T)))\n\t}\n\tif val, ok := initAtomic[T, I](ptr, init); ok {\n\t\treturn val\n\t}\n\tif val, ok := initFastPath[T](ptr); ok {\n\t\treturn val\n\t}\n\tvar key = pointersync.Key(ptr)\n\tdefer initLocks.Sync(key)()\n\tif ptr != nil && !isZero(*ptr) {\n\t\treturn *ptr\n\t}\n\t*ptr = initialise[T, I](init)\n\treturn *ptr\n}",
"func zeroval(ival int) {\n\tival = 0\n}",
"func Int128_Init__1(v UntypedBigint) (out Int128) {\n\treturn Int128_Cast__1(v)\n}",
"func EmptyInit(value bool) EmptyAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"init\"] = value\n\t}\n}",
"func InitMpz(mpz *MpzT) {\n\tC.init_mpzp(C.uintptr_t(uintptr(unsafe.Pointer(mpz))))\n}",
"func (t systemIntType) Zero() interface{} {\n\treturn int64(0)\n}",
"func addInit(b *testing.B) {\n\tif addIntSet64Data == nil {\n\t\taddIntSet64Data, addIntSet32Data, addIntMapData = generateSets(*max_range, *count)\n\t\taddValue = getValue(addIntSet64Data, *max_range)\n\t\tb.ResetTimer()\n\t}\n}",
"func IFBInit() error {\n\treturn execModProbe(ifbModuleName, \"numifbs=0\")\n}",
"func imgInit() int {\n\tlog.Printf(\"imgInit()\")\n\tvar f = mod.NewProc(\"img_init\")\n\tret, _, _ := f.Call()\n\treturn int(ret)\n}",
"func ZeroInt(v interface{}) int {\n\ti, err := Int64(v)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn int(i)\n}",
"func zeroValue(iValue int) {\n\tiValue = 0\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set sets f = x and returns f.
|
func (f *Float) Set(x *Float) *Float {
f.doinit()
C.mpf_set(&f.i[0], &x.i[0])
return f
}
|
[
"func (m *mShifterMockShift) Set(f func(p context.Context, p1 insolar.PulseNumber) (r error)) *ShifterMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.ShiftFunc = f\n\treturn m.mock\n}",
"func (m *mHeavySyncMockReset) Set(f func(p context.Context, p1 insolar.ID, p2 insolar.PulseNumber) (r error)) *HeavySyncMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.ResetFunc = f\n\treturn m.mock\n}",
"func (m *mStorageMockUpdate) Set(f func(p context.Context, p1 insolar.PulseNumber, p2 bool, p3 ...insolar.JetID) (r error)) *StorageMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.UpdateFunc = f\n\treturn m.mock\n}",
"func (m *mModifierMockSet) Set(f func(p context.Context, p1 Drop) (r error)) *ModifierMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.SetFunc = f\n\treturn m.mock\n}",
"func (m *mTesterMockFatalf) Set(f func(p string, p1 ...interface{})) *TesterMock {\n\tm.mock.FatalfFunc = f\n\treturn m.mock\n}",
"func (m *mNodeMockPulse) Set(f func() (r core.PulseNumber)) *NodeMock {\n\tm.mock.PulseFunc = f\n\n\treturn m.mock\n}",
"func (m *mActiveNodeMockGetSignatureVerifier) Set(f func() (r cryptkit.SignatureVerifier)) *ActiveNodeMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.GetSignatureVerifierFunc = f\n\treturn m.mock\n}",
"func (m *mActiveNodeMockGetNodeID) Set(f func() (r insolar.ShortNodeID)) *ActiveNodeMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.GetNodeIDFunc = f\n\treturn m.mock\n}",
"func (m *mParcelMockPulse) Set(f func() (r insolar.PulseNumber)) *ParcelMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.PulseFunc = f\n\treturn m.mock\n}",
"func (m *mStorageMockClone) Set(f func(p context.Context, p1 insolar.PulseNumber, p2 insolar.PulseNumber) (r error)) *StorageMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.CloneFunc = f\n\treturn m.mock\n}",
"func (m *mStateStorageMockGetValidationState) Set(f func(p insolar.Reference) (r *ExecutionState)) *StateStorageMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.GetValidationStateFunc = f\n\treturn m.mock\n}",
"func (m *mStorageMockForID) Set(f func(p context.Context, p1 insolar.PulseNumber, p2 insolar.ID) (r insolar.JetID, r1 bool)) *StorageMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.ForIDFunc = f\n\treturn m.mock\n}",
"func (m *mStateStorageMockGetExecutionState) Set(f func(p insolar.Reference) (r *ExecutionBroker)) *StateStorageMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.GetExecutionStateFunc = f\n\treturn m.mock\n}",
"func (m *mObjectDescriptorMockCode) Set(f func() (r *insolar.Reference, r1 error)) *ObjectDescriptorMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.CodeFunc = f\n\treturn m.mock\n}",
"func (m *mNodeMockVersion) Set(f func() (r string)) *NodeMock {\n\tm.mock.VersionFunc = f\n\n\treturn m.mock\n}",
"func (m *mOutboundMockGetRelayID) Set(f func() (r insolar.ShortNodeID)) *OutboundMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.GetRelayIDFunc = f\n\treturn m.mock\n}",
"func (m *mIndexCollectionAccessorMockForPulseAndJet) Set(f func(p context.Context, p1 insolar.PulseNumber, p2 insolar.JetID) (r map[insolar.ID]Lifeline)) *IndexCollectionAccessorMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.ForPulseAndJetFunc = f\n\treturn m.mock\n}",
"func (m *mIndexCollectionAccessorMockForJet) Set(f func(p context.Context, p1 insolar.JetID) (r map[insolar.ID]LifelineMeta)) *IndexCollectionAccessorMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.ForJetFunc = f\n\treturn m.mock\n}",
"func (m *mStateStorageMockUpsertValidationState) Set(f func(p insolar.Reference) (r *ExecutionState)) *StateStorageMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.UpsertValidationStateFunc = f\n\treturn m.mock\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetFloat64 sets f = x and returns f.
|
func (f *Float) SetFloat64(x float64) *Float {
f.doinit()
C.mpf_set_d(&f.i[0], C.double(x))
return f
}
|
[
"func (z *Float) SetFloat64(x float64) *Float {}",
"func (z *Rat) SetFloat64(f float64) *Rat {}",
"func (feature Feature) SetFieldFloat64(index int, value float64) {\n\tC.OGR_F_SetFieldDouble(feature.cval, C.int(index), C.double(value))\n}",
"func (d *Datum) SetFloat64(f float64) {\n\td.k = KindFloat64\n\td.i = int64(math.Float64bits(f))\n}",
"func (c *Configurator) Float64F(name string, value float64, usage string) *float64 {\n\tp := new(float64)\n\n\tc.Float64VarF(p, name, value, usage)\n\n\treturn p\n}",
"func Float64F(name string, value float64, usage string) *float64 {\n\treturn Global.Float64F(name, value, usage)\n}",
"func SetFloat64(value interface{}) (float64, error) {\n\tf, err := setType(value, setFloat64Type)\n\treturn f.(float64), err\n}",
"func (instance *Instance) SetFloat64(fieldName string, value float64) error {\n\tfieldNameCStr := C.CString(fieldName)\n\tdefer C.free(unsafe.Pointer(fieldNameCStr))\n\n\tretcode := int(C.RTI_Connector_set_number_into_samples(unsafe.Pointer(instance.output.connector.native), instance.output.nameCStr, fieldNameCStr, C.double(value)))\n\treturn checkRetcode(retcode)\n}",
"func (cv *ConVar) SetFloat64(value float64) error {\n\treturn cv.write(reflect.Float64, value, 2)\n}",
"func (instance *Instance) SetFloat64(fieldName string, value float64) error {\n\tfieldNameCStr := C.CString(fieldName)\n\tdefer C.free(unsafe.Pointer(fieldNameCStr))\n\n\tC.RTIDDSConnector_setNumberIntoSamples(unsafe.Pointer(instance.output.connector.native), instance.output.nameCStr, fieldNameCStr, C.double(value))\n\treturn nil\n}",
"func (z *Float) SetUint64(x uint64) *Float {}",
"func (c *Configurator) Float64VarF(p *float64, name string, value float64, usage string) {\n\tc.flag().Float64Var(p, name, value, usage)\n}",
"func (f Float) Float64() float64 {\n\tpanic(\"not yet implemented\")\n}",
"func (vf Float64Func) Float64() (*float64, error) {\n\treturn vf()\n}",
"func Float64VarF(p *float64, name string, value float64, usage string) {\n\tGlobal.Float64VarF(p, name, value, usage)\n}",
"func Float64(f float64) *float64 {\n\treturn &f\n}",
"func (fs *FlagSet) Float64Var(name string, def float64, usage string) *Float64Value {\n\tv := &Float64Value{\n\t\tname: name,\n\t\tfs: fs.fs,\n\t}\n\tfs.fs.Float64Var(&v.value, name, def, usage)\n\treturn v\n}",
"func OfFloat64(f float64) Float64 {\n\treturn Float64{value: f, presents: true}\n}",
"func SetFloat(field string, args map[string]interface{}, defaultValue float64) float64 {\n\tvar value float64\n\n\tif args[field] != nil {\n\t\tvalue = convertFloat(args[field])\n\t} else {\n\t\tvalue = defaultValue\n\t}\n\n\treturn value\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetString interprets s as a number in the given base and sets f to that value. The base must be in the range [2,36]. SetString returns an error if s cannot be parsed or the base is invalid.
|
func (f *Float) SetString(s string, base int) error {
f.doinit()
if base < 2 || base > 36 {
return os.ErrInvalid
}
p := C.CString(s)
defer C.free(unsafe.Pointer(p))
if C.mpf_set_str(&f.i[0], p, C.int(base)) < 0 {
return os.ErrInvalid
}
return nil
}
|
[
"func (z *Int) SetString(s string, base int) error {\n\tz.doinit()\n\tif base < 2 || base > 36 {\n\t\treturn os.ErrInvalid\n\t}\n\tp := C.CString(s)\n\tdefer C.free(unsafe.Pointer(p))\n\tif C.mpz_set_str(&z.i[0], p, C.int(base)) < 0 {\n\t\treturn os.ErrInvalid\n\t}\n\treturn nil\n}",
"func (z *Int) SetString(s string, base int) (*Int, bool) {}",
"func IntSetString(z *big.Int, s string, base int) (*big.Int, bool)",
"func SetString(z *big.Int, s string, base int) (*big.Int, bool) {\n\treturn z.SetString(s, base)\n}",
"func FloatSetString(z *big.Float, s string) (*big.Float, bool)",
"func (z *Element17) SetString(s string) *Element17 {\n\tx, ok := new(big.Int).SetString(s, 10)\n\tif !ok {\n\t\tpanic(\"Element17.SetString failed -> can't parse number in base10 into a big.Int\")\n\t}\n\treturn z.SetBigInt(x)\n}",
"func (num *OCINum) SetString(s string) error {\n\ts = strings.TrimSpace(s)\n\tif len(s) == 0 {\n\t\treturn io.EOF\n\t}\n\tif s == \"0\" {\n\t\t*num = OCINum([]byte{128})\n\t\treturn nil\n\t}\n\tvar (\n\t\tdotSeen bool\n\t\tnonZeros, numCount int\n\t)\n\tfor i, r := range s {\n\t\tif '0' <= r && r <= '9' {\n\t\t\tnumCount++\n\t\t\tif numCount == 40 {\n\t\t\t\treturn fmt.Errorf(\"got %d, max 39 (%q): %w\", numCount, s, ErrTooLong)\n\t\t\t}\n\t\t\tif r != '0' {\n\t\t\t\tnonZeros++\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif i == 0 && r == '-' {\n\t\t\tcontinue\n\t\t}\n\t\tif !dotSeen && r == '.' {\n\t\t\tdotSeen = true\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"%c in %q: %w\", r, s, ErrBadCharacter)\n\t}\n\tif numCount == 0 {\n\t\treturn fmt.Errorf(\"%s: %w\", s, ErrNoDigit)\n\t}\n\tif nonZeros == 0 {\n\t\t*num = OCINum([]byte{128})\n\t\treturn nil\n\t}\n\n\t// x = b - 1 <=> b = x + 1\n\tD := func(b byte) byte { return b + 1 }\n\tvar negative bool\n\tif s[0] == '-' {\n\t\tnegative = true\n\t\ts = s[1:]\n\t\t// x = 101 - b <=> b = 101 - x\n\t\tD = func(b byte) byte { return 101 - b }\n\t}\n\ti := len(s)\n\tif j := strings.IndexByte(s, '.'); j >= 0 {\n\t\tif j == 1 && s[0] == '0' {\n\t\t\ts = s[2:]\n\t\t\ti = 0\n\t\t} else {\n\t\t\tif j%2 != 0 {\n\t\t\t\ts = \"0\" + s\n\t\t\t\tj++\n\t\t\t}\n\t\t\ts = s[:j] + s[j+1:]\n\t\t\ti = j\n\t\t}\n\t\tif len(s)%2 == 1 {\n\t\t\ts = s + \"0\"\n\t\t}\n\t} else if len(s)%2 == 1 {\n\t\ts = \"0\" + s\n\t\ti = len(s)\n\t}\n\n\tfor j := len(s) - 2; j > 0 && s[j] == '0' && s[j+1] == '0'; j -= 2 {\n\t\ts = s[:j]\n\t}\n\texp := (i >> 1) - 1\n\n\tn := 1 + (len(s) >> 1) + 1\n\tif n > 21 {\n\t\tn = 21\n\t}\n\tif cap(*num) < n {\n\t\t*num = make([]byte, 1, n)\n\t} else {\n\t\t*num = (*num)[:1]\n\t}\n\tfor i := 0; i < len(s)-1; i += 2 {\n\t\tb := 10*(s[i]-'0') + s[i+1] - '0'\n\t\t*num = append(*num, D(b))\n\t}\n\texp += 65\n\tif negative {\n\t\texp = (^exp) & 0x7f\n\t\tif n < 21 {\n\t\t\t*num = append(*num, 102)\n\t\t}\n\t} else {\n\t\texp |= 
(1 << 7)\n\t}\n\t(*num)[0] = byte(exp)\n\treturn nil\n}",
"func (z *Element29) SetString(s string) *Element29 {\n\tx, ok := new(big.Int).SetString(s, 10)\n\tif !ok {\n\t\tpanic(\"Element29.SetString failed -> can't parse number in base10 into a big.Int\")\n\t}\n\treturn z.SetBigInt(x)\n}",
"func (me *TSAFPTUNNumber) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TDunsNumberBaseType) Set(s string) { (*xsdt.Integer)(me).Set(s) }",
"func (me *TSAFTaxonomyCode) Set(s string) { (*xsdt.Integer)(me).Set(s) }",
"func (me *TSAFPTPortugueseVatNumber) Set(s string) { (*xsdt.Integer)(me).Set(s) }",
"func ParseFloat(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error) {}",
"func setFloat(data [2]string, f func(float64)) error {\n\tval, err := strconv.ParseFloat(strings.TrimSpace(data[1]), 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"code %s: %s\", data[0], err.Error())\n\t}\n\tf(val)\n\treturn nil\n}",
"func (f *File) SetString(in string, typ StringType) (err error) {\n\ts := C.CString(in)\n\tdefer C.free(unsafe.Pointer(s))\n\tif C.sf_set_string(f.s, C.int(typ), s) != 0 {\n\t\terr = errors.New(C.GoString(C.sf_strerror(f.s)))\n\t}\n\treturn\n}",
"func ParseFloat(s string, base int, prec uint, mode big.RoundingMode,) (*big.Float, int, error)",
"func (s *Size) Set(str string) error {\n\tswitch str[len(str)-1] {\n\tcase 'b', 'B':\n\t\tstr = str[:len(str)-1]\n\t}\n\n\tfactor := 0\n\tswitch str[len(str)-1] {\n\tcase 'k', 'K':\n\t\tfactor = 10\n\t\tstr = str[:len(str)-1]\n\tcase 'm', 'M':\n\t\tfactor = 20\n\t\tstr = str[:len(str)-1]\n\tcase 'g', 'G':\n\t\tfactor = 30\n\t\tstr = str[:len(str)-1]\n\t}\n\n\tt, err := strconv.Atoi(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*s = Size(t << factor)\n\treturn nil\n}",
"func (e *Encoder) SetBase(i int) {\n\tif i < 2 {\n\t\ti = 2\n\t}\n\te.base = i\n}",
"func (u256 *Uint256) Set(s string) error {\n\t// TODO It would be really nice to give more guidance here, e.g. the number\n\t// TODO is too big vs simply invalid.\n\tint, ok := math.ParseBig256(s)\n\tif !ok || len(s) == 0 || int.Sign() == -1 {\n\t\treturn fmt.Errorf(\n\t\t\t\"[%v] must be a positive 256-bit or smaller hex or decimal string\",\n\t\t\ts,\n\t\t)\n\t}\n\n\tu256.Uint = int\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
FIXME: Float2Exp is inconsistent, Float642Exp is silly. Convert f to a float64, truncating if necessary (ie. rounding towards zero), and with an exponent returned separately.
|
func (f *Float) Float2Exp() (d float64, exp int) {
var exp_ C.long
d = float64(C.mpf_get_d_2exp(&exp_, &f.i[0]))
exp = int(exp_)
return
}
|
[
"func (f *Float) bigFtoa(buf []byte, fmt byte, prec int) []byte {\n\tif debugFloat && f.IsInf() {\n\t\tpanic(\"non-finite float\")\n\t}\n\n\t// 1) convert Float to multiprecision decimal\n\tvar mant nat\n\tif f.form == finite {\n\t\tmant = f.mant\n\t}\n\tvar d decimal\n\td.init(mant, int(f.exp)-f.mant.bitLen())\n\n\t// 2) round to desired precision\n\tshortest := false\n\tif prec < 0 {\n\t\tshortest = true\n\t\tpanic(\"unimplemented\")\n\t\t// TODO(gri) complete this\n\t\t// roundShortest(&d, f.mant, int(f.exp))\n\t\t// Precision for shortest representation mode.\n\t\tswitch fmt {\n\t\tcase 'e', 'E':\n\t\t\tprec = len(d.mant) - 1\n\t\tcase 'f':\n\t\t\tprec = max(len(d.mant)-d.exp, 0)\n\t\tcase 'g', 'G':\n\t\t\tprec = len(d.mant)\n\t\t}\n\t} else {\n\t\t// round appropriately\n\t\tswitch fmt {\n\t\tcase 'e', 'E':\n\t\t\t// one digit before and number of digits after decimal point\n\t\t\td.round(1 + prec)\n\t\tcase 'f':\n\t\t\t// number of digits before and after decimal point\n\t\t\td.round(d.exp + prec)\n\t\tcase 'g', 'G':\n\t\t\tif prec == 0 {\n\t\t\t\tprec = 1\n\t\t\t}\n\t\t\td.round(prec)\n\t\t}\n\t}\n\n\t// 3) read digits out and format\n\tswitch fmt {\n\tcase 'e', 'E':\n\t\treturn fmtE(buf, fmt, prec, f.neg, d)\n\tcase 'f':\n\t\treturn fmtF(buf, prec, f.neg, d)\n\tcase 'g', 'G':\n\t\t// trim trailing fractional zeros in %e format\n\t\teprec := prec\n\t\tif eprec > len(d.mant) && len(d.mant) >= d.exp {\n\t\t\teprec = len(d.mant)\n\t\t}\n\t\t// %e is used if the exponent from the conversion\n\t\t// is less than -4 or greater than or equal to the precision.\n\t\t// If precision was the shortest possible, use eprec = 6 for\n\t\t// this decision.\n\t\tif shortest {\n\t\t\teprec = 6\n\t\t}\n\t\texp := d.exp - 1\n\t\tif exp < -4 || exp >= eprec {\n\t\t\tif prec > len(d.mant) {\n\t\t\t\tprec = len(d.mant)\n\t\t\t}\n\t\t\treturn fmtE(buf, fmt+'e'-'g', prec-1, f.neg, d)\n\t\t}\n\t\tif prec > d.exp {\n\t\t\tprec = len(d.mant)\n\t\t}\n\t\treturn fmtF(buf, max(prec-d.exp, 
0), f.neg, d)\n\t}\n\n\t// unknown format\n\treturn append(buf, '%', fmt)\n}",
"func (n Number) Float64() (float64, error) {\n\treturn strconv.ParseFloat(string(n), 64)\n}",
"func (f *Float) Mul2Exp(x *Float, s uint) *Float {\n\tx.doinit()\n\tf.doinit()\n\tC.mpf_mul_2exp(&f.i[0], &x.i[0], C.mp_bitcnt_t(s))\n\treturn f\n}",
"func ExpFloat64() float64 { return globalRand.ExpFloat64() }",
"func (f Float) Exp() int {\n\t// 5 bit exponent: 0b0111110000000000\n\treturn int(f.bits & 0x7C00 >> 10)\n}",
"func (f *Float) Div2Exp(x *Float, s uint) *Float {\n\tx.doinit()\n\tf.doinit()\n\tC.mpf_div_2exp(&f.i[0], &x.i[0], C.mp_bitcnt_t(s))\n\treturn f\n}",
"func ExpF(base, exponent *Int) *Int {\n\tz := &Int{1, 0, 0, 0}\n\t// b^0 == 1\n\tif exponent.IsZero() || base.IsOne() {\n\t\treturn z\n\t}\n\t// b^1 == b\n\tif exponent.IsOne() {\n\t\tz.Copy(base)\n\t\treturn z\n\t}\n\tvar (\n\t\tword uint64\n\t\tbits int\n\t)\n\texpBitlen := exponent.BitLen()\n\n\tword = exponent[0]\n\tbits = 0\n\tfor ; bits < expBitlen && bits < 64; bits++ {\n\t\tif word&1 == 1 {\n\t\t\tz.Mul(z, base)\n\t\t}\n\t\tbase.Squared()\n\t\tword >>= 1\n\t}\n\n\tword = exponent[1]\n\tfor ; bits < expBitlen && bits < 128; bits++ {\n\t\tif word&1 == 1 {\n\t\t\tz.Mul(z, base)\n\t\t}\n\t\tbase.Squared()\n\t\tword >>= 1\n\t}\n\n\tword = exponent[2]\n\tfor ; bits < expBitlen && bits < 192; bits++ {\n\t\tif word&1 == 1 {\n\t\t\tz.Mul(z, base)\n\t\t}\n\t\tbase.Squared()\n\t\tword >>= 1\n\t}\n\n\tword = exponent[3]\n\tfor ; bits < expBitlen && bits < 256; bits++ {\n\t\tif word&1 == 1 {\n\t\t\tz.Mul(z, base)\n\t\t}\n\t\tbase.Squared()\n\t\tword >>= 1\n\t}\n\treturn z\n}",
"func op_f64_pow(expr *CXExpression, fp int) {\n\tinp1, inp2, out1 := expr.Inputs[0], expr.Inputs[1], expr.Outputs[0]\n\toutB1 := FromF64(math.Pow(ReadF64(fp, inp1), ReadF64(fp, inp2)))\n\tWriteMemory(GetFinalOffset(fp, out1), outB1)\n}",
"func (v *Value) Float64() float64 {\n\tswitch {\n\tcase v.fvalOk:\n\tcase v.ivalOk:\n\t\tv.fval = float64(v.ival)\n\t\tv.fvalOk = true\n\tcase v.svalOk:\n\t\t// Perform a best-effort conversion from string to float64.\n\t\tv.fval = 0.0\n\t\tstrs := matchFloat.FindStringSubmatch(v.sval)\n\t\tif len(strs) >= 2 {\n\t\t\tv.fval, _ = strconv.ParseFloat(strs[1], 64)\n\t\t}\n\t\tv.fvalOk = true\n\t}\n\treturn v.fval\n}",
"func (f Float) Float64() (float64, big.Accuracy) {\n\tx, nan := f.Big()\n\tif nan {\n\t\tif x.Signbit() {\n\t\t\treturn -math.NaN(), big.Exact\n\t\t}\n\t\treturn math.NaN(), big.Exact\n\t}\n\treturn x.Float64()\n}",
"func (n Number) Float64() float64 {\n\treturn float64(n)\n}",
"func (f Float) Exp() int {\n\t// 0b0111111111111111\n\treturn int(f.se & 0x7FFF)\n}",
"func (f Float) Float64() float64 {\n\tpanic(\"not yet implemented\")\n}",
"func (x *Big) Float64() (f float64, ok bool) {\n\tif debug {\n\t\tx.validate()\n\t}\n\n\tif !x.IsFinite() {\n\t\tswitch x.form {\n\t\tcase pinf, ninf:\n\t\t\treturn math.Inf(int(x.form & signbit)), true\n\t\tcase snan, qnan:\n\t\t\treturn math.NaN(), true\n\t\tcase ssnan, sqnan:\n\t\t\treturn math.Copysign(math.NaN(), -1), true\n\t\t}\n\t}\n\n\tconst (\n\t\tmaxPow10 = 22 // largest exact power of 10\n\t\tmaxMantissa = 1<<53 + 1 // largest exact mantissa\n\t)\n\tswitch xc := x.compact; {\n\tcase !x.isCompact():\n\t\tfallthrough\n\t//lint:ignore ST1015 convoluted, but on purpose\n\tdefault:\n\t\tf, _ = strconv.ParseFloat(x.String(), 64)\n\t\tok = !math.IsInf(f, 0) && !math.IsNaN(f)\n\tcase xc == 0:\n\t\tok = true\n\tcase x.IsInt():\n\t\tif xc, ok := x.Int64(); ok {\n\t\t\tf = float64(xc)\n\t\t} else if xc, ok := x.Uint64(); ok {\n\t\t\tf = float64(xc)\n\t\t}\n\t\tok = xc < maxMantissa || (xc&(xc-1)) == 0\n\tcase x.exp == 0:\n\t\tf = float64(xc)\n\t\tok = xc < maxMantissa || (xc&(xc-1)) == 0\n\tcase x.exp > 0:\n\t\tf = float64(x.compact) * math.Pow10(x.exp)\n\t\tok = x.compact < maxMantissa && x.exp < maxPow10\n\tcase x.exp < 0:\n\t\tf = float64(x.compact) / math.Pow10(-x.exp)\n\t\tok = x.compact < maxMantissa && x.exp > -maxPow10\n\t}\n\n\tif x.form&signbit != 0 {\n\t\tf = math.Copysign(f, -1)\n\t}\n\treturn f, ok\n}",
"func (ec evalContext) evalFloat64(e spansql.Expr) (float64, error) {\n\tv, err := ec.evalExpr(e)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn asFloat64(e, v)\n}",
"func (f *Float) Float64() (float64, big.Accuracy) {\n\tif f.NaN {\n\t\tif f.Signbit() {\n\t\t\treturn -math.NaN(), big.Exact\n\t\t}\n\t\treturn math.NaN(), big.Exact\n\t}\n\treturn f.Float.Float64()\n}",
"func (ef *EncryptedFloat) Float64() (float64, error) {\n\tif ef.StringValue == nil {\n\t\treturn 0, fmt.Errorf(\"EncryptedFloat.Float64: attempting to return nil float64\")\n\t}\n\treturn strconv.ParseFloat(*ef.StringValue, 64)\n}",
"func Float64(f *frm.Field, inp ...string) {\n\tnum, err := strconv.ParseFloat(strings.TrimSpace(inp[0]), 64)\n\tf.Value = num\n\tif err != nil {\n\t\t//Return error if input string failed to convert.\n\t\tf.Err = err.Error()\n\t\treturn\n\t}\n\n\tif !f.Required && num == 0 {\n\t\t//f.ValueFloat64 is zero by default so assigning zero isn't required\n\t\treturn\n\t}\n\n\tif f.Min != nil && num < f.Min.(float64) || f.Max != nil && num > f.Max.(float64) {\n\t\tf.Err = fmt.Sprintf(\"Must be between %v and %v.\", f.Min, f.Max)\n\t\treturn\n\t}\n\n\tif rem := toFixed(math.Mod(num, float64(f.Step)), 6); rem != 0 {\n\t\tf.Err = fmt.Sprintf(\"Please enter a valid value. The two nearest values are %v and %v.\", num-rem, num-rem+float64(f.Step))\n\t}\n}",
"func op_f64_log(expr *CXExpression, fp int) {\n\tinp1, out1 := expr.Inputs[0], expr.Outputs[0]\n\toutB1 := FromF64(math.Log(ReadF64(fp, inp1)))\n\tWriteMemory(GetFinalOffset(fp, out1), outB1)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sub sets f = x y and returns f.
|
func (f *Float) Sub(x, y *Float) *Float {
x.doinit()
y.doinit()
f.doinit()
C.mpf_sub(&f.i[0], &x.i[0], &y.i[0])
return f
}
|
[
"func NewFSub(x, y value.Value) *InstFSub {\n\tinst := &InstFSub{X: x, Y: y}\n\t// Compute type.\n\tinst.Type()\n\treturn inst\n}",
"func NewFSub(x, y value.Value) *InstFSub {\n\treturn &InstFSub{\n\t\tX: x,\n\t\tY: y,\n\t\tMetadata: make(map[string]*metadata.Metadata),\n\t}\n}",
"func (f Fixed) Sub(f0 Fixed) Fixed {\n\t// check overflow\n\tif f.LessThan(f0) {\n\t\tpanic(errOverflow)\n\t}\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn NaN\n\t}\n\treturn Fixed{fp: f.fp - f0.fp}\n}",
"func (f Fixed) Sub(f0 Fixed) Fixed {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn NaN\n\t}\n\treturn Fixed{fp: f.fp - f0.fp}\n}",
"func (z *Int) Sub(x, y *Int) *Int {}",
"func (p Vec) Sub(q Vec) Vec {\n\treturn Sub(p, q)\n}",
"func (v V) Sub(a V) V {\n\treturn V{v.X - a.X, v.Y - a.Y}\n}",
"func (f Fixed8) Sub(g Fixed8) Fixed8 {\n\treturn f - g\n}",
"func (v Vec2) Sub(x Vec2) Vec2 {\n\treturn Vec2{v[0] - x[0], v[1] - x[1]}\n}",
"func (pt Point) Sub(other Point) Point {\n\tpt.X -= other.X\n\tpt.Y -= other.Y\n\treturn pt\n}",
"func (t Torus) Sub(a, b Point) Point {\n\ta, b = t.normPair(a, b)\n\treturn a.Sub(b)\n}",
"func (v *V) Sub(x *V) *V {\n\tif !IsVSameShape(x, v) {\n\t\tpanic(ErrShape)\n\t}\n\tfor i, e := range x.Data {\n\t\tv.Data[i] -= e\n\t}\n\treturn v\n}",
"func (p *Point) Sub(p2 Point) {\n\tp.X -= p2.X\n\tp.Y -= p2.Y\n\tp.Z -= p2.Z\n}",
"func (z *Vec3) Sub(x, y *Vec3) *Vec3 {\n\tz.X, z.Y, z.Z = x.X-y.X, x.Y-y.Y, x.Z-y.Z\n\treturn z\n}",
"func gfSub(a, b gfElement) gfElement {\n\treturn a ^ b\n}",
"func (rg Range) Sub(p Point) Range {\n\trg.Max = rg.Max.Sub(p)\n\trg.Min = rg.Min.Sub(p)\n\treturn rg\n}",
"func (z *E6) Sub(x, y *E6) *E6 {\n\tz.B0.Sub(&x.B0, &y.B0)\n\tz.B1.Sub(&x.B1, &y.B1)\n\tz.B2.Sub(&x.B2, &y.B2)\n\treturn z\n}",
"func (v vector) sub(vec vector) vector {\n\tv.x -= vec.x\n\tv.y -= vec.y\n\tv.z -= vec.z\n\treturn v\n}",
"func (a *Vector) SubP(b Vector) {\n a.X = a.X - b.X\n a.Y = a.Y - b.Y\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Mul sets f = x y and returns f.
|
func (f *Float) Mul(x, y *Float) *Float {
x.doinit()
y.doinit()
f.doinit()
C.mpf_mul(&f.i[0], &x.i[0], &y.i[0])
return f
}
|
[
"func feMul(out *fieldElement, a *fieldElement, b *fieldElement)",
"func (z *Float) Mul(x, y *Float) *Float {\n\t// possible: panic(ErrNaN{\"multiplication of zero with infinity\"})\n}",
"func op_f64_mul(expr *CXExpression, fp int) {\n\tinp1, inp2, out1 := expr.Inputs[0], expr.Inputs[1], expr.Outputs[0]\n\toutB1 := FromF64(ReadF64(fp, inp1) * ReadF64(fp, inp2))\n\tWriteMemory(GetFinalOffset(fp, out1), outB1)\n}",
"func (x f26dot6) mul(y f26dot6) f26dot6 {\n\treturn f26dot6(int64(x) * int64(y) >> 6)\n}",
"func (z *Int) Mul(x, y *Int) *Int {}",
"func (z *E2) Mul(x, y *E2) *E2 {\n\tmulGenericE2(z, x, y)\n\treturn z\n}",
"func NewMul(x, y value.Value) *InstMul {\n\treturn &InstMul{\n\t\tX: x,\n\t\tY: y,\n\t\tMetadata: make(map[string]*metadata.Metadata),\n\t}\n}",
"func (f *Float) Mul(g *Float) *Float {\n\tbf := new(big.Float).Mul(f.Float, g.Float)\n\treturn &Float{Float: bf}\n}",
"func Mul(x, y Number) Number {\n\treturn Number{\n\t\tReal: x.Real * y.Real,\n\t\tE1mag: x.Real*y.E1mag + x.E1mag*y.Real,\n\t\tE2mag: x.Real*y.E2mag + x.E2mag*y.Real,\n\t\tE1E2mag: x.Real*y.E1E2mag + x.E1mag*y.E2mag + x.E2mag*y.E1mag + x.E1E2mag*y.Real,\n\t}\n}",
"func FloatMul(z *big.Float, x, y *big.Float,) *big.Float",
"func (x Vector64) Mul(y float64) Vector64 {\n\tfor i := 0; i < len(x); i++ {\n\t\tx[i] *= y\n\t}\n\treturn x\n}",
"func NewMul(x, y value.Value) *InstMul {\n\tinst := &InstMul{X: x, Y: y}\n\t// Compute type.\n\tinst.Type()\n\treturn inst\n}",
"func Mul(z, x, y *Elt)",
"func (v *Vec3i) SetMul(other Vec3i) {\n\tv.X *= other.X\n\tv.Y *= other.Y\n\tv.Z *= other.Z\n}",
"func (v vector) mul(s float64) vector {\n\tv.x *= s\n\tv.y *= s\n\tv.z *= s\n\treturn v\n}",
"func Mul(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_mul(C.term_t(t1), C.term_t(t2)))\n}",
"func (x *Vector) Mul(v *Vector) *Vector {\n\tx.X *= v.X\n\tx.Y *= v.Y\n\treturn x\n}",
"func (x *Nat) Mul(y *Nat, m *Modulus) *Nat {\n\t// A Montgomery multiplication by a value out of the Montgomery domain\n\t// takes the result out of Montgomery representation.\n\txR := NewNat().set(x).montgomeryRepresentation(m) // xR = x * R mod m\n\treturn x.montgomeryMul(xR, y, m) // x = xR * y / R mod m\n}",
"func polyMul(x, y poly) (res poly) {\n\tfor y != 0 {\n\t\tif (y & 1) != 0 {\n\t\t\tres = res ^ x\n\t\t}\n\t\ty = y >> 1\n\t\tx = x << 1\n\t}\n\treturn res\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Div sets f = x / y and returns f.
|
func (f *Float) Div(x, y *Float) *Float {
x.doinit()
y.doinit()
f.doinit()
C.mpf_div(&f.i[0], &x.i[0], &y.i[0])
return f
}
|
[
"func Command_Div(script *rex.Script, params []*rex.Value) {\n\tif len(params) != 2 {\n\t\trex.ErrorParamCount(\"float:div\", \"2\")\n\t}\n\n\tscript.RetVal = rex.NewValueFloat64(params[0].Float64() / params[1].Float64())\n\treturn\n}",
"func (f Fixed) Div(f0 Fixed) Fixed {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn NaN\n\t}\n\treturn NewF(f.Float() / f0.Float())\n}",
"func Div(a float64, b float64) (float64, error) {\n\tif b == 0 {\n\t\treturn 0.0, errors.New(\"Can't devide by zero\")\n\t}\n\n\treturn a / b, nil\n}",
"func (f Fixed) Div(f0 Fixed) Fixed {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn NaN\n\t}\n\treturn NewFromFloat(f.Float() / f0.Float())\n}",
"func div(x, y int) (answer int, err error) {\n\tif y == 0 {\n\t\terr = fmt.Errorf(\"Cannot Divid by zero\")\n\t} else {\n\t\tanswer = x / y\n\t}\n\treturn\n}",
"func (x f26dot6) div(y f26dot6) f26dot6 {\n\treturn f26dot6((int64(x) << 6) / int64(y))\n}",
"func Div( a *context.Value, b *context.Value ) (*context.Value,error) {\n if a != nil && b != nil {\n if b.IsZero() {\n return nil, errors.New( \"Division by zero\")\n }\n\n switch a.OperationType( b ) {\n case context.VAR_BOOL:\n return context.IntValue( a.Int() / b.Int() ), nil\n case context.VAR_INT:\n return context.FloatValue( a.Float() / b.Float() ), nil\n case context.VAR_FLOAT:\n return context.FloatValue( a.Float() / b.Float() ), nil\n case context.VAR_COMPLEX:\n return context.ComplexValue( a.Complex() / b.Complex() ), nil\n }\n }\n\n return nil, errors.New( \"Unsupported type for div\" )\n}",
"func Divide() {\n\tMatch('/')\n\tFactor()\n\tEmitLn(\"MOVE (SP)+,D1\")\n\tEmitLn(\"DIVS D1,D0\")\n}",
"func (z *Float64) Divide(y *Float64, a float64) *Float64 {\n\tz.l = y.l / a\n\tz.r = y.r / a\n\treturn z\n}",
"func Div(valueA gcv.Value, valueB gcv.Value) gcv.Value {\n\tif valueA.Type() == gcv.Complex || valueB.Type() == gcv.Complex {\n\t\treturn gcv.MakeValue(valueA.Complex() / valueB.Complex())\n\t}\n\treturn gcv.MakeValue(valueA.Real() / valueB.Real())\n}",
"func Div(a, b int) (int, error) {\n\tif b == 0 {\n\t\treturn 0, errors.New(\"can't divide by 0\")\n\t}\n\n\treturn a / b, nil\n\n}",
"func (cal *Calculate) div(value float64) (result float64) {\n\tif len(cal.Arg) == 2 {\n\t\treturn (cal.Arg[0] / cal.Arg[1])\n\t} else if len(cal.Arg) == 1 {\n\n\t\treturn (value / cal.Arg[0])\n\t}\n\n\tlog.Fatalln(\"Please check the data format of the calculation unit\")\n\treturn\n}",
"func Divide() {\n\tMatch('/')\n\tFactor()\n\tEmitLn(\"MOVE (SP)+,D1\")\n\tEmitLn(\"EXS.L D0\")\n\tEmitLn(\"DIVS D1,D0\")\n}",
"func Divide(a, operand int) int { return operand / a }",
"func (v *Vec4f) Div(scalar float32) {\n\tv.X /= scalar\n\tv.Y /= scalar\n\tv.Z /= scalar\n\tv.W /= scalar\n}",
"func (a Vector) Div(b float64) Vector {\n return Vector{a.X / b, a.Y / b}\n}",
"func (a Vec2) Div(b Vec2) Vec2 {\n\treturn Vec2{a.X / b.X, a.Y / b.Y}\n}",
"func Divisor(x int, y int) (resultado int) {\n\tresultado = x / y\n\treturn\n}",
"func op_f64_div(expr *CXExpression, fp int) {\n\tinp1, inp2, out1 := expr.Inputs[0], expr.Inputs[1], expr.Outputs[0]\n\toutB1 := FromF64(ReadF64(fp, inp1) / ReadF64(fp, inp2))\n\tWriteMemory(GetFinalOffset(fp, out1), outB1)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sqrt sets f = Sqrt(x) and returns f.
|
func (f *Float) Sqrt(x *Float) *Float {
x.doinit()
f.doinit()
C.mpf_sqrt(&f.i[0], &x.i[0])
return f
}
|
[
"func Sqrt(x float64) (float64, error) {\r\n\tif (x < 0) {\r\n\t\treturn 0, ErrNegativeSqrt(x)\r\n\t}\r\n\r\n\tz := 1.0\r\n\teps := 1E-6\r\n\r\n\t// anonymous function\r\n\tf := func(x, z float64) float64 { \r\n \treturn - (z*z - x) / (2*z)\r\n \t} \r\n\r\n\tfor math.Abs(f(x, z)) > eps {\r\n\t\tz += f(x, z)\r\n\t}\r\n\treturn z, nil\r\n}",
"func Sqrt(x int) int {\n\treturn neogointernal.Opcode1(\"SQRT\", x).(int)\n}",
"func Sqrt(arg float64) float64 {\n\treturn math.Sqrt(arg)\n}",
"func Sqrt(z, x *decimal.Big) *decimal.Big {\n\tif xs := x.Sign(); xs <= 0 {\n\t\tif xs == 0 {\n\t\t\treturn z.SetMantScale(0, 0)\n\t\t}\n\t\tz.SetNaN(false)\n\t\treturn signal(z,\n\t\t\tdecimal.InvalidOperation,\n\t\t\terrors.New(\"math.Sqrt: cannot take square root of negative number\"),\n\t\t)\n\t}\n\tif snan := x.IsNaN(true); snan || x.IsNaN(false) {\n\t\tx.SetNaN(snan)\n\t\treturn signal(z,\n\t\t\tdecimal.InvalidOperation, decimal.ErrNaN{\"square root of NaN\"})\n\t}\n\tif x.IsInf(1) {\n\t\treturn z.SetInf(false)\n\t}\n\n\tzcp := z.Context.Precision()\n\n\t// Fast path #1: use math.Sqrt if our decimal is small enough. 0 and 22\n\t// are implementation details of the Float64 method. If Float64 is altered,\n\t// change them.\n\tif zcp <= 16 && (x.Scale() >= 0 && x.Scale() < 22) {\n\t\treturn z.SetFloat64(math.Sqrt(x.Float64())).Round(zcp)\n\t}\n\n\t// Fast path #2: x is a small perfect square.\n\tif x.IsInt() && !x.IsBig() {\n\t\tswitch xc := x.Int64(); xc & 0xF {\n\t\tcase 0, 1, 4, 9:\n\t\t\t// \"Show that floating point sqrt(x*x) >= x for all long x.\"\n\t\t\t// https://math.stackexchange.com/a/238885/153292\n\t\t\tsqrt := int64(math.Sqrt(float64(xc)))\n\t\t\tif sqrt*sqrt == xc {\n\t\t\t\treturn z.SetMantScale(sqrt, 0)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Source for the following algorithm:\n\t//\n\t// T. E. Hull and A. Abrham. 1985. Properly rounded variable precision\n\t// square root. ACM Trans. Math. Softw. 11, 3 (September 1985), 229-237.\n\t// DOI: https://doi.org/10.1145/214408.214413\n\n\tvar (\n\t\txp = int32(x.Precision())\n\n\t\t// The algorithm requires a normalized ``f β [0.1, 1)'' Of the two ways\n\t\t// to normalize f, adjusting its scale is the quickest. 
However, it then\n\t\t// requires us to increment approx's scale by e/2 instead of simply\n\t\t// setting it to e/2.\n\t\tf = new(decimal.Big).Copy(x).SetScale(xp)\n\n\t\t// It also means we have to adjust e to equal out the sale adjustment.\n\t\te = xp - x.Scale()\n\n\t\ttmp decimal.Big\n\t\tapprox = alias(z, x)\n\t)\n\n\tif e&1 == 0 {\n\t\tapprox.Add(approx1, tmp.Mul(approx2, f)) // approx := .259 + .819f\n\t} else {\n\t\tf.Quo(f, ten) // f := f/10\n\t\te++ // e := e + 1\n\t\tapprox.Add(approx3, tmp.Mul(approx4, f)) // approx := 0.819 + 2.59*f\n\t}\n\n\tvar (\n\t\tmaxp = zcp + 2\n\t\tp int32 = 3\n\t)\n\n\tfor p < maxp {\n\t\t// p := min(2*p - 2, maxp)\n\t\tif p = 2*p - 2; p > maxp {\n\t\t\tp = maxp\n\t\t}\n\t\t// precision p\n\t\ttmp.Context.SetPrecision(p)\n\t\t// approx := .5*(approx + f/approx)\n\t\tapprox.Mul(ptFive, tmp.Add(approx, tmp.Quo(f, approx)))\n\t}\n\n\t// The paper also specifies an additional code block for adjusting approx.\n\t// This code never went into the branches that modified approx, rounding\n\t// to half even does the same thing. The GDA spec requires us to use\n\t// rounding mode half even (speleotrove.com/decimal/daops.html#refsqrt)\n\t// anyway.\n\n\tapprox.Context.RoundingMode = decimal.ToNearestEven\n\treturn z.Set(approx.SetScale(approx.Scale() - e/2).Round(zcp))\n}",
"func Sqrt(x float64) (float64, error) {\n\tif x < 0 {\n\t\treturn 0, ErrNegativeSqrt(x)\n\t}\n\t\n\tz := 1.0\n\tvar y float64\n\titers := 0\n\tsens := 1e-10\n\tfmt.Println(\"Printing iterations of Newton's Method:\")\n\tfor {\n\t\ty -= (z*z - x) / (2*z)\n\t\tif Abs(z - y) < sens {\n\t\t\tbreak\n\t\t}\n\t\titers++\n\t\tz = y\n\t\tfmt.Println(z)\n\t}\n\t\n\tfmt.Println(\"Number of iterations:\", iters)\n\treturn z, nil\n}",
"func Sqrt(a Tensor, opts ...FuncOpt) (retVal Tensor, err error) {\n\tswitch t := a.(type) {\n\tcase *Dense:\n\t\tif t.IsMaterializable() {\n\t\t\tvar f interface{}\n\t\t\tswitch t.t.Kind() {\n\t\t\tcase reflect.Float64:\n\t\t\t\tf = math.Sqrt\n\t\t\tcase reflect.Float32:\n\t\t\t\tf = math32.Sqrt\n\t\t\tdefault:\n\t\t\t\terr = errors.Errorf(\"Sqrt only works on floats\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn t.Apply(f, opts...)\n\t\t}\n\t\tif !isFloat(t.t) {\n\t\t\terr = errors.Errorf(\"Sqrt only works on floats\")\n\t\t\treturn\n\t\t}\n\n\t\t// otherwise, we have optimizations for this\n\t\tvar reuse *Dense\n\t\tvar safe, toReuse, incr bool\n\t\tif reuse, safe, toReuse, incr, err = prepUnaryDense(t, opts...); err != nil {\n\t\t\terr = errors.Wrapf(err, opFail, \"PointwiseSquare\")\n\t\t\treturn\n\t\t}\n\n\t\tswitch {\n\t\tcase incr:\n\t\t\tcloned := t.Clone().(*Dense)\n\t\t\tswitch t.t.Kind() {\n\t\t\tcase reflect.Float64:\n\t\t\t\tvecf64.Sqrt(cloned.float64s())\n\t\t\tcase reflect.Float32:\n\t\t\t\tvecf32.Sqrt(cloned.float32s())\n\t\t\t}\n\t\t\t_, err = reuse.Add(cloned, UseUnsafe())\n\t\t\tretVal = reuse\n\t\tcase toReuse:\n\t\t\tcopyDense(reuse, t)\n\t\t\tswitch t.t.Kind() {\n\t\t\tcase reflect.Float64:\n\t\t\t\tvecf64.Sqrt(reuse.float64s())\n\t\t\tcase reflect.Float32:\n\t\t\t\tvecf32.Sqrt(reuse.float32s())\n\t\t\t}\n\t\t\tretVal = reuse\n\t\tcase safe:\n\t\t\tcloned := t.Clone().(*Dense)\n\t\t\tswitch t.t.Kind() {\n\t\t\tcase reflect.Float64:\n\t\t\t\tvecf64.Sqrt(cloned.float64s())\n\t\t\tcase reflect.Float32:\n\t\t\t\tvecf32.Sqrt(cloned.float32s())\n\t\t\t}\n\t\t\tretVal = cloned\n\t\tcase !safe:\n\t\t\tswitch t.t.Kind() {\n\t\t\tcase reflect.Float64:\n\t\t\t\tvecf64.Sqrt(t.float64s())\n\t\t\tcase reflect.Float32:\n\t\t\t\tvecf32.Sqrt(t.float32s())\n\t\t\t}\n\t\t\tretVal = t\n\t\t}\n\tdefault:\n\t\tpanic(\"NYI - not yet implemented\")\n\t}\n\treturn\n}",
"func (g *Graph) Sqrt(x Node) Node {\n\treturn g.NewOperator(fn.NewSqrt(x), x)\n}",
"func (z *Decimal) Sqrt(x *Decimal) *Decimal {\n\tif debugDecimal {\n\t\tx.validate()\n\t}\n\n\tif z.prec == 0 {\n\t\tz.prec = x.prec\n\t}\n\n\tif x.Sign() == -1 {\n\t\t// following IEEE754-2008 (section 7.2)\n\t\tpanic(ErrNaN{\"square root of negative operand\"})\n\t}\n\n\t// handle Β±0 and +β\n\tif x.form != finite {\n\t\tz.acc = Exact\n\t\tz.form = x.form\n\t\tz.neg = x.neg // IEEE754-2008 requires βΒ±0 = Β±0\n\t\treturn z\n\t}\n\n\t// MantExp sets the argument's precision to the receiver's, and\n\t// when z.prec > x.prec this will lower z.prec. Restore it after\n\t// the MantExp call.\n\tprec := z.prec\n\tb := x.MantExp(z)\n\tz.prec = prec\n\n\t// Compute β(zΒ·10**b) as\n\t// β( z)Β·10**(Β½b) if b is even\n\t// β(10z)Β·10**(βΒ½bβ) if b > 0 is odd\n\t// β(z/10)Β·10**(βΒ½bβ) if b < 0 is odd\n\tswitch b % 2 {\n\tcase 0:\n\t\t// nothing to do\n\tcase 1:\n\t\tz.exp++\n\tcase -1:\n\t\tz.exp--\n\t}\n\t// 0.01 <= z < 10.0\n\n\t// Unlike with big.Float, solving xΒ² - z = 0 directly is faster only for\n\t// very small precisions (<_DW/2).\n\t//\n\t// Solve 1/xΒ² - z = 0 instead.\n\tz.sqrtInverse(z)\n\n\t// restore precision and re-attach halved exponent\n\treturn z.SetMantExp(z, b/2)\n}",
"func Sqrt(value gcv.Value) gcv.Value {\n\tif value.Type() == gcv.Complex {\n\t\treturn gcv.MakeValue(cmplx.Sqrt(value.Complex()))\n\t}\n\treturn gcv.MakeValue(math.Sqrt(value.Real()))\n}",
"func (self *State)Sqrt(a any)any{\n self.IncOperations(self.coeff[\"sqrt\"]+self.off[\"sqrt\"])\n return wrap1(a,math.Sqrt)\n}",
"func (d Decimal) Sqrt() Decimal {\n\ts, _ := d.SqrtRound(int32(DivisionPrecision))\n\treturn s\n}",
"func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\treturn simpleFunc(vals, enh, math.Sqrt)\n}",
"func op_f64_sqrt(expr *CXExpression, fp int) {\n\tinp1, out1 := expr.Inputs[0], expr.Outputs[0]\n\toutB1 := FromF64(math.Sqrt(ReadF64(fp, inp1)))\n\tWriteMemory(GetFinalOffset(fp, out1), outB1)\n}",
"func (v Vector[T]) Sqrt() Vector[T] {\n\treturn New(\n\t\tT(math.Sqrt(float64(v.x))),\n\t\tT(math.Sqrt(float64(v.y))),\n\t\tT(math.Sqrt(float64(v.z))),\n\t\tT(math.Sqrt(float64(v.w))),\n\t)\n}",
"func SquareRoot(x float64) float64 {\n\treturn squareRootItr(1, x)\n}",
"func sqrt(x float64, z float64) float64{\n\n\tvar z_next float64 = z - (((z*z) - x) / (2 * z))\n\t\n\t//Returning the answer\n\treturn z_next \n}",
"func SQRTSD(mx, x operand.Op) { ctx.SQRTSD(mx, x) }",
"func findSqrt(x, y float64, z int) float64 {\n\t// track x before/after increment\n\tvar xNew float64 = x\n\n\t// repeat for z iterations\n\tfor i := 1; i <= z; i++ {\n\t\t// store current version of x\n\t\tvar xOld = xNew\n\n\t\t// calculate distance between square of x and actual y\n\t\t// scale down difference using derivative function (i.e. / 2x)\n\t\t// decrement x to get closer to actual square root\n\t\txNew -= ((xNew * xNew) - y) / (2 * xNew)\n\n\t\t// show progress\n\t\tfmt.Println(xNew, xNew * xNew, y)\n\n\t\t// return output if value has converged (i.e. found perfect square root)\n\t\tif xNew - xOld == 0 {\n\t\t\t// return final approximation of sqrt\n\t\t\treturn xNew\n\t\t}\n\t}\n\n\t// return final approximation of sqrt\n\treturn x\n}",
"func FloatSqrt(z *big.Float, x *big.Float,) *big.Float"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PowUint sets f = x^y and returns f
|
func (f *Float) PowUint(x *Float, y uint) *Float {
x.doinit()
f.doinit()
C.mpf_pow_ui(&f.i[0], &x.i[0], C.ulong(y))
return f
}
|
[
"func op_f64_pow(expr *CXExpression, fp int) {\n\tinp1, inp2, out1 := expr.Inputs[0], expr.Inputs[1], expr.Outputs[0]\n\toutB1 := FromF64(math.Pow(ReadF64(fp, inp1), ReadF64(fp, inp2)))\n\tWriteMemory(GetFinalOffset(fp, out1), outB1)\n}",
"func bigPow(x int, y int) *big.Float {\n\tone := new(big.Float).SetInt64(1)\n\tif y == 0 {\n\t\treturn one\n\t}\n\tbigX := new(big.Float).SetInt64(int64(x))\n\tproduct := new(big.Float).Copy(bigX)\n\t// Always calculate positive power, inverse later.\n\tisNeg := false\n\tif y < 0 {\n\t\tisNeg = true\n\t\ty = -y\n\t}\n\tfor i := int(0); i < y-1; i++ {\n\t\tproduct = new(big.Float).Mul(product, bigX)\n\t}\n\tif isNeg {\n\t\treturn new(big.Float).Quo(one, product)\n\t}\n\treturn product\n}",
"func intPow(a, b int) uint64 {\n\tr := uint64(1)\n\tfor b > 0 {\n\t\tr *= uint64(a)\n\t\tb--\n\t}\n\treturn r\n}",
"func (self *State)Pow(a,b any)any{\n self.IncOperations(self.coeff[\"pow\"]+self.off[\"pow\"])\n return wrap2(a,b,math.Pow)\n}",
"func pow(x, n int8) int8 {\n\tif n == 0 {\n\t\treturn 1\n\t} else {\n\t\treturn x * pow(x, n-1)\n\t}\n}",
"func pow(a, b int) int {\n\tp := 1\n\tfor b > 0 {\n\t\tif b&1 != 0 {\n\t\t\tp *= a\n\t\t}\n\t\tb >>= 1\n\t\ta *= a\n\t}\n\treturn p\n}",
"func power_of_two(n int) uint64 {\n return uint64(math.Pow(2.0, float64(n)))\n}",
"func Pow(a, b int) int {\n\treturn neogointernal.Opcode2(\"POW\", a, b).(int)\n}",
"func (*Math) Pow(x float64, y float64) float64 {\n\treturn math.Pow(x, y)\n}",
"func Pow(a, n uint) (uint, error) {\n\tconst op = \"Computing power of unsigned integer\"\n\n\t// Ensure that this will not overflow\n\tif a > 0 && boundLog2(a)*n >= bits.UintSize {\n\t\treturn 0, errors.New(\n\t\t\top, errors.Overflow,\n\t\t\t\"%d^%d is likely to overflow uint\", a, n,\n\t\t)\n\t}\n\n\tres := uint(1)\n\ttmp := a\n\tfor ; n > 0; n >>= 1 {\n\t\tif (n & 1) == 1 {\n\t\t\tres *= tmp\n\t\t}\n\t\ttmp *= tmp\n\t}\n\treturn res, nil\n}",
"func Pow(x int, y int) int {\n\n\tres := x\n\n\tswitch {\n\tcase y == 0:\n\t\treturn 1\n\tcase y == 1:\n\t\treturn x\n\tdefault:\n\t\tfor i := 1; i < y; i++ {\n\t\t\tres *= x\n\t\t}\n\t\treturn res\n\t}\n}",
"func floorPow2(v int) int {}",
"func FastPow(base, pow int) int {\n\tif pow < 0 {\n\t\treturn 0\n\t}\n\tres := 1\n\tfor pow > 0 {\n\t\tif pow%2 == 1 {\n\t\t\tres *= base\n\t\t}\n\t\tbase *= base\n\t\tpow >>= 1\n\t}\n\treturn res\n}",
"func (x f26dot6) mul(y f26dot6) f26dot6 {\n\treturn f26dot6(int64(x) * int64(y) >> 6)\n}",
"func polyMul(x, y poly) (res poly) {\n\tfor y != 0 {\n\t\tif (y & 1) != 0 {\n\t\t\tres = res ^ x\n\t\t}\n\t\ty = y >> 1\n\t\tx = x << 1\n\t}\n\treturn res\n}",
"func (cal *Calculate) pow(value float64) (result float64) {\n\tif len(cal.Arg) == 2 {\n\t\treturn math.Pow(cal.Arg[0], cal.Arg[1])\n\t} else if len(cal.Arg) == 1 {\n\t\treturn math.Pow(value, cal.Arg[0])\n\t}\n\n\tlog.Fatalln(\"Please check the data format of the calculation unit\")\n\treturn\n}",
"func ModPow(x, y, m *big.Int) *big.Int {\n\tif y.Sign() == -1 {\n\t\tt := new(big.Int).ModInverse(x, m)\n\t\treturn t.Exp(t, new(big.Int).Neg(y), m)\n\t}\n\treturn new(big.Int).Exp(x, y, m)\n}",
"func FastFloatPow(base float64, pow int) float64 {\n\tif pow < 0 {\n\t\treturn 1 / FastFloatPow(base, -pow)\n\t}\n\tres := 1.0\n\tfor pow > 0 {\n\t\tif pow%2 == 1 {\n\t\t\tres *= base\n\t\t}\n\t\tbase *= base\n\t\tpow >>= 1\n\t}\n\treturn res\n}",
"func bigPow(a, b int64) *big.Int {\n\tr := big.NewInt(a)\n\treturn r.Exp(r, big.NewInt(b), nil)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Mul2Exp sets z = x 2^s and returns z.
|
func (f *Float) Mul2Exp(x *Float, s uint) *Float {
x.doinit()
f.doinit()
C.mpf_mul_2exp(&f.i[0], &x.i[0], C.mp_bitcnt_t(s))
return f
}
|
[
"func (z *Int) Exp(x, y, m *Int) *Int {}",
"func Exp2(x *internal.Decimal) (*internal.Decimal, error) {\n\tvar d internal.Decimal\n\t_, err := apdContext.Pow(&d, two, x)\n\treturn &d, err\n}",
"func (z *Int) Exp(x, y, m *Int) *Int {\n\tm.doinit()\n\tx.doinit()\n\ty.doinit()\n\tz.doinit()\n\tif m == nil {\n\t\tC.mpz_pow_ui(&z.i[0], &x.i[0], C.mpz_get_ui(&y.i[0]))\n\t} else {\n\t\tC.mpz_powm(&z.i[0], &x.i[0], &y.i[0], &m.i[0])\n\t}\n\treturn z\n}",
"func (evaluator *Evaluator) MulByPow2(c0 CkksElement, pow2 uint64, cOut CkksElement) (err error) {\n\n\tvar level uint64\n\n\tif level, err = checkLevels([]CkksElement{c0, cOut}); err != nil {\n\t\treturn err\n\t}\n\n\tfor i := range cOut.Value() {\n\t\tevaluator.ckkscontext.contextLevel[level].MulByPow2(c0.Value()[i], pow2, cOut.Value()[i])\n\t}\n\n\treturn nil\n}",
"func Exp(z, n *inf.Dec, s inf.Scale) *inf.Dec {\n\ts += 2\n\tnn := new(inf.Dec).Set(n)\n\tif z == nil {\n\t\tz = new(inf.Dec)\n\t\tz.SetUnscaled(1).SetScale(0)\n\t} else {\n\t\tz.SetUnscaled(1).SetScale(0)\n\t}\n\n\t// We are computing (e^n) by splitting n into an integer and a float\n\t// (e.g 3.1 ==> x = 3, y = 0.1), this allows us to write\n\t// e^n = e^(x+y) = e^x * e^y\n\n\t// Split out x (integer(n))\n\tx := new(inf.Dec).Round(nn, 0, inf.RoundDown)\n\n\t// Split out y (n - x) which is < 1\n\ty := new(inf.Dec).Sub(nn, x)\n\n\t// convert x to integer\n\tinteger, ok := x.Unscaled()\n\tif !ok {\n\t\tpanic(\"integer out of range\")\n\t}\n\n\tex := integerPower(z, new(inf.Dec).Set(decimalE), integer, s+2)\n\treturn smallExp(ex, y, s-2)\n}",
"func Pow(z, x, y *inf.Dec, s inf.Scale) (*inf.Dec, error) {\n\ts = s + 2\n\tif z == nil {\n\t\tz = new(inf.Dec)\n\t\tz.SetUnscaled(1).SetScale(0)\n\t}\n\n\t// Check if y is of type int.\n\ttmp := new(inf.Dec).Abs(y)\n\tisInt := tmp.Cmp(new(inf.Dec).Round(tmp, 0, inf.RoundDown)) == 0\n\n\txs := x.Sign()\n\tif xs == 0 {\n\t\tswitch y.Sign() {\n\t\tcase 0:\n\t\t\treturn z.SetUnscaled(1).SetScale(0), nil\n\t\tcase 1:\n\t\t\treturn z.SetUnscaled(0).SetScale(0), nil\n\t\tdefault: // -1\n\t\t\t// undefined for y < 0\n\t\t\treturn nil, errPowZeroNegative\n\t\t}\n\t}\n\n\tneg := xs < 0\n\n\tif !isInt && neg {\n\t\treturn nil, errPowNegNonInteger\n\t}\n\n\t// Exponent Precision Explanation (RaduBerinde):\n\t// Say we compute the Log with a scale of k. That means that the result we get is:\n\t// ln x +/- 10^-k.\n\t// This leads to an error of y * 10^-k in the exponent, which leads to a\n\t// multiplicative error of e^(y*10^-k) in the result.\n\t// For small values of u, e^u can be approximated by 1 + u, so for large k\n\t// that error is around 1 + y*10^-k. So the additive error will be x^y * y * 10^-k,\n\t// and we want this to be less than 10^-s. This approximately means that k has to be\n\t// s + the number of digits before the decimal point in x^y. 
Which roughly is\n\t//\n\t// s + <the number of digits before decimal point in x> * y.\n\t//\n\t// exponent precision = s + <the number of digits before decimal point in x> * y.\n\tnumDigits := float64(x.UnscaledBig().BitLen()) / digitsToBitsRatio\n\tnumDigits -= float64(x.Scale())\n\n\t// Round up y which should provide us with a threshold in calculating the new scale.\n\tyu := float64(new(inf.Dec).Round(y, 0, inf.RoundUp).UnscaledBig().Int64())\n\n\t// exponent precision = s + <the number of digits before decimal point in x> * y\n\tes := s + inf.Scale(numDigits*yu)\n\tif es < 0 || es > maxPrecision {\n\t\treturn nil, errArgumentTooLarge\n\t}\n\n\ttmp = new(inf.Dec).Abs(x)\n\t_, err := Log(tmp, tmp, es)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttmp.Mul(tmp, y)\n\tExp(tmp, tmp, es)\n\n\tif neg && y.Round(y, 0, inf.RoundDown).UnscaledBig().Bit(0) == 1 {\n\t\ttmp.Neg(tmp)\n\t}\n\n\t// Round to the desired scale.\n\treturn z.Round(tmp, s-2, inf.RoundHalfUp), nil\n}",
"func (z *E2) Mul(x, y *E2) *E2 {\n\tmulGenericE2(z, x, y)\n\treturn z\n}",
"func (f *Float) Div2Exp(x *Float, s uint) *Float {\n\tx.doinit()\n\tf.doinit()\n\tC.mpf_div_2exp(&f.i[0], &x.i[0], C.mp_bitcnt_t(s))\n\treturn f\n}",
"func Mul(z, x, y *Elt)",
"func (z *Int) Exp(base, exponent *Int) *Int {\n\treturn z.Copy(ExpF(base, exponent))\n}",
"func (z *E6) MulByE2(x *E6, y *E2) *E6 {\n\tvar yCopy E2\n\tyCopy.Set(y)\n\tz.B0.Mul(&x.B0, &yCopy)\n\tz.B1.Mul(&x.B1, &yCopy)\n\tz.B2.Mul(&x.B2, &yCopy)\n\treturn z\n}",
"func pow2(exp int) int {\n\tif exp < 0 || exp > 31 {\n\t\tpanic(\"exponent must be between 0 and 31\")\n\t}\n\tn := 1\n\tfor i := 0; i < exp; i++ {\n\t\tn *= 2\n\t}\n\treturn n\n}",
"func (evaluator *Evaluator) MulByPow2New(c0 CkksElement, pow2 uint64) (cOut CkksElement, err error) {\n\n\tcOut = evaluator.ckkscontext.NewCiphertext(1, c0.Level(), c0.Scale())\n\n\tif err = evaluator.MulByPow2(c0, pow2, cOut); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cOut, nil\n}",
"func (z *Int) Mul(x, y *Int) *Int {}",
"func FloatSetMantExp(z *big.Float, mant *big.Float, exp int) *big.Float",
"func exp2(e uint8) int64 {\n\tif e == 0 {\n\t\treturn 1\n\t}\n\tb := int64(2)\n\tfor i := uint8(1); i < e; i++ {\n\t\tb *= 2\n\t}\n\treturn b\n}",
"func zSquared(zRe, zIm, cRe, cIm float64) (float64, float64) {\n zReNext := cRe - math.Pow(zIm, 2) + math.Pow(zRe, 2)\n zImNext := cIm + 2*zIm*zRe \n return zReNext, zImNext \n}",
"func (base *LRSBase) zi2 (h, si, y0, ci *big.Int) *big.Int {\n\ta := new(big.Int).Exp(h, si, base.P)\n\tb := new(big.Int).Exp(y0, ci, base.P)\n\treturn a.Mul(a, b).Mod(a, base.P)\n}",
"func Exp(x, y, m *big.Int) *big.Int {\n\treturn big.NewInt(0).Exp(x, y, m)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Div2Exp sets z = x / 2^s and returns z.
|
func (f *Float) Div2Exp(x *Float, s uint) *Float {
x.doinit()
f.doinit()
C.mpf_div_2exp(&f.i[0], &x.i[0], C.mp_bitcnt_t(s))
return f
}
|
[
"func Exp(z, n *inf.Dec, s inf.Scale) *inf.Dec {\n\ts += 2\n\tnn := new(inf.Dec).Set(n)\n\tif z == nil {\n\t\tz = new(inf.Dec)\n\t\tz.SetUnscaled(1).SetScale(0)\n\t} else {\n\t\tz.SetUnscaled(1).SetScale(0)\n\t}\n\n\t// We are computing (e^n) by splitting n into an integer and a float\n\t// (e.g 3.1 ==> x = 3, y = 0.1), this allows us to write\n\t// e^n = e^(x+y) = e^x * e^y\n\n\t// Split out x (integer(n))\n\tx := new(inf.Dec).Round(nn, 0, inf.RoundDown)\n\n\t// Split out y (n - x) which is < 1\n\ty := new(inf.Dec).Sub(nn, x)\n\n\t// convert x to integer\n\tinteger, ok := x.Unscaled()\n\tif !ok {\n\t\tpanic(\"integer out of range\")\n\t}\n\n\tex := integerPower(z, new(inf.Dec).Set(decimalE), integer, s+2)\n\treturn smallExp(ex, y, s-2)\n}",
"func Exp2(x *internal.Decimal) (*internal.Decimal, error) {\n\tvar d internal.Decimal\n\t_, err := apdContext.Pow(&d, two, x)\n\treturn &d, err\n}",
"func ModExp(x, y, N int64) int64 {\n\tif y == 0 {\n\t\treturn 1\n\t}\n\tz := ModExp(x, y/2, N)\n\tt := (z * z) % N\n\tif y%2 == 0 {\n\t\treturn t\n\t}\n\treturn (x * t) % N\n}",
"func Pow(z, x, y *inf.Dec, s inf.Scale) (*inf.Dec, error) {\n\ts = s + 2\n\tif z == nil {\n\t\tz = new(inf.Dec)\n\t\tz.SetUnscaled(1).SetScale(0)\n\t}\n\n\t// Check if y is of type int.\n\ttmp := new(inf.Dec).Abs(y)\n\tisInt := tmp.Cmp(new(inf.Dec).Round(tmp, 0, inf.RoundDown)) == 0\n\n\txs := x.Sign()\n\tif xs == 0 {\n\t\tswitch y.Sign() {\n\t\tcase 0:\n\t\t\treturn z.SetUnscaled(1).SetScale(0), nil\n\t\tcase 1:\n\t\t\treturn z.SetUnscaled(0).SetScale(0), nil\n\t\tdefault: // -1\n\t\t\t// undefined for y < 0\n\t\t\treturn nil, errPowZeroNegative\n\t\t}\n\t}\n\n\tneg := xs < 0\n\n\tif !isInt && neg {\n\t\treturn nil, errPowNegNonInteger\n\t}\n\n\t// Exponent Precision Explanation (RaduBerinde):\n\t// Say we compute the Log with a scale of k. That means that the result we get is:\n\t// ln x +/- 10^-k.\n\t// This leads to an error of y * 10^-k in the exponent, which leads to a\n\t// multiplicative error of e^(y*10^-k) in the result.\n\t// For small values of u, e^u can be approximated by 1 + u, so for large k\n\t// that error is around 1 + y*10^-k. So the additive error will be x^y * y * 10^-k,\n\t// and we want this to be less than 10^-s. This approximately means that k has to be\n\t// s + the number of digits before the decimal point in x^y. 
Which roughly is\n\t//\n\t// s + <the number of digits before decimal point in x> * y.\n\t//\n\t// exponent precision = s + <the number of digits before decimal point in x> * y.\n\tnumDigits := float64(x.UnscaledBig().BitLen()) / digitsToBitsRatio\n\tnumDigits -= float64(x.Scale())\n\n\t// Round up y which should provide us with a threshold in calculating the new scale.\n\tyu := float64(new(inf.Dec).Round(y, 0, inf.RoundUp).UnscaledBig().Int64())\n\n\t// exponent precision = s + <the number of digits before decimal point in x> * y\n\tes := s + inf.Scale(numDigits*yu)\n\tif es < 0 || es > maxPrecision {\n\t\treturn nil, errArgumentTooLarge\n\t}\n\n\ttmp = new(inf.Dec).Abs(x)\n\t_, err := Log(tmp, tmp, es)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttmp.Mul(tmp, y)\n\tExp(tmp, tmp, es)\n\n\tif neg && y.Round(y, 0, inf.RoundDown).UnscaledBig().Bit(0) == 1 {\n\t\ttmp.Neg(tmp)\n\t}\n\n\t// Round to the desired scale.\n\treturn z.Round(tmp, s-2, inf.RoundHalfUp), nil\n}",
"func (f *Float) Mul2Exp(x *Float, s uint) *Float {\n\tx.doinit()\n\tf.doinit()\n\tC.mpf_mul_2exp(&f.i[0], &x.i[0], C.mp_bitcnt_t(s))\n\treturn f\n}",
"func (z *Int) Exp(x, y, m *Int) *Int {}",
"func (z *Int) Exp(base, exponent *Int) *Int {\n\treturn z.Copy(ExpF(base, exponent))\n}",
"func pow2(exp int) int {\n\tif exp < 0 || exp > 31 {\n\t\tpanic(\"exponent must be between 0 and 31\")\n\t}\n\tn := 1\n\tfor i := 0; i < exp; i++ {\n\t\tn *= 2\n\t}\n\treturn n\n}",
"func (x f26dot6) div(y f26dot6) f26dot6 {\n\treturn f26dot6((int64(x) << 6) / int64(y))\n}",
"func exp2(e uint8) int64 {\n\tif e == 0 {\n\t\treturn 1\n\t}\n\tb := int64(2)\n\tfor i := uint8(1); i < e; i++ {\n\t\tb *= 2\n\t}\n\treturn b\n}",
"func Exp(o, z *big.Float) *big.Float {\n\tif o.Prec() == 0 {\n\t\to.SetPrec(z.Prec())\n\t}\n\tif z.Sign() == 0 {\n\t\treturn o.SetFloat64(1)\n\t}\n\tif z.IsInf() {\n\t\tif z.Sign() < 0 {\n\t\t\treturn o.Set(&gzero)\n\t\t}\n\t\treturn o.Set(z)\n\t}\n\n\tp := o\n\tif p == z {\n\t\t// We need z for Newton's algorithm, so ensure we don't overwrite it.\n\t\tp = new(big.Float).SetPrec(z.Prec())\n\t}\n\t// try to get initial estimate using IEEE-754 math\n\t// TODO: save work (and an import of math) by checking the exponent of z\n\tzf, _ := z.Float64()\n\tzf = math.Exp(zf)\n\tif math.IsInf(zf, 1) || zf == 0 {\n\t\t// too big or too small for IEEE-754 math,\n\t\t// perform argument reduction using\n\t\t// e^{2z} = (e^z)Β²\n\t\thalfZ := quicksh(new(big.Float), z, -1).SetPrec(p.Prec() + 64)\n\t\t// TODO: avoid recursion\n\t\thalfExp := Exp(halfZ, halfZ)\n\t\treturn p.Mul(halfExp, halfExp)\n\t}\n\t// we got a nice IEEE-754 estimate\n\tguess := big.NewFloat(zf)\n\n\t// f(t)/f'(t) = t*(log(t) - z)\n\tf := func(t *big.Float) *big.Float {\n\t\tp.Sub(Log(new(big.Float), t), z)\n\t\treturn p.Mul(p, t)\n\t}\n\n\tx := newton(f, guess, z.Prec()) // TODO: make newton operate in place\n\n\treturn o.Set(x)\n}",
"func Exp(x float64) float64 {\n\n\treturn math.Exp(x)\n}",
"func (d Decimal) Div(d2 Decimal) Decimal {\n\treturn d.DivRound(d2, int32(DivisionPrecision))\n}",
"func Exp2Sight(exp int64) float64 {\n\tlevel, _, _, _ := Exp2Level(exp)\n\treturn Level2Sight(level)\n}",
"func (base *LRSBase) zi2 (h, si, y0, ci *big.Int) *big.Int {\n\ta := new(big.Int).Exp(h, si, base.P)\n\tb := new(big.Int).Exp(y0, ci, base.P)\n\treturn a.Mul(a, b).Mod(a, base.P)\n}",
"func DIVSS(mx, x operand.Op) { ctx.DIVSS(mx, x) }",
"func IntDiv(z *big.Int, x, y *big.Int,) *big.Int",
"func modExp(base, exponent, modulus int64) (int64, error) {\n\n\t// Check preconditions\n\tif modulus <= 0 {\n\t\treturn 0, errors.New(fmt.Sprintf(\"Expected modulus > 0, but received modulus = %v\", modulus))\n\t}\n\tif exponent < 0 {\n\t\treturn 0, errors.New(fmt.Sprintf(\"Expected exponent >= 0, but received exponent = %v\", exponent))\n\t}\n\n\tif base == 0 || modulus == 1 { return 0, nil }\n\tresult := int64(1)\n\tbase = base % modulus\n\n\tfor exponent > 0 {\n\t\tif exponent&1 == 1 {\n\t\t\tresult = (result * base) % modulus\n\t\t}\n\n\t\texponent >>= 1\n\t\tbase = (base * base) % modulus\n\t}\n\n\treturn result, nil\n}",
"func (f *Float) Float2Exp() (d float64, exp int) {\n\tvar exp_ C.long\n\td = float64(C.mpf_get_d_2exp(&exp_, &f.i[0]))\n\texp = int(exp_)\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets f = Ceil(x) and returns f.
|
func (f *Float) Ceil(x *Float) *Float {
x.doinit()
f.doinit()
C.mpf_ceil(&f.i[0], &x.i[0])
return f
}
|
[
"func Ceil(x float64) int {\n\treturn int(math.Ceil(x))\n}",
"func (e *ErrDecimal) Ceil(d, x *Decimal) *Decimal {\n\treturn e.op2(d, x, e.Ctx.Ceil)\n}",
"func Ceil(value gcv.Value) (gcv.Value, error) {\n\tif value.Type() == gcv.Complex {\n\t\treturn nil, errors.New(\"Ceil is not supported for Complex numbers\")\n\t}\n\treturn gcv.MakeValue(math.Ceil(value.Real())), nil\n}",
"func (i *Number) Ceil(precision Number) *Number {\n\tif precision.IsBelow(*NewNumber(1)) {\n\t\treturn NewNumber(math.Ceil(i.AsFloat64()))\n\t}\n\tbuf := bytes.NewBuffer([]byte{})\n\tbuf.WriteString(\"1\")\n\tfor i := 0; i < precision.AsInt(); i++ {\n\t\tbuf.WriteString(\"0\")\n\t}\n\tfactor := NewNumber(buf.String())\n\tconverted := i.Multiply(*factor)\n\tceiling := NewNumber(math.Ceil(converted.AsFloat64()))\n\treturn ceiling.Divide(*factor)\n}",
"func Ceil(numerator, denominator int) int {\n\tif numerator%denominator == 0 {\n\t\treturn numerator / denominator\n\t}\n\treturn (numerator / denominator) + 1\n}",
"func Ceil(p *Point) *Point {\n\treturn p.SetTo(math.Ceil(p.X), math.Ceil(p.Y))\n}",
"func Ceil(r *Rectangle) *Rectangle {\n\tr.X = math.Ceil(r.X)\n\tr.Y = math.Ceil(r.Y)\n\n\treturn r\n}",
"func (v Vector[T]) Ceil() Vector[T] {\n\treturn New(\n\t\tT(math.Ceil(float64(v.x))),\n\t\tT(math.Ceil(float64(v.y))),\n\t\tT(math.Ceil(float64(v.z))),\n\t\tT(math.Ceil(float64(v.w))),\n\t)\n}",
"func (d Decimal) Ceil() Decimal {\n\td.ensureInitialized()\n\n\tif d.exp >= 0 {\n\t\treturn d\n\t}\n\n\texp := big.NewInt(10)\n\n\t// NOTE(vadim): must negate after casting to prevent int32 overflow\n\texp.Exp(exp, big.NewInt(-int64(d.exp)), nil)\n\n\tz, m := new(big.Int).DivMod(d.value, exp, new(big.Int))\n\tif m.Cmp(zeroInt) != 0 {\n\t\tz.Add(z, oneInt)\n\t}\n\treturn Decimal{value: z, exp: 0}\n}",
"func (gdt *Vector3) Ceil() Vector3 {\n\targ0 := gdt.getBase()\n\n\tret := C.go_godot_vector3_ceil(GDNative.api, arg0)\n\n\treturn Vector3{base: &ret}\n\n}",
"func CeilP(x float64, p int) float64 {\n\tk := math.Pow10(p)\n\treturn math.Ceil(x*k) / k\n}",
"func (fn *formulaFuncs) CEILING(argsList *list.List) formulaArg {\n\tif argsList.Len() == 0 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"CEILING requires at least 1 argument\")\n\t}\n\tif argsList.Len() > 2 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"CEILING allows at most 2 arguments\")\n\t}\n\tnumber, significance, res := 0.0, 1.0, 0.0\n\tn := argsList.Front().Value.(formulaArg).ToNumber()\n\tif n.Type == ArgError {\n\t\treturn n\n\t}\n\tnumber = n.Number\n\tif number < 0 {\n\t\tsignificance = -1\n\t}\n\tif argsList.Len() > 1 {\n\t\ts := argsList.Back().Value.(formulaArg).ToNumber()\n\t\tif s.Type == ArgError {\n\t\t\treturn s\n\t\t}\n\t\tsignificance = s.Number\n\t}\n\tif significance < 0 && number > 0 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"negative sig to CEILING invalid\")\n\t}\n\tif argsList.Len() == 1 {\n\t\treturn newNumberFormulaArg(math.Ceil(number))\n\t}\n\tnumber, res = math.Modf(number / significance)\n\tif res > 0 {\n\t\tnumber++\n\t}\n\treturn newNumberFormulaArg(number * significance)\n}",
"func (v Vector[T]) CeilToInt() Vector[int] {\n\treturn New(\n\t\tint(math.Ceil(float64(v.x))),\n\t\tint(math.Ceil(float64(v.y))),\n\t\tint(math.Ceil(float64(v.z))),\n\t\tint(math.Ceil(float64(v.w))),\n\t)\n}",
"func MustCeil(value gcv.Value) gcv.Value {\n\tval, err := Ceil(value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn val\n}",
"func CeilDiv(n, divisor int) int {\n\treturn int(math.Ceil(float64(n) / float64(divisor)))\n}",
"func (fn *formulaFuncs) ISOdotCEILING(argsList *list.List) formulaArg {\n\tif argsList.Len() == 0 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"ISO.CEILING requires at least 1 argument\")\n\t}\n\tif argsList.Len() > 2 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"ISO.CEILING allows at most 2 arguments\")\n\t}\n\tvar significance float64\n\tnumber := argsList.Front().Value.(formulaArg).ToNumber()\n\tif number.Type == ArgError {\n\t\treturn number\n\t}\n\tif number.Number < 0 {\n\t\tsignificance = -1\n\t}\n\tif argsList.Len() == 1 {\n\t\treturn newNumberFormulaArg(math.Ceil(number.Number))\n\t}\n\tif argsList.Len() > 1 {\n\t\ts := argsList.Back().Value.(formulaArg).ToNumber()\n\t\tif s.Type == ArgError {\n\t\t\treturn s\n\t\t}\n\t\tsignificance = s.Number\n\t\tsignificance = math.Abs(significance)\n\t\tif significance == 0 {\n\t\t\treturn newNumberFormulaArg(significance)\n\t\t}\n\t}\n\tval, res := math.Modf(number.Number / significance)\n\tif res != 0 {\n\t\tif number.Number > 0 {\n\t\t\tval++\n\t\t}\n\t}\n\treturn newNumberFormulaArg(val * significance)\n}",
"func Ceil32(x float32) float32 {\n\treturn float32(math.Ceil(float64(x)))\n}",
"func (d Decimal) RoundCeil(places int32) Decimal {\n\tif d.exp >= -places {\n\t\treturn d\n\t}\n\n\trescaled := d.rescale(-places)\n\tif d.Equal(rescaled) {\n\t\treturn d\n\t}\n\n\tif d.value.Sign() > 0 {\n\t\trescaled.value.Add(rescaled.value, oneInt)\n\t}\n\n\treturn rescaled\n}",
"func (self *Point) Ceil() *Point{\n return &Point{self.Object.Call(\"ceil\")}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets f = Floor(x) and returns f.
|
func (f *Float) Floor(x *Float) *Float {
x.doinit()
f.doinit()
C.mpf_floor(&f.i[0], &x.i[0])
return f
}
|
[
"func Floor(x float32) float32",
"func Floor(x float64, unit float64) float64 {\n\tif IsZero(unit) {\n\t\treturn x\n\t}\n\n\tunits := int64((x + unit*e) / unit)\n\treturn float64(units) * unit\n}",
"func (e *ErrDecimal) Floor(d, x *Decimal) *Decimal {\n\treturn e.op2(d, x, e.Ctx.Floor)\n}",
"func (i *Number) Floor(precision Number) *Number {\n\tif precision.IsBelow(*NewNumber(1)) {\n\t\treturn NewNumber(math.Floor(i.AsFloat64()))\n\t}\n\tbuf := bytes.NewBuffer([]byte{})\n\tbuf.WriteString(\"1\")\n\tfor i := 0; i < precision.AsInt(); i++ {\n\t\tbuf.WriteString(\"0\")\n\t}\n\tfactor := NewNumber(buf.String())\n\tconverted := i.Multiply(*factor)\n\tflooring := NewNumber(math.Floor(converted.AsFloat64()))\n\treturn flooring.Divide(*factor)\n}",
"func Floorp(f float64, n int) float64 {\n\tpow := math.Pow10(n)\n\treturn math.Floor(f*pow) / pow\n}",
"func wrap(x, bound float64) float64 {\n\tif x >= 0 && x < bound {\n\t\treturn x\n\t}\n\tif x = math.Mod(x, bound); x < 0 {\n\t\treturn bound + x\n\t}\n\treturn x\n}",
"func (d Decimal) Floor() Decimal {\n\td.ensureInitialized()\n\n\tif d.exp >= 0 {\n\t\treturn d\n\t}\n\n\texp := big.NewInt(10)\n\n\t// NOTE(vadim): must negate after casting to prevent int32 overflow\n\texp.Exp(exp, big.NewInt(-int64(d.exp)), nil)\n\n\tz := new(big.Int).Div(d.value, exp)\n\treturn Decimal{value: z, exp: 0}\n}",
"func Floor(t1 TermT) TermT {\n\treturn TermT(C.yices_floor(C.term_t(t1)))\n}",
"func FloorP(x float64, p int) float64 {\n\tk := math.Pow10(p)\n\treturn math.Floor(x*k) / k\n}",
"func Floor(value gcv.Value) (gcv.Value, error) {\n\tif value.Type() == gcv.Complex {\n\t\treturn nil, errors.New(\"Floor is not supported for Complex numbers\")\n\t}\n\treturn gcv.MakeValue(math.Floor(value.Real())), nil\n}",
"func (v Vector[T]) Floor() Vector[T] {\n\treturn New(\n\t\tT(math.Floor(float64(v.x))),\n\t\tT(math.Floor(float64(v.y))),\n\t\tT(math.Floor(float64(v.z))),\n\t\tT(math.Floor(float64(v.w))),\n\t)\n}",
"func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"FloorMod\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func (f *Float) Ceil(x *Float) *Float {\n\tx.doinit()\n\tf.doinit()\n\tC.mpf_ceil(&f.i[0], &x.i[0])\n\treturn f\n}",
"func Round(x float64) float64 {\n\treturn math.Floor(x + 0.5)\n}",
"func (s Size) Floor() Size {\n\tfor _, unit := range allUnits {\n\t\tif s >= unit {\n\t\t\treturn (s / unit) * unit\n\t\t}\n\t}\n\treturn s\n}",
"func FloorAtZero(x *float64) bool {\n\tif *x < 0 {\n\t\t*x = 0.0\n\t\treturn true\n\t}\n\treturn false\n}",
"func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"FloorDiv\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func Floor32(x float32) float32 {\n\treturn float32(math.Floor(float64(x)))\n}",
"func (f Float) floor_int() (int) {\n i := int(f)\n if f < Float(0.0) && f != Float(i) { \n return i - 1\n }\n return i \n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets f = Trunc(x) (=round towards zero) and returns f.
|
func (f *Float) Trunc(x *Float) *Float {
x.doinit()
f.doinit()
C.mpf_trunc(&f.i[0], &x.i[0])
return f
}
|
[
"func TruncateFloat(f float64, flen int, decimal int) (float64, error) {\n\tif math.IsNaN(f) {\n\t\t// nan returns 0\n\t\treturn 0, nil\n\t}\n\n\tmaxF := getMaxFloat(flen, decimal)\n\n\tif !math.IsInf(f, 0) {\n\t\tf = truncateFloat(f, decimal)\n\t}\n\n\tif f > maxF {\n\t\tf = maxF\n\t} else if f < -maxF {\n\t\tf = -maxF\n\t}\n\n\treturn f, nil\n}",
"func roundFloat(x float64, prec int) float64 {\n\tvar rounder float64\n\tpow := math.Pow(10, float64(prec))\n\tintermed := x * pow\n\t_, frac := math.Modf(intermed)\n\tx = .5\n\tif frac < 0.0 {\n\t\tx = -.5\n\t}\n\tif frac >= x {\n\t\trounder = math.Ceil(intermed)\n\t} else {\n\t\trounder = math.Floor(intermed)\n\t}\n\n\treturn rounder / pow\n}",
"func Floor(x float32) float32",
"func Truncate(val float64, prec int) float64 {\n\tfrep := strconv.FormatFloat(val, 'g', prec, 64)\n\tval, _ = strconv.ParseFloat(frep, 64)\n\treturn val\n}",
"func round(x float64) float64 {\n\treturn math.Round(x)\n}",
"func roundValue(initialValue float64, floor float64) float64 {\n\tb := []byte(strings.Trim(fmt.Sprintf(\"%f\", initialValue), \"0\"))\n\tvalue := initialValue\n\tfor i := len(b)-1; i >= 0; i-- {\n\t\tif b[i] != '.' {\n\t\t\tb[i] = '0'\n\t\t\tround, e := strconv.ParseFloat(string(b), 64)\n\t\t\tif e != nil || round <= floor {\n\t\t\t\treturn value\n\t\t\t}\n\t\t\tvalue = round\n\t\t}\n\t}\n\treturn value\n}",
"func truncate(x, m int64) int64 {\n\tif m <= 0 {\n\t\treturn x\n\t}\n\treturn x - x%m\n}",
"func wrap(x, bound float64) float64 {\n\tif x >= 0 && x < bound {\n\t\treturn x\n\t}\n\tif x = math.Mod(x, bound); x < 0 {\n\t\treturn bound + x\n\t}\n\treturn x\n}",
"func RoundFloat(f float64) float64 {\n\tif math.Remainder(f, 1.0) < 0 {\n\t\treturn math.Ceil(f)\n\t}\n\treturn math.Floor(f)\n}",
"func Round(x float64) float64 {\n\treturn math.Floor(x + 0.5)\n}",
"func Floor(x float64, unit float64) float64 {\n\tif IsZero(unit) {\n\t\treturn x\n\t}\n\n\tunits := int64((x + unit*e) / unit)\n\treturn float64(units) * unit\n}",
"func (v Vec2D) Trunc(maxl float64) Vec2D {\n\tl := v.Len()\n\treturn v.SetLen(math.Min(maxl, l))\n}",
"func (f *Float) Floor(x *Float) *Float {\n\tx.doinit()\n\tf.doinit()\n\tC.mpf_floor(&f.i[0], &x.i[0])\n\treturn f\n}",
"func Normalize(start, end float64, f func(float64) float64) func(float64) float64 {\n\tiv := integration.Interval{Start: start, End: end}\n\tarea := integration.Integrate(f, iv)\n\treturn func(x float64) float64 {\n\t\treturn f(x) / area\n\t}\n}",
"func (f Float64) FilterZero() Float64 {\n\tif !f.IsPresent() || f.value == 0 {\n\t\treturn Float64{}\n\t}\n\n\treturn f\n}",
"func F(a interface{}) float64 {\n\treturn F64(a)\n}",
"func Roundup(f float64) float64 {\n\tn := math.Round(f * 100000)\n\tif int64(n)%10000 == 0 {\n\t\treturn n / 100000.0\n\t}\n\treturn (math.Floor(n/10000) + 1) / 10.0\n}",
"func roundFloat(x float64, decimals int) float64 {\n\t// return roundFloat(x, numDig(x)+decimals)\n\tfrep := strconv.FormatFloat(x, 'f', decimals, 64)\n\tf, _ := strconv.ParseFloat(frep, 64)\n\treturn f\n}",
"func f64ToFixed(x float64) render.Fixed {\n\treturn render.Fixed(x * 65536)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The Listener should reset the temporary error retry delay after a successful Accept.
|
func TestAcceptRetryTemporaryReset(t *testing.T) {
// the accepted connection
conn := &testutils.MockConn{}
// the listener that fails with temporary errors, accepts one,
// and fails again with temporary errors to check the reset
// of the temporary errors delay
listener := &testutils.MockListener{
AcceptFunc: func(i int) (net.Conn, error) {
if i < 5 {
return nil, testutils.ErrTemporaryTrue
}
if i == 5 {
return conn, nil
}
if i < 10 {
return nil, testutils.ErrTemporaryTrue
}
return nil, io.EOF
},
}
server := &RetryServer{
Listener: listener,
Dispatch: nopDispatch,
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
start := time.Now()
if err := server.Serve(ctx); errors.Cause(err) != io.EOF {
t.Errorf("want io.EOF, got %v", err)
}
// retried 5 times for temporary errors:
firstDelay := (5 + 10 + 20 + 40 + 80) * time.Millisecond
// then 4 more times after a delay reset:
secondDelay := (5 + 10 + 20 + 40) * time.Millisecond
want := firstDelay + secondDelay
got := time.Since(start)
if got < want || got > (want+(100*time.Millisecond)) {
t.Errorf("want duration of %v, got %v", want, got)
}
}
|
[
"func (s *Server) Accept() error {\n var tempDelay time.Duration // how long to sleep on accept failure\n for {\n c, e := s.listener.Accept()\n if e != nil {\n if ne, ok := e.(net.Error); ok && ne.Temporary() {\n if tempDelay == 0 {\n tempDelay = 5 * time.Millisecond\n } else {\n tempDelay *= 2\n }\n if max := 1 * time.Second; tempDelay > max {\n tempDelay = max\n }\n time.Sleep(tempDelay)\n continue\n }\n return e\n }\n go s.accept(c)\n }\n}",
"func TestRemoteSignerRetryTCPOnly(t *testing.T) {\n\tvar (\n\t\tattemptc = make(chan int)\n\t\tretries = 2\n\t)\n\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\n\tgo func(ln net.Listener, attemptc chan<- int) {\n\t\tattempts := 0\n\n\t\tfor {\n\t\t\tconn, err := ln.Accept()\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = conn.Close()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tattempts++\n\n\t\t\tif attempts == retries {\n\t\t\t\tattemptc <- attempts\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}(ln, attemptc)\n\n\trs := NewRemoteSigner(\n\t\tlog.TestingLogger(),\n\t\tcmn.RandStr(12),\n\t\ttypes.NewMockPV(),\n\t\tDialTCPFn(ln.Addr().String(), testConnDeadline, ed25519.GenPrivKey()),\n\t)\n\tdefer rs.Stop()\n\n\tRemoteSignerConnDeadline(time.Millisecond)(rs)\n\tRemoteSignerConnRetries(retries)(rs)\n\n\tassert.Equal(t, rs.Start(), ErrDialRetryMax)\n\n\tselect {\n\tcase attempts := <-attemptc:\n\t\tassert.Equal(t, retries, attempts)\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Error(\"expected remote to observe connection attempts\")\n\t}\n}",
"func TestSignerRemoteRetryTCPOnly(t *testing.T) {\n\tvar (\n\t\tattemptCh = make(chan int)\n\t\tretries = 2\n\t)\n\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\n\tgo func(ln net.Listener, attemptCh chan<- int) {\n\t\tattempts := 0\n\n\t\tfor {\n\t\t\tconn, err := ln.Accept()\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = conn.Close()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tattempts++\n\n\t\t\tif attempts == retries {\n\t\t\t\tattemptCh <- attempts\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}(ln, attemptCh)\n\n\tserviceEndpoint := NewSignerServiceEndpoint(\n\t\tlog.TestingLogger(),\n\t\tcmn.RandStr(12),\n\t\ttypes.NewMockPV(),\n\t\tDialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()),\n\t)\n\tdefer serviceEndpoint.Stop()\n\n\tSignerServiceEndpointTimeoutReadWrite(time.Millisecond)(serviceEndpoint)\n\tSignerServiceEndpointConnRetries(retries)(serviceEndpoint)\n\n\tassert.Equal(t, serviceEndpoint.Start(), ErrDialRetryMax)\n\n\tselect {\n\tcase attempts := <-attemptCh:\n\t\tassert.Equal(t, retries, attempts)\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Error(\"expected remote to observe connection attempts\")\n\t}\n}",
"func (m *Message) Retry() {\n\tm.Trial++\n}",
"func (e *Opener) ErrTimeout(now time.Time, duration time.Duration) {\n\te.legitimateAttemptsCount.Inc(now)\n\te.errorsCount.Inc(now)\n}",
"func retryCheck(tweet *Tweet, client *oauth.Consumer, canFav bool, retry chan<- bool, ns []time.Duration, periodIndex int) (bool, int) {\n\tlog.Println(\"Checking again...\")\n\n\t//Test connection again\n\tif _, code := testConn(tweet, client); code != 429 && code != 88 { //We just check if we are banned because of surpassing the limit\n\t\t//Fav creation is allowed again, update parameters accordingly\n\t\tcanFav = true\n\t\tperiodIndex = 0\n\n\t} else {\n\t\t//Can't create favs yet\n\t\tcanFav = false\n\n\t\t//Send retry message after period\n\t\ttime.AfterFunc(ns[periodIndex] * time.Minute, func() {\n\t\t\tretry <- true\n\t\t})\n\n\t\tlog.Println(\"Still banned from creating favorites. Going to retry in\", ns[periodIndex].Nanoseconds(), \"minutes.\")\n\n\t\t//Increase period for next call\n\t\tif periodIndex < (len(ns) - 1) {\n\t\t\tperiodIndex++\n\t\t}\n\t}\n\n\treturn canFav, periodIndex\n}",
"func (l Listeners) Retried(req *http.Request, attempt int) {\n\tfor _, listener := range l {\n\t\tlistener.Retried(req, attempt)\n\t}\n}",
"func xRetry(args *xRetryArgs) error {\n\t// The select statement below is not guaranteed to return\n\t// a non-nil context error in the first iteration because\n\t// go chooses a case at random if they're both \"ready\".\n\tif args.ctx.Err() != nil {\n\t\treturn args.ctx.Err()\n\t}\n\n\tvar sleep time.Duration\n\n\tfor i := 0; ; i++ {\n\t\tselect {\n\t\tcase <-args.ctx.Done():\n\t\t\treturn args.ctx.Err()\n\t\tcase <-time.After(sleep):\n\t\t\t// Continue\n\t\t}\n\n\t\terr := args.f()\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Was this a network error?\n\t\tif err, ok := err.(net.Error); ok {\n\t\t\t// Ignore temporary errors\n\t\t\tif err.Timeout() || err.Temporary() {\n\t\t\t\t// Have we reached the retry limit already?\n\t\t\t\tif i == args.maxRetry {\n\t\t\t\t\treturn &NetworkError{Err: err}\n\t\t\t\t}\n\n\t\t\t\tsleep = args.b.ForAttempt(float64(i))\n\t\t\t\targs.errs <- &NetworkError{Err: err, Retrying: true, Backoff: sleep}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Anything else is a fatal error\n\t\t\treturn &NetworkError{Err: err}\n\t\t}\n\n\t\treturn &RedisError{Err: err}\n\t}\n}",
"func (s *RetryServer) handleTemporary(delay *time.Duration, err error) bool {\n\troot := errors.Cause(err)\n\n\tif te, ok := root.(interface {\n\t\tTemporary() bool\n\t}); ok && te.Temporary() {\n\t\tif *delay == 0 {\n\t\t\t*delay = 5 * time.Millisecond\n\t\t} else {\n\t\t\t*delay *= 2\n\t\t}\n\n\t\tif max := 1 * time.Second; *delay > max {\n\t\t\t*delay = max\n\t\t}\n\n\t\tHandleError(errors.Wrap(err, fmt.Sprintf(\"temporary error, retrying in %v\", *delay)), s.ErrChan)\n\t\ttime.Sleep(*delay)\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (pub *Pubnub) resetRetryAndSendResponse() bool {\n\tretryCountMu.Lock()\n\tdefer retryCountMu.Unlock()\n\n\tif retryCount > 0 {\n\t\tpub.sendConnectionEvent(pub.channels.ConnectedNamesString(),\n\t\t\tpub.groups.ConnectedNamesString(), connectionReconnected)\n\n\t\tretryCount = 0\n\t\treturn true\n\t}\n\treturn false\n}",
"func (sender *MessageSender) Retry(msgRetry *MessageRetry) {\n\tfor {\n\t\ttime.Sleep(RetryIntervalInSec * time.Second)\n\n\t\tif msgRetry.retryCount >= sender.retryTimes {\n\t\t\t// Retried enough times\n\t\t\treturn\n\t\t}\n\n\t\tmsgRetry.isActiveMutex.Lock()\n\t\tif !msgRetry.isActive {\n\t\t\tmsgRetry.isActiveMutex.Unlock()\n\t\t\t// Retry is stopped\n\t\t\treturn\n\t\t}\n\t\tmsgRetry.isActiveMutex.Unlock()\n\n\t\tif msgRetry.msgType != msg_pb.MessageType_COMMITTED {\n\t\t\tsender.blockNumMutex.Lock()\n\t\t\tif msgRetry.blockNum < sender.blockNum {\n\t\t\t\tsender.blockNumMutex.Unlock()\n\t\t\t\t// Block already moved ahead, no need to retry old block's messages\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsender.blockNumMutex.Unlock()\n\t\t}\n\n\t\tmsgRetry.retryCount++\n\t\tif err := sender.host.SendMessageToGroups(msgRetry.groups, msgRetry.p2pMsg); err != nil {\n\t\t\tutils.Logger().Warn().Str(\"groupID[0]\", msgRetry.groups[0].String()).Uint64(\"blockNum\", msgRetry.blockNum).Str(\"MsgType\", msgRetry.msgType.String()).Int(\"RetryCount\", msgRetry.retryCount).Msg(\"[Retry] Failed re-sending consensus message\")\n\t\t} else {\n\t\t\tutils.Logger().Info().Str(\"groupID[0]\", msgRetry.groups[0].String()).Uint64(\"blockNum\", msgRetry.blockNum).Str(\"MsgType\", msgRetry.msgType.String()).Int(\"RetryCount\", msgRetry.retryCount).Msg(\"[Retry] Successfully resent consensus message\")\n\t\t}\n\t}\n}",
"func (r *RetryPolicy) Reset() {\n\tr.retryCount = atomic.NewUint32(0)\n}",
"func (pub *Pubnub) retryLoop(errorChannel chan<- []byte) {\n\tfor {\n\t\tpub.RLock()\n\t\tsubChannels := pub.channels.ConnectedNamesString()\n\t\tsubChannelsGroups := pub.groups.ConnectedNamesString()\n\t\tpub.RUnlock()\n\n\t\tif len(subChannels) > 0 || len(subChannelsGroups) > 0 {\n\t\t\t_, responseCode, err := pub.httpRequest(\"\", retryTrans)\n\n\t\t\tretryCountMu.RLock()\n\t\t\tretryCountLocal := retryCount\n\t\t\tretryCountMu.RUnlock()\n\n\t\t\tif (err != nil) && (responseCode != 403) && (retryCountLocal <= 0) {\n\t\t\t\tlogMu.Lock()\n\t\t\t\terrorLogger.Println(fmt.Sprintf(\"%s, response code: %d:\", err.Error(), responseCode))\n\t\t\t\tlogMu.Unlock()\n\n\t\t\t\tpub.checkForTimeoutAndRetries(err, errorChannel)\n\t\t\t\tpub.CloseExistingConnection()\n\t\t\t} else if (err == nil) && (retryCountLocal > 0) {\n\t\t\t\tpub.resetRetryAndSendResponse()\n\t\t\t}\n\t\t\tsleepForAWhile(false)\n\t\t} else {\n\t\t\tpub.closeRetryConnection()\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func retryTimeout(ctx context.Context, retryInterval time.Duration, check func(ctx context.Context) error) {\n\n\tfor {\n\t\tfmt.Println(\"perform user check call\")\n\t\tif err := check(ctx); err == nil {\n\t\t\tfmt.Println(\"work finished successfully\")\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(\"check if timeout has expired\")\n\t\tif ctx.Err() != nil {\n\t\t\tfmt.Println(\"time expired 1 :\", ctx.Err())\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"wait %s before trying again\\n\", retryInterval)\n\t\tt := time.NewTimer(retryInterval)\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tfmt.Println(\"timed expired 2 :\", ctx.Err())\n\t\t\tt.Stop()\n\t\t\treturn\n\t\tcase <-t.C:\n\t\t\tfmt.Println(\"retry again\")\n\t\t}\n\t}\n}",
"func (s *Server) Accept() error {\n\tvar tempDelay time.Duration // how long to sleep on accept failure\n\tfor {\n\t\tc, e := s.Listener.Accept()\n\t\tif e != nil {\n\t\t\tif ne, ok := e.(net.Error); ok && ne.Temporary() {\n\t\t\t\tif tempDelay == 0 {\n\t\t\t\t\ttempDelay = 5 * time.Millisecond\n\t\t\t\t} else {\n\t\t\t\t\ttempDelay *= 2\n\t\t\t\t}\n\t\t\t\tif max := 1 * time.Second; tempDelay > max {\n\t\t\t\t\ttempDelay = max\n\t\t\t\t}\n\t\t\t\ttime.Sleep(tempDelay)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn e\n\t\t}\n\t\tgo s.accept(c)\n\t}\n}",
"func (s *Stream) retry(req *http.Request, expBackOff backoff.BackOff, aggExpBackOff backoff.BackOff) {\n\t// close Messages channel and decrement the wait group counter\n\tdefer close(s.Messages)\n\tdefer s.group.Done()\n\n\tvar wait time.Duration\n\tfor !stopped(s.done) {\n\t\tresp, err := s.client.Do(req)\n\t\tif err != nil {\n\t\t\t// stop retrying for HTTP protocol errors\n\t\t\tpanic(err)\n\t\t}\n\t\t// when err is nil, resp contains a non-nil Body which must be closed\n\t\tdefer resp.Body.Close()\n\t\ts.body = resp.Body\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\t// receive stream response Body, handles closing\n\t\t\ts.receive(resp.Body)\n\t\t\texpBackOff.Reset()\n\t\t\taggExpBackOff.Reset()\n\t\tcase http.StatusServiceUnavailable:\n\t\t\t// exponential backoff\n\t\t\twait = expBackOff.NextBackOff()\n\t\tcase 420, http.StatusTooManyRequests:\n\t\t\t// 420 Enhance Your Calm is unofficial status code by Twitter on being rate limited.\n\t\t\t// aggressive exponential backoff\n\t\t\twait = aggExpBackOff.NextBackOff()\n\t\tdefault:\n\t\t\t// stop retrying for other response codes\n\t\t\tresp.Body.Close()\n\t\t\treturn\n\t\t}\n\t\t// close response before each retry\n\t\tresp.Body.Close()\n\t\tif wait == backoff.Stop {\n\t\t\treturn\n\t\t}\n\t\tsleepOrDone(wait, s.done)\n\t}\n}",
"func (ti *testItem) retry() {\n\t// release it to make it available for somebody else to try later:\n\t<-ti.take\n}",
"func (m *Manager) listenAborted() chan struct{} {\n\treturn m.state.doneCh\n}",
"func (m *MetricsRetryListener) Retried(attempt int) {\n\tif m.retryMetrics != nil {\n\t\tm.retryMetrics.getRetryCounter().Add(1)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RemoveDuplicateTags removes duplicate tags from a given TagInformation slice. This should be used whenever joining two slices of TagInformation. Duplicate tags do not work well in queries Example, without this, if a user searched "test" and their account had a global filter of "test", the SQL query would look for two instances of "test" This does not work as an image should only have 1 instance of the tag. This situation returns empty results.
|
func RemoveDuplicateTags(ToFilter []TagInformation) []TagInformation {
//Rules:
//We do not care about meta-tags
//Exlusionary tags win
for Index := 0; Index < len(ToFilter); Index++ {
if ToFilter[Index].IsMeta {
continue //Skip Metatags
}
//Standard tag confirmed, scan for duplicates
for ScanIndex := 0; ScanIndex < len(ToFilter); ScanIndex++ {
if Index == ScanIndex || ToFilter[ScanIndex].IsMeta {
continue //Skip comparing same entry, or meta tags
}
if ToFilter[ScanIndex].ID == ToFilter[Index].ID {
var ToRemove int
if ToFilter[ScanIndex].Exclude {
//Duplicate found is an exclusionary, so remove Index
ToRemove = Index
} else {
//Duplicate found is not exclusion, so remove ScanIndex
ToRemove = ScanIndex
}
//Remove and resize
ToFilter = append(ToFilter[:ToRemove], ToFilter[ToRemove+1:]...)
if ToRemove < Index {
//If we removed something before the index, then continue scan but decrement the current scan state
Index--
ScanIndex--
} else if ToRemove == Index {
//If we removed the current index, the decrement index, and start a new duplicate scan from whatever is there now
Index--
break
} else {
//Finally, the third potential, is we removed an element ahead of Index, in which case, we just need to continue current scan from the same ScanIndex
ScanIndex--
}
}
}
}
return ToFilter
}
|
[
"func MergeTagSlices(Original []TagInformation, ToAdd []TagInformation) []TagInformation {\n\t//Rules:\n\t//We do not care about meta-tags\n\t//Tags in ToAdd win\n\t//Exlusionary tags win after tags in ToAdd\n\n\t//First, remove duplicates from original that exist in ToAdd\n\tfor Index := 0; Index < len(ToAdd); Index++ {\n\t\tif ToAdd[Index].IsMeta {\n\t\t\tcontinue //Skip Metatags\n\t\t}\n\t\t//Standard tag confirmed, scan for duplicates\n\t\tfor ScanIndex := 0; ScanIndex < len(Original); ScanIndex++ {\n\t\t\tif Original[ScanIndex].IsMeta {\n\t\t\t\tcontinue //Skip comparing metas\n\t\t\t}\n\t\t\tif Original[ScanIndex].ID == ToAdd[Index].ID {\n\t\t\t\t//Remove and resize\n\t\t\t\tOriginal = append(Original[:ScanIndex], Original[ScanIndex+1:]...)\n\t\t\t\t//we just need to continue current scan from the same ScanIndex\n\t\t\t\tScanIndex--\n\t\t\t}\n\t\t}\n\t}\n\n\t//Now we can fall back to RemoveDuplicateTags to cleanup any other issues\n\treturn RemoveDuplicateTags(append(Original, ToAdd...))\n}",
"func (auo *ArticleUpdateOne) RemoveTags(t ...*Tag) *ArticleUpdateOne {\n\tids := make([]int, len(t))\n\tfor i := range t {\n\t\tids[i] = t[i].ID\n\t}\n\treturn auo.RemoveTagIDs(ids...)\n}",
"func (puo *ProjectUpdateOne) RemoveTags(t ...*Topic) *ProjectUpdateOne {\n\tids := make([]int, len(t))\n\tfor i := range t {\n\t\tids[i] = t[i].ID\n\t}\n\treturn puo.RemoveTagIDs(ids...)\n}",
"func uniqueTags(t1 gostatsd.Tags, t2 gostatsd.Tags) gostatsd.Tags {\n\treturn uniqueTagsWithSeen(map[string]struct{}{}, t1, t2)\n}",
"func (z *Zone) RemoveTags(tags ...string) int {\n\ts := NewSet(z.Tags...)\n\tn := len(s)\n\tfor _, t := range tags {\n\t\tdelete(s, t)\n\t}\n\td := n - len(s)\n\tif d != 0 {\n\t\tz.Tags = s.Values()\n\t}\n\treturn d\n}",
"func ExcludeTags(rii RegInvImage, excludedTags map[Tag]bool) RegInvImage {\n\tfiltered := make(RegInvImage)\n\tfor imageName, digestTags := range rii {\n\t\tfor digest, tags := range digestTags {\n\t\t\tfor _, tag := range tags {\n\t\t\t\tif _, excludeMe := excludedTags[tag]; excludeMe {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif filtered[imageName] == nil {\n\t\t\t\t\tfiltered[imageName] = make(DigestTags)\n\t\t\t\t}\n\t\t\t\tfiltered[imageName][digest] = append(\n\t\t\t\t\tfiltered[imageName][digest],\n\t\t\t\t\ttag)\n\t\t\t}\n\t\t}\n\t}\n\treturn filtered\n}",
"func MergeTags(generalTags []*Tag, infraTags []*Tag) []*Tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.Key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.Key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}",
"func (r *implementation) DistinctTags(ctx context.Context) ([]string, error) {\n\tvar tags []string\n\terr := pgxscan.Select(ctx, r.db, &tags, `\nSELECT DISTINCT UNNEST(tags) FROM articles\n`)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tags, nil\n}",
"func (pu *ProjectUpdate) RemoveTags(t ...*Topic) *ProjectUpdate {\n\tids := make([]int, len(t))\n\tfor i := range t {\n\t\tids[i] = t[i].ID\n\t}\n\treturn pu.RemoveTagIDs(ids...)\n}",
"func (c *Client) TruncateTags(image string, truncateInactive bool, regularExpression string) error {\n\tvar tagsToRemove []string\n\tvar leaveTagsCounter = 0\n\n\ttags, err := c.ListTags(image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif regularExpression != \"\" {\n\t\tregexPattern := fmt.Sprintf(`(?i)%s`, regularExpression)\n\t\tfor _, tag := range tags {\n\t\t\tmatched, _ := regexp.MatchString(regexPattern, tag.Name)\n\t\t\tif matched {\n\t\t\t\ttagsToRemove = append(tagsToRemove, tag.Name)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, tag := range tags {\n\t\t\tif tag.TagStatus == \"inactive\" {\n\t\t\t\ttagsToRemove = append(tagsToRemove, tag.Name)\n\t\t\t\tleaveTagsCounter = 1\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := leaveTagsCounter; i < len(tagsToRemove); i++ {\n\t\tcolor.Green(\"\\u2714 Delete tag %s\", BW(tagsToRemove[i]))\n\t\tif err := c.deleteDockerImageTag(image, tagsToRemove[i]); err != nil {\n\t\t\tcolor.Red(\"Error while deleting image tag: %s\", err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func TestTagDups(t *testing.T) {\n\tnoTags := defaultOpts\n\tnoTags.TagDups = false\n\n\tcases := []TestCase{\n\t\t{\n\t\t\t[]TestRecord{\n\t\t\t\t{R: basicA1, DupFlag: false, UnexpectedTags: []sam.Tag{sam.NewTag(\"DI\"), sam.NewTag(\"DS\"), sam.NewTag(\"DT\"), sam.NewTag(\"DU\")}},\n\t\t\t\t{R: basicB1, DupFlag: true, UnexpectedTags: []sam.Tag{sam.NewTag(\"DI\"), sam.NewTag(\"DS\"), sam.NewTag(\"DT\"), sam.NewTag(\"DU\")}},\n\t\t\t\t{R: basicA2, DupFlag: false, UnexpectedTags: []sam.Tag{sam.NewTag(\"DI\"), sam.NewTag(\"DS\"), sam.NewTag(\"DT\"), sam.NewTag(\"DU\")}},\n\t\t\t\t{R: basicB2, DupFlag: true, UnexpectedTags: []sam.Tag{sam.NewTag(\"DI\"), sam.NewTag(\"DS\"), sam.NewTag(\"DT\"), sam.NewTag(\"DU\")}},\n\t\t\t},\n\t\t\tnoTags,\n\t\t},\n\t\t{\n\t\t\t[]TestRecord{\n\t\t\t\t{R: basicA1, DupFlag: false, ExpectedAuxs: []sam.Aux{NewAux(\"DI\", \"0\"), NewAux(\"DS\", 2)}},\n\t\t\t\t{R: basicB1, DupFlag: true, ExpectedAuxs: []sam.Aux{NewAux(\"DI\", \"0\"), NewAux(\"DS\", 2), NewAux(\"DT\", \"SQ\")}},\n\t\t\t\t{R: basicA2, DupFlag: false, ExpectedAuxs: []sam.Aux{NewAux(\"DI\", \"0\"), NewAux(\"DS\", 2)}},\n\t\t\t\t{R: basicB2, DupFlag: true, ExpectedAuxs: []sam.Aux{NewAux(\"DI\", \"0\"), NewAux(\"DS\", 2), NewAux(\"DT\", \"SQ\")}},\n\t\t\t},\n\t\t\tdefaultOpts,\n\t\t},\n\t}\n\tRunTestCases(t, header, cases)\n}",
"func (au *ArticleUpdate) RemoveTags(t ...*Tag) *ArticleUpdate {\n\tids := make([]int, len(t))\n\tfor i := range t {\n\t\tids[i] = t[i].ID\n\t}\n\treturn au.RemoveTagIDs(ids...)\n}",
"func (sp *Space) RemoveTags(tags ...string) {\n\tfor _, shape := range *sp {\n\t\tshape.RemoveTags(tags...)\n\t}\n}",
"func RemoveTags(src string) string {\n\tre := regexp.MustCompile(`(?i)(<([^>]+)>)`)\n\t// be paranoid and do replacement recursevly, just in case\n\tfor {\n\t\tcopy := src\n\t\tsrc = re.ReplaceAllString(src, \" \")\n\t\tif src == copy {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn re.ReplaceAllString(src, \" \")\n}",
"func tags_Delete_WithSameImageID(t *testing.T, opts ...configOpt) {\n\topts = append(opts)\n\tenv := newTestEnv(t, opts...)\n\tdefer env.Shutdown()\n\n\timageName, err := reference.WithName(\"foo/bar\")\n\tcheckErr(t, err, \"building named object\")\n\n\t// build two tags pointing to the same image\n\ttag1 := \"1.0.0\"\n\ttag2 := \"latest\"\n\tcreateRepositoryWithMultipleIdenticalTags(t, env, imageName.Name(), []string{tag1, tag2})\n\n\t// delete one of the tags\n\tref, err := reference.WithTag(imageName, tag1)\n\tcheckErr(t, err, \"building tag reference\")\n\n\ttagURL, err := env.builder.BuildTagURL(ref)\n\tcheckErr(t, err, \"building tag URL\")\n\n\tresp, err := httpDelete(tagURL)\n\tmsg := \"checking tag delete\"\n\tcheckErr(t, err, msg)\n\n\tdefer resp.Body.Close()\n\n\tcheckResponse(t, msg, resp, http.StatusAccepted)\n\n\t// check the other tag is still there\n\ttagsURL, err := env.builder.BuildTagsURL(imageName)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error building tags url: %v\", err)\n\t}\n\tresp, err = http.Get(tagsURL)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error getting tags: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tdec := json.NewDecoder(resp.Body)\n\tvar tagsResponse tagsAPIResponse\n\tif err := dec.Decode(&tagsResponse); err != nil {\n\t\tt.Fatalf(\"unexpected error decoding response: %v\", err)\n\t}\n\n\tif tagsResponse.Name != imageName.Name() {\n\t\tt.Fatalf(\"tags name should match image name: %v != %v\", tagsResponse.Name, imageName)\n\t}\n\n\tif len(tagsResponse.Tags) != 1 {\n\t\tt.Fatalf(\"expected 1 tag, got %d: %v\", len(tagsResponse.Tags), tagsResponse.Tags)\n\t}\n\n\tif tagsResponse.Tags[0] != tag2 {\n\t\tt.Fatalf(\"expected tag to be %q, got %q\", tagsResponse.Tags[0], tag2)\n\t}\n}",
"func DeleteTags(elemTags map[string]string, blacklist map[string]bool) {\n\tfor key, isPrefix := range blacklist {\n\t\tif isPrefix {\n\t\t\tfor tag := range elemTags {\n\t\t\t\tif strings.HasPrefix(tag, key) {\n\t\t\t\t\tdelete(elemTags, tag)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tdelete(elemTags, key)\n\t\t}\n\t}\n}",
"func (th *TagHandler) uniqueFilterAndAddTags(mName string, mHostname *gostatsd.Source, mTags *gostatsd.Tags) bool {\n\tif len(th.filters) == 0 {\n\t\t*mTags = uniqueTags(*mTags, th.tags)\n\t\treturn true\n\t}\n\n\tdropTags := map[string]struct{}{}\n\n\tfor _, filter := range th.filters {\n\t\tif len(filter.MatchMetrics) > 0 && !filter.MatchMetrics.MatchAny(mName) { // returns false if nothing present\n\t\t\t// name doesn't match an include, stop\n\t\t\tcontinue\n\t\t}\n\n\t\t// this list may be empty, and therefore return false\n\t\tif filter.ExcludeMetrics.MatchAny(mName) { // returns false if nothing present\n\t\t\t// name matches an exclude, stop\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(filter.MatchTags) > 0 && !filter.MatchTags.MatchAnyMultiple(*mTags) { // returns false if either list is empty\n\t\t\t// no tags match\n\t\t\tcontinue\n\t\t}\n\n\t\tif filter.DropMetric {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, dropFilter := range filter.DropTags {\n\t\t\tfor _, tag := range *mTags {\n\t\t\t\tif dropFilter.Match(tag) {\n\t\t\t\t\tdropTags[tag] = present\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif filter.DropHost {\n\t\t\t*mHostname = \"\"\n\t\t}\n\t}\n\n\t*mTags = uniqueTagsWithSeen(dropTags, *mTags, th.tags)\n\treturn true\n}",
"func mergeTags(generalTags []*tag, infraTags []*tag) []*tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}",
"func TagsDiff(sqsTags map[string]string, newTags map[string]string) (removed, added map[string]string) {\n\tremoved = map[string]string{}\n\tfor k, v := range sqsTags {\n\t\tif _, ok := newTags[k]; !ok {\n\t\t\tremoved[k] = v\n\t\t}\n\t}\n\n\tadded = map[string]string{}\n\tfor k, newV := range newTags {\n\t\tif oldV, ok := sqsTags[k]; !ok || oldV != newV {\n\t\t\tadded[k] = newV\n\t\t}\n\t}\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
MergeTagSlices merges two tag slices, the second slice will always win for duplicates.
|
// MergeTagSlices combines two TagInformation slices into one.
// Rules applied:
//   - Meta-tags are ignored by this pass and carried through untouched.
//   - For standard tags, entries in ToAdd win over same-ID entries in Original.
//   - The combined result is finally passed through RemoveDuplicateTags, which
//     also resolves exclusionary tags and any repeats within ToAdd itself.
func MergeTagSlices(Original []TagInformation, ToAdd []TagInformation) []TagInformation {
	// shadowed reports whether a non-meta tag in Original is displaced by a
	// non-meta tag with the same ID in ToAdd.
	shadowed := func(candidate TagInformation) bool {
		if candidate.IsMeta {
			return false // meta-tags are never displaced
		}
		for _, incoming := range ToAdd {
			if !incoming.IsMeta && incoming.ID == candidate.ID {
				return true
			}
		}
		return false
	}

	// In-place filter: keep only the Original entries that are not overridden,
	// reusing Original's backing array (same mutation behavior as the
	// remove-and-shrink approach, and preserves relative order).
	kept := Original[:0]
	for _, existing := range Original {
		if !shadowed(existing) {
			kept = append(kept, existing)
		}
	}

	// Append the additions and let RemoveDuplicateTags clean up anything left.
	return RemoveDuplicateTags(append(kept, ToAdd...))
}
|
[
"func mergeTags(t1, t2 []Tag) []Tag {\n\tn := len(t1) + len(t2)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tout := make([]Tag, 0, n)\n\tout = append(out, t1...)\n\tout = append(out, t2...)\n\n\treturn SortTags(out)\n}",
"func MergeIndexedSlices(first *IndexedSlices, second *IndexedSlices) (*IndexedSlices, error) {\n\tif first == nil {\n\t\treturn second, nil\n\t}\n\tif second == nil {\n\t\treturn first, nil\n\t}\n\tif first.ConcatTensors.Dtype != second.ConcatTensors.Dtype {\n\t\treturn nil, fmt.Errorf(\"Could not merge two IndexedSlices with different types\")\n\t}\n\tif first.ConcatTensors.Dims[1] != second.ConcatTensors.Dims[1] {\n\t\treturn nil, fmt.Errorf(\"Could not merge two IndexedSlices with different widths\")\n\t}\n\theight := first.ConcatTensors.Dims[0] + second.ConcatTensors.Dims[0]\n\twidth := first.ConcatTensors.Dims[1]\n\tdtype := first.ConcatTensors.Dtype\n\ttensor := NewEmptyTensor([]int64{height, width}, dtype)\n\tvar ids []int64\n\tfor i, id := range first.Ids {\n\t\ttensor.SetRow(int64(i), first.ConcatTensors.GetRow(int64(i)))\n\t\tids = append(ids, id)\n\t}\n\tstart := len(ids)\n\tfor i, id := range second.Ids {\n\t\ttensor.SetRow(int64(start+i), second.ConcatTensors.GetRow(int64(i)))\n\t\tids = append(ids, id)\n\t}\n\treturn NewIndexedSlices(tensor, ids), nil\n}",
"func mergeTagSets(s1, s2, scratch tagSet) tagSet {\n\ta := scratch\n\ti, j, k := 0, 0, 0\n\tfor ; i < len(s1) && j < len(s2) && k < len(a); k++ {\n\t\tif s1[i].key == s2[j].key {\n\t\t\ta[k] = s2[j]\n\t\t\ti++\n\t\t\tj++\n\t\t} else if s1[i].key < s2[j].key {\n\t\t\ta[k] = s1[i]\n\t\t\ti++\n\t\t} else {\n\t\t\ta[k] = s2[j]\n\t\t\tj++\n\t\t}\n\t}\n\tif i < len(s1) {\n\t\tk += copy(a[k:], s1[i:])\n\t}\n\tif j < len(s2) {\n\t\tk += copy(a[k:], s2[j:])\n\t}\n\treturn a[:k]\n}",
"func SliceMerge(slice1, slice2 []interface{}) (c []interface{}) {\r\n\tc = append(slice1, slice2...)\r\n\treturn\r\n}",
"func MergeTwoSorted(slice1 []int, slice2 []int, slice12 []int) {\n\tfmt.Println()\n\t// fmt.Println(\"MergeTwoSorted():\", len(slice1), len(slice2), len(slice12))\n\t// fmt.Println(\"MergeTwoSorted():\", slice1, slice2, slice12)\n\ti, j, k := 0, 0, 0\n\tfor {\n\t\t// fmt.Println(\"slice1[\", i, \"]=\", slice1[i])\n\t\t// fmt.Println(\"slice2[\", j, \"] =\", slice2[j])\n\t\tif slice1[i] < slice2[j] {\n\t\t\tslice12[k] = slice1[i]\n\t\t\tk++\n\t\t\ti++\n\t\t} else if slice1[i] == slice2[j] {\n\t\t\tslice12[k] = slice1[i]\n\t\t\tk++\n\t\t\ti++\n\n\t\t\tslice12[k] = slice2[j]\n\t\t\tk++\n\t\t\tj++\n\t\t} else if slice1[i] > slice2[j] {\n\t\t\tslice12[k] = slice2[j]\n\t\t\tk++\n\t\t\tj++\n\t\t}\n\n\t\tif i == len(slice1) {\n\t\t\tfor j < len(slice2) {\n\t\t\t\tslice12[k] = slice2[j]\n\t\t\t\tk++\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\n\t\tif j == len(slice2) {\n\t\t\tfor i < len(slice1) {\n\t\t\t\tslice12[k] = slice1[i]\n\t\t\t\tk++\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\n\t\tif k == len(slice12) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// fmt.Println(\"MergeTwoSorted():\", slice12)\n}",
"func (p *plugin) concatTags(tags1 *structtag.Tags, tags2 *structtag.Tags) (*structtag.Tags, error) {\n\tif tags1.Len() == 0 {\n\t\treturn tags2, nil\n\t}\n\tif tags2.Len() == 0 {\n\t\treturn tags1, nil\n\t}\n\n\tfor _, t2 := range tags2.Tags() {\n\t\tvar found bool\n\t\tfor _, t1 := range tags1.Tags() {\n\t\t\tif t1.Key == t2.Key {\n\t\t\t\tif len(t1.Name) == 0 {\n\t\t\t\t\tt1.Name = t2.Name\n\t\t\t\t}\n\t\t\t\tif t1.Options == nil || len(t1.Options) == 0 {\n\t\t\t\t\tt1.Options = t2.Options\n\t\t\t\t}\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tvar err error\n\t\t\ts := tags1.String() + \" \" + t2.String()\n\t\t\ttags1, err = structtag.Parse(s)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to parse tags '%s': %s\", s, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tags1, nil\n}",
"func (t TagSet) Merge(more TagSet) TagSet {\n\tmerged := t[:]\n\treturn append(merged, more...)\n}",
"func mergeTags(generalTags []*tag, infraTags []*tag) []*tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}",
"func mergeSortedSlices(a []uint64, b []uint64) (c []uint64) {\n\tmaxa := len(a)\n\tmaxb := len(b)\n\n\t// shortcuts:\n\tif maxa == 0 {\n\t\treturn b\n\t}\n\tif maxb == 0 {\n\t\treturn a\n\t}\n\n\t// make it (potentially) too long and truncate later\n\tc = make([]uint64, maxa+maxb)\n\n\tidxa, idxb := 0, 0\n\tfor j := 0; j < len(c); j++ {\n\t\t// if we're out of a or b, just use the remainder of the other one\n\t\tif idxa >= maxa {\n\t\t\t// a is done, copy remainder of b\n\t\t\tj += copy(c[j:], b[idxb:])\n\t\t\tc = c[:j] // truncate empty section of c\n\t\t\tbreak\n\t\t}\n\t\tif idxb >= maxb {\n\t\t\t// b is done, copy remainder of a\n\t\t\tj += copy(c[j:], a[idxa:])\n\t\t\tc = c[:j] // truncate empty section of c\n\t\t\tbreak\n\t\t}\n\n\t\tvala, valb := a[idxa], b[idxb]\n\t\tif vala < valb { // a is less so append that\n\t\t\tc[j] = vala\n\t\t\tidxa++\n\t\t} else if vala > valb { // b is less so append that\n\t\t\tc[j] = valb\n\t\t\tidxb++\n\t\t} else { // they're equal\n\t\t\tc[j] = vala\n\t\t\tidxa++\n\t\t\tidxb++\n\t\t}\n\t}\n\treturn\n}",
"func MergeTags(generalTags []*Tag, infraTags []*Tag) []*Tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.Key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.Key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}",
"func merge(slice1, slice2 []string) []string {\n\tsorted := make([]string, 0)\n\n\tfor len(slice1) > 0 && len(slice2) > 0 {\n\t\tif Compare(slice1[0], slice2[0]) {\n\t\t\tsorted = append(sorted, slice1[0])\n\t\t\tslice1 = slice1[1:]\n\t\t} else {\n\t\t\tsorted = append(sorted, slice2[0])\n\t\t\tslice2 = slice2[1:]\n\t\t}\n\t}\n\n\tfor _, x := range slice1 {\n\t\tsorted = append(sorted, x)\n\t}\n\tfor _, x := range slice2 {\n\t\tsorted = append(sorted, x)\n\t}\n\n\treturn sorted\n}",
"func merge(slice1, slice2 []int) []int {\n\tslice := make([]int, 0, len(slice1)+len(slice2))\n\n\tvar counter1, counter2 int\n\n\tfor counter1 != len(slice1) && counter2 != len(slice2) {\n\t\tif slice1[counter1] < slice2[counter2] {\n\t\t\tslice = append(slice, slice1[counter1])\n\t\t\tcounter1++\n\t\t} else {\n\t\t\tslice = append(slice, slice2[counter2])\n\t\t\tcounter2++\n\t\t}\n\t}\n\n\tif counter1 != len(slice1) {\n\t\tfor counter1 != len(slice1) {\n\t\t\tslice = append(slice, slice1[counter1])\n\t\t\tcounter1++\n\t\t}\n\t} else {\n\t\tfor counter2 != len(slice2) {\n\t\t\tslice = append(slice, slice2[counter2])\n\t\t\tcounter2++\n\t\t}\n\t}\n\n\treturn slice\n}",
"func (a *StringArray) Merge(b *StringArray) {\n\tif a.Len() == 0 {\n\t\t*a = *b\n\t\treturn\n\t}\n\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\n\t// Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values. Remove them if they exists before\n\t// merging.\n\t// a = a.Deduplicate()\n\t// b = b.Deduplicate()\n\n\tif a.MaxTime() < b.MinTime() {\n\t\ta.Timestamps = append(a.Timestamps, b.Timestamps...)\n\t\ta.Values = append(a.Values, b.Values...)\n\t\treturn\n\t}\n\n\tif b.MaxTime() < a.MinTime() {\n\t\tvar tmp StringArray\n\t\ttmp.Timestamps = append(b.Timestamps, a.Timestamps...)\n\t\ttmp.Values = append(b.Values, a.Values...)\n\t\t*a = tmp\n\t\treturn\n\t}\n\n\tout := NewStringArrayLen(a.Len() + b.Len())\n\ti, j, k := 0, 0, 0\n\tfor i < len(a.Timestamps) && j < len(b.Timestamps) {\n\t\tif a.Timestamps[i] < b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = a.Timestamps[i]\n\t\t\tout.Values[k] = a.Values[i]\n\t\t\ti++\n\t\t} else if a.Timestamps[i] == b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\ti++\n\t\t\tj++\n\t\t} else {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\tj++\n\t\t}\n\t\tk++\n\t}\n\n\tif i < len(a.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], a.Timestamps[i:])\n\t\tcopy(out.Values[k:], a.Values[i:])\n\t\tk += n\n\t} else if j < len(b.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], b.Timestamps[j:])\n\t\tcopy(out.Values[k:], b.Values[j:])\n\t\tk += n\n\t}\n\n\ta.Timestamps = out.Timestamps[:k]\n\ta.Values = out.Values[:k]\n}",
"func mergeRightTags(tagsLeft, tagsRight map[string]string) map[string]string {\n\tif tagsLeft == nil && tagsRight == nil {\n\t\treturn nil\n\t}\n\tif len(tagsRight) == 0 {\n\t\treturn tagsLeft\n\t}\n\tif len(tagsLeft) == 0 {\n\t\treturn tagsRight\n\t}\n\n\tresult := make(map[string]string, len(tagsLeft)+len(tagsRight))\n\tfor k, v := range tagsLeft {\n\t\tresult[k] = v\n\t}\n\tfor k, v := range tagsRight {\n\t\tresult[k] = v\n\t}\n\treturn result\n}",
"func (a *UnsignedArray) Merge(b *UnsignedArray) {\n\tif a.Len() == 0 {\n\t\t*a = *b\n\t\treturn\n\t}\n\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\n\t// Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values. Remove them if they exists before\n\t// merging.\n\t// a = a.Deduplicate()\n\t// b = b.Deduplicate()\n\n\tif a.MaxTime() < b.MinTime() {\n\t\ta.Timestamps = append(a.Timestamps, b.Timestamps...)\n\t\ta.Values = append(a.Values, b.Values...)\n\t\treturn\n\t}\n\n\tif b.MaxTime() < a.MinTime() {\n\t\tvar tmp UnsignedArray\n\t\ttmp.Timestamps = append(b.Timestamps, a.Timestamps...)\n\t\ttmp.Values = append(b.Values, a.Values...)\n\t\t*a = tmp\n\t\treturn\n\t}\n\n\tout := NewUnsignedArrayLen(a.Len() + b.Len())\n\ti, j, k := 0, 0, 0\n\tfor i < len(a.Timestamps) && j < len(b.Timestamps) {\n\t\tif a.Timestamps[i] < b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = a.Timestamps[i]\n\t\t\tout.Values[k] = a.Values[i]\n\t\t\ti++\n\t\t} else if a.Timestamps[i] == b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\ti++\n\t\t\tj++\n\t\t} else {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\tj++\n\t\t}\n\t\tk++\n\t}\n\n\tif i < len(a.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], a.Timestamps[i:])\n\t\tcopy(out.Values[k:], a.Values[i:])\n\t\tk += n\n\t} else if j < len(b.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], b.Timestamps[j:])\n\t\tcopy(out.Values[k:], b.Values[j:])\n\t\tk += n\n\t}\n\n\ta.Timestamps = out.Timestamps[:k]\n\ta.Values = out.Values[:k]\n}",
"func mergeInjectionArrays(first, second []apimodel.Injection) []apimodel.Injection {\n\tnewslice := make([]apimodel.Injection, len(first)+len(second))\n\tcopy(newslice, first)\n\tcopy(newslice[len(first):], second)\n\treturn newslice\n}",
"func mergeTags(localTags []*ecs.Tag, ec2Tags []*ecs.Tag) []*ecs.Tag {\n\ttagsMap := make(map[string]string)\n\n\tfor _, ec2Tag := range ec2Tags {\n\t\ttagsMap[aws.StringValue(ec2Tag.Key)] = aws.StringValue(ec2Tag.Value)\n\t}\n\n\tfor _, localTag := range localTags {\n\t\ttagsMap[aws.StringValue(localTag.Key)] = aws.StringValue(localTag.Value)\n\t}\n\n\treturn utils.MapToTags(tagsMap)\n}",
"func mergeExerciseArrays(first, second []apimodel.Exercise) []apimodel.Exercise {\n\tnewslice := make([]apimodel.Exercise, len(first)+len(second))\n\tcopy(newslice, first)\n\tcopy(newslice[len(first):], second)\n\treturn newslice\n}",
"func Merge(left, right []string) []string {\n\n\tsize, i, j := len(left)+len(right), 0, 0\n\tslice := make([]string, size, size)\n\n\tfor k := 0; k < size; k++ {\n\t\tif i > len(left)-1 && j <= len(right)-1 {\n\t\t\tslice[k] = right[j]\n\t\t\tj++\n\t\t} else if j > len(right)-1 && i <= len(left)-1 {\n\t\t\tslice[k] = left[i]\n\t\t\ti++\n\t\t} else if left[i] < right[j] {\n\t\t\tslice[k] = left[i]\n\t\t\ti++\n\t\t} else {\n\t\t\tslice[k] = right[j]\n\t\t\tj++\n\t\t}\n\t}\n\treturn slice\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.