query
stringlengths 8
6.75k
| document
stringlengths 9
1.89M
| negatives
listlengths 19
19
| metadata
dict |
---|---|---|---|
check the first parameter, true if it wants only a Renderer
|
// hasRendererParam reports whether the handler's first parameter is a
// pointer to the renderer type, i.e. the handler wants only a Renderer.
func hasRendererParam(handlerType reflect.Type) bool {
	// A handler with no parameters cannot want a Renderer.
	if handlerType.NumIn() == 0 {
		return false
	}
	// The first parameter must be a pointer whose element type is the renderer.
	first := handlerType.In(0)
	return first.Kind() == reflect.Ptr && first.Elem() == rendererType
}
|
[
"func hasContextAndRenderer(handlerType reflect.Type) bool {\n\n\t//first check if we have pass 2 arguments\n\tif handlerType.NumIn() < 2 {\n\t\treturn false\n\t}\n\n\tfirstParamIsContext := hasContextParam(handlerType)\n\n\t//the first argument/parameter is always context if exists otherwise it's only Renderer or ResponseWriter,Request.\n\tif firstParamIsContext == false {\n\t\treturn false\n\t}\n\n\tp2 := handlerType.In(1)\n\tif p2.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//but if the first argument is a context, true\n\tif p2.Elem() == rendererType {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func IsRendered(wid fyne.Widget) bool {\n\trenderersLock.RLock()\n\t_, found := renderers[wid]\n\trenderersLock.RUnlock()\n\treturn found\n}",
"func (DrawTexture) IsDrawAction() {}",
"func Renderer(wid fyne.Widget) fyne.WidgetRenderer {\n\tif wid == nil {\n\t\treturn nil\n\t}\n\n\tif wd, ok := wid.(isBaseWidget); ok {\n\t\tif wd.super() != nil {\n\t\t\twid = wd.super()\n\t\t}\n\t}\n\n\trenderersLock.RLock()\n\trinfo, ok := renderers[wid]\n\trenderersLock.RUnlock()\n\tif !ok {\n\t\trinfo = &rendererInfo{renderer: wid.CreateRenderer()}\n\t\trenderersLock.Lock()\n\t\trenderers[wid] = rinfo\n\t\trenderersLock.Unlock()\n\t}\n\n\tif rinfo == nil {\n\t\treturn nil\n\t}\n\n\trinfo.setAlive()\n\n\treturn rinfo.renderer\n}",
"func (i *Image) IsRenderable() bool {\n\treturn i.renderable\n}",
"func (renderbuffer Renderbuffer) IsRenderbuffer() bool {\n\treturn gl.IsRenderbuffer(uint32(renderbuffer))\n}",
"func (DrawText) IsDrawAction() {}",
"func IsRenderbuffer(rb Renderbuffer) bool {\n\treturn gl.IsRenderbuffer(rb.Value)\n}",
"func (isRenderable) Filter(e ces.Entity) bool {\n\t_, ok := e.(renderComponent)\n\treturn ok\n}",
"func (self *TileSprite) Renderable() bool{\n return self.Object.Get(\"renderable\").Bool()\n}",
"func IsRenderbuffer(renderbuffer uint32) bool {\n\treturn glBoolToBool(C.glIsRenderbuffer(C.GLuint(renderbuffer)))\n}",
"func (r *Renderer) RenderFrame() RenderFrame {\n\tif targetA {return frameB}\n\treturn frameA\n}",
"func (r *RenderThrottler) NeedRendering() bool {\n\tif !r.isActive {\n\t\treturn true // always render when throttler is disabled\n\t}\n\treturn r.needRendering\n}",
"func isDraw(s Abs) bool {\n\tif s.err == nil {\n\t\treturn false\n\t}\n\t_, ok := s.err.(checkmateError)\n\treturn !ok\n}",
"func (me TxsdFeBlendTypeMode) IsScreen() bool { return me.String() == \"screen\" }",
"func IsGraphic(r rune) bool {\n\treturn is(graphic, r)\n}",
"func (rv *River) Renderer(r Renderer) *River {\n\trv.renderer = r\n\treturn rv\n}",
"func DetectRendererType(filename string, input io.Reader) string {\n\tbuf, err := io.ReadAll(input)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tfor _, renderer := range renderers {\n\t\tif detector, ok := renderer.(RendererContentDetector); ok && detector.CanRender(filename, bytes.NewReader(buf)) {\n\t\t\treturn renderer.Name()\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (b *Baa) Render() Renderer {\n\treturn b.GetDI(\"render\").(Renderer)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
check if there are two parameters, true if it wants a Context followed by a Renderer
|
// hasContextAndRenderer reports whether the handler takes a Context as its
// first parameter followed by a *Renderer as its second.
func hasContextAndRenderer(handlerType reflect.Type) bool {
	// Need at least two parameters to match (Context, Renderer).
	if handlerType.NumIn() < 2 {
		return false
	}
	// The leading parameter must be the context; otherwise the handler is of
	// the Renderer-only or (ResponseWriter, Request) form.
	if !hasContextParam(handlerType) {
		return false
	}
	// The second parameter must be a pointer to the renderer type.
	second := handlerType.In(1)
	return second.Kind() == reflect.Ptr && second.Elem() == rendererType
}
|
[
"func hasRendererParam(handlerType reflect.Type) bool {\n\t//if the handler doesn't take arguments, false\n\tif handlerType.NumIn() == 0 {\n\t\treturn false\n\t}\n\n\t//if the first argument is not a pointer, false\n\tp1 := handlerType.In(0)\n\tif p1.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//but if the first argument is a renderer, true\n\tif p1.Elem() == rendererType {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (DrawTexture) IsDrawAction() {}",
"func (DrawText) IsDrawAction() {}",
"func hasContextParam(handlerType reflect.Type) bool {\n\t//if the handler doesn't take arguments, false\n\tif handlerType.NumIn() == 0 {\n\t\treturn false\n\t}\n\n\t//if the first argument is not a pointer, false\n\tp1 := handlerType.In(0)\n\tif p1.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//but if the first argument is a context, true\n\tif p1.Elem() == contextType {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func requiresContext(handlerType reflect.Type) bool {\n\t//if the method doesn't take arguments, no\n\tif handlerType.NumIn() == 0 {\n\t\treturn false\n\t}\n\n\t//if the first argument is not a pointer, no\n\ta0 := handlerType.In(0)\n\tif a0.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//if the first argument is a context, yes\n\tif a0.Elem() == contextType {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TxsdFeBlendTypeMode) IsScreen() bool { return me.String() == \"screen\" }",
"func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool {\n\tselector, indent, err := gosec.GetCallInfo(n, ctx)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif selector == \"exec\" && indent == \"CommandContext\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (me TxsdColorProfileTypeRenderingIntent) IsPerceptual() bool { return me.String() == \"perceptual\" }",
"func (receiver *ContextNode) HasContext() bool {\n\treturn receiver.child != nil\n}",
"func y(s string, r int, ctx context.Context, x int) { // MATCH /context.Context should be the first parameter of a function/\n}",
"func (r *Renderer) RenderFrame() RenderFrame {\n\tif targetA {return frameB}\n\treturn frameA\n}",
"func (me TxsdFeBlendTypeMode) IsMultiply() bool { return me.String() == \"multiply\" }",
"func (f *Func) WithContext() bool {\n\treturn f.withContext == 1\n}",
"func (c *Context2D) DrawFocusIfNeeded() { c.Call(\"drawFocusIfNeeded\") }",
"func Context(c gfx.Context) gfx.Context {\n\treturn Checker(c)\n}",
"func (c *Context2D) IsPointInStroke(p *Point) bool { return c.Call(\"isPointInStroke\", p.X, p.Y).Bool() }",
"func (me TxsdFeBlendTypeMode) IsNormal() bool { return me.String() == \"normal\" }",
"func (self *BitmapData) Context() dom.CanvasRenderingContext2D{\n return WrapCanvasRenderingContext2D(self.Object.Get(\"context\"))\n}",
"func (pos *Pos) IsPassed(x, y int) bool {\n\treturn (*pos.maps)[x*pos.Len+y] == 10 //has passd\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetNerCustomizedSeaEcom invokes the alinlp.GetNerCustomizedSeaEcom API synchronously
|
// GetNerCustomizedSeaEcom invokes the alinlp.GetNerCustomizedSeaEcom API synchronously.
func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {
	// Build an empty response shell, then let the common RPC path populate it.
	resp := CreateGetNerCustomizedSeaEcomResponse()
	return resp, client.DoAction(request, resp)
}
|
[
"func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerCustomizedSeaEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerCustomizedSeaEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerCustomizedSeaEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {\n\tresponse = &GetNerCustomizedSeaEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *Client) GetWsCustomizedChEcomContentWithCallback(request *GetWsCustomizedChEcomContentRequest, callback func(response *GetWsCustomizedChEcomContentResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChEcomContentResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChEcomContent(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetWsCustomizedChEcomContent(request *GetWsCustomizedChEcomContentRequest) (response *GetWsCustomizedChEcomContentResponse, err error) {\n\tresponse = CreateGetWsCustomizedChEcomContentResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (client *Client) GetWsCustomizedChEcomContentWithChan(request *GetWsCustomizedChEcomContentRequest) (<-chan *GetWsCustomizedChEcomContentResponse, <-chan error) {\n\tresponseChan := make(chan *GetWsCustomizedChEcomContentResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetWsCustomizedChEcomContent(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client ModelClient) GetCustomPrebuiltEntityRolesSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}",
"func getProductsSameEAN(c chan []ZunkaSiteProductRx, ean string) {\n\tproducts := []ZunkaSiteProductRx{}\n\n\t// Request product add.\n\tclient := &http.Client{}\n\t// title = \"GABINETE COOLER MASTER MASTERBOX LITE 3.1 TG LATERAL EM VIDRO TEMPERADO ATX/E-ATX/MINI-ITX/MICRO-AT\"\n\treq, err := http.NewRequest(\"GET\", zunkaSiteHost()+\"/setup/products-same-ean\", nil)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// Query params\n\tq := req.URL.Query()\n\tq.Add(\"ean\", ean)\n\treq.URL.RawQuery = q.Encode()\n\t// Head.\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.SetBasicAuth(zunkaSiteUser(), zunkaSitePass())\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// res, err := http.Post(\"http://localhost:3080/setup/product/add\", \"application/json\", bytes.NewBuffer(reqBody))\n\tdefer res.Body.Close()\n\n\t// Result.\n\tresBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// No 200 status.\n\tif res.StatusCode != 200 {\n\t\tError.Print(errors.New(fmt.Sprintf(\"Getting products same Ean from zunkasite.\\nstatus: %v\\nbody: %v\", res.StatusCode, string(resBody))))\n\t\tc <- products\n\t\treturn\n\t}\n\terr = json.Unmarshal(resBody, &products)\n\tif err != nil {\n\t\tError.Print(err)\n\t}\n\t// Debug.Printf(\"Product[0]: %v\", products[0])\n\tc <- products\n\treturn\n}",
"func (a *AllApiService) EnterpriseGetEnterpriseNetworkSegments(ctx _context.Context, body EnterpriseGetEnterpriseNetworkSegments) ([]EnterpriseGetEnterpriseNetworkSegmentsResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []EnterpriseGetEnterpriseNetworkSegmentsResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseNetworkSegments\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil 
{\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []EnterpriseGetEnterpriseNetworkSegmentsResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func (client ModelClient) ListCustomPrebuiltEntitiesSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}",
"func (client AppsClient) ListAvailableCustomPrebuiltDomainsForCultureSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}",
"func (client ModelClient) ListCustomPrebuiltEntitiesResponder(resp *http.Response) (result ListEntityExtractor, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (client *Client) UpdateEnterpriseCustomInfoWithCallback(request *UpdateEnterpriseCustomInfoRequest, callback func(response *UpdateEnterpriseCustomInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *UpdateEnterpriseCustomInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.UpdateEnterpriseCustomInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client ModelClient) GetCustomPrebuiltEntityRolesResponder(resp *http.Response) (result ListEntityRole, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (cc *Cluster) AsyncSetrange(args ...interface{}) redis.Future {\n\treturn cc.AsyncCall(cmdSetrange, args...)\n}",
"func (client AppsClient) ListAvailableCustomPrebuiltDomainsSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}",
"func (fM *FeslManager) NuGetPersonas(event GameSpy.EventClientTLSCommand) {\n\tif !event.Client.IsActive {\n\t\tlog.Noteln(\"Client left\")\n\t\treturn\n\t}\n\n\tif event.Client.RedisState.Get(\"clientType\") == \"server\" {\n\t\tfM.NuGetPersonasServer(event)\n\t\treturn\n\t}\n\n\trows, err := fM.stmtGetHeroesByUserID.Query(event.Client.RedisState.Get(\"uID\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpersonaPacket := make(map[string]string)\n\tpersonaPacket[\"TXN\"] = \"NuGetPersonas\"\n\n\tvar i = 0\n\tfor rows.Next() {\n\t\tvar id, userID, heroName, online string\n\t\terr := rows.Scan(&id, &userID, &heroName, &online)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t\treturn\n\t\t}\n\t\tpersonaPacket[\"personas.\"+strconv.Itoa(i)] = heroName\n\t\tevent.Client.RedisState.Set(\"ownerId.\"+strconv.Itoa(i+1), id)\n\t\ti++\n\t}\n\n\tevent.Client.RedisState.Set(\"numOfHeroes\", strconv.Itoa(i))\n\n\tpersonaPacket[\"personas.[]\"] = strconv.Itoa(i)\n\n\tevent.Client.WriteFESL(event.Command.Query, personaPacket, event.Command.PayloadID)\n\tfM.logAnswer(event.Command.Query, personaPacket, event.Command.PayloadID)\n}",
"func (client AppsClient) ListAvailableCustomPrebuiltDomainsForCultureResponder(resp *http.Response) (result ListPrebuiltDomain, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetNerCustomizedSeaEcomWithChan invokes the alinlp.GetNerCustomizedSeaEcom API asynchronously
|
// GetNerCustomizedSeaEcomWithChan invokes the alinlp.GetNerCustomizedSeaEcom
// API asynchronously, delivering exactly one value on either the response
// channel or the error channel before both are closed.
func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {
	// Buffered (capacity 1) so the async task never blocks on a slow receiver.
	responseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)
	errChan := make(chan error, 1)
	task := func() {
		defer close(responseChan)
		defer close(errChan)
		if response, err := client.GetNerCustomizedSeaEcom(request); err != nil {
			errChan <- err
		} else {
			responseChan <- response
		}
	}
	if err := client.AddAsyncTask(task); err != nil {
		// Scheduling failed: the task will never run, so report the error
		// and close both channels here instead.
		errChan <- err
		close(responseChan)
		close(errChan)
	}
	return responseChan, errChan
}
|
[
"func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerCustomizedSeaEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerCustomizedSeaEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {\n\tresponse = CreateGetNerCustomizedSeaEcomResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (client *Client) GetWsCustomizedChEcomContentWithChan(request *GetWsCustomizedChEcomContentRequest) (<-chan *GetWsCustomizedChEcomContentResponse, <-chan error) {\n\tresponseChan := make(chan *GetWsCustomizedChEcomContentResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetWsCustomizedChEcomContent(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetWsCustomizedChEcomContentWithCallback(request *GetWsCustomizedChEcomContentRequest, callback func(response *GetWsCustomizedChEcomContentResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChEcomContentResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChEcomContent(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) OemSitingSelctionWithChan(request *OemSitingSelctionRequest) (<-chan *OemSitingSelctionResponse, <-chan error) {\n\tresponseChan := make(chan *OemSitingSelctionResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.OemSitingSelction(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetWsCustomizedChO2OWithChan(request *GetWsCustomizedChO2ORequest) (<-chan *GetWsCustomizedChO2OResponse, <-chan error) {\n\tresponseChan := make(chan *GetWsCustomizedChO2OResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetWsCustomizedChO2O(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) UpdateEnterpriseCustomInfoWithChan(request *UpdateEnterpriseCustomInfoRequest) (<-chan *UpdateEnterpriseCustomInfoResponse, <-chan error) {\n\tresponseChan := make(chan *UpdateEnterpriseCustomInfoResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.UpdateEnterpriseCustomInfo(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {\n\tresponse = &GetNerCustomizedSeaEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *Client) CoreEngineWithChan(request *CoreEngineRequest) (<-chan *CoreEngineResponse, <-chan error) {\n\tresponseChan := make(chan *CoreEngineResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.CoreEngine(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) UnAssociateEnsEipAddressWithChan(request *UnAssociateEnsEipAddressRequest) (<-chan *UnAssociateEnsEipAddressResponse, <-chan error) {\n\tresponseChan := make(chan *UnAssociateEnsEipAddressResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.UnAssociateEnsEipAddress(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) OemSitingSelctionWithCallback(request *OemSitingSelctionRequest, callback func(response *OemSitingSelctionResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *OemSitingSelctionResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.OemSitingSelction(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetKeywordChEcomWithChan(request *GetKeywordChEcomRequest) (<-chan *GetKeywordChEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetKeywordChEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetKeywordChEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetKeywordChEcomWithCallback(request *GetKeywordChEcomRequest, callback func(response *GetKeywordChEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetKeywordChEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetKeywordChEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func ProcessInChannel(wg *sync.WaitGroup, scConfig *common.SCConfiguration) {\n\tdefer wg.Done()\n\tfor { //nolint:gosimple\n\t\tselect {\n\t\tcase d := <-scConfig.EventInCh:\n\t\t\tif d.Type == channel.LISTENER {\n\t\t\t\tlog.Warnf(\"amqp disabled,no action taken: request to create listener address %s was called,but transport is not enabled\", d.Address)\n\t\t\t} else if d.Type == channel.SENDER {\n\t\t\t\tlog.Warnf(\"no action taken: request to create sender for address %s was called,but transport is not enabled\", d.Address)\n\t\t\t} else if d.Type == channel.EVENT && d.Status == channel.NEW {\n\t\t\t\tif e, err := v1event.GetCloudNativeEvents(*d.Data); err != nil {\n\t\t\t\t\tlog.Warnf(\"error marshalling event data\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Warnf(\"amqp disabled,no action taken(can't send to a desitination): logging new event %s\\n\", e.String())\n\t\t\t\t}\n\t\t\t\tout := channel.DataChan{\n\t\t\t\t\tAddress: d.Address,\n\t\t\t\t\tData: d.Data,\n\t\t\t\t\tStatus: channel.SUCCESS,\n\t\t\t\t\tType: channel.EVENT,\n\t\t\t\t\tProcessEventFn: d.ProcessEventFn,\n\t\t\t\t}\n\t\t\t\tif d.OnReceiveOverrideFn != nil {\n\t\t\t\t\tif err := d.OnReceiveOverrideFn(*d.Data, &out); err != nil {\n\t\t\t\t\t\tlog.Errorf(\"error onReceiveOverrideFn %s\", err)\n\t\t\t\t\t\tout.Status = channel.FAILED\n\t\t\t\t\t} else {\n\t\t\t\t\t\tout.Status = channel.SUCCESS\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tscConfig.EventOutCh <- &out\n\t\t\t} else if d.Type == channel.STATUS && d.Status == channel.NEW {\n\t\t\t\tlog.Warnf(\"amqp disabled,no action taken(can't send to a destination): logging new status check %v\\n\", d)\n\t\t\t\tout := channel.DataChan{\n\t\t\t\t\tAddress: d.Address,\n\t\t\t\t\tData: d.Data,\n\t\t\t\t\tStatus: channel.SUCCESS,\n\t\t\t\t\tType: channel.EVENT,\n\t\t\t\t\tProcessEventFn: d.ProcessEventFn,\n\t\t\t\t}\n\t\t\t\tif d.OnReceiveOverrideFn != nil {\n\t\t\t\t\tif err := d.OnReceiveOverrideFn(*d.Data, &out); err != nil {\n\t\t\t\t\t\tlog.Errorf(\"error 
onReceiveOverrideFn %s\", err)\n\t\t\t\t\t\tout.Status = channel.FAILED\n\t\t\t\t\t} else {\n\t\t\t\t\t\tout.Status = channel.SUCCESS\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-scConfig.CloseCh:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (esaclient *ESAClient) reader(respMsgChan chan<- ResponseMessage, stopChan <-chan bool) {\n\tlog.Log(globals.Logger, log.INFO, \"starting reader goroutine\", nil)\n\tdefer log.Log(globals.Logger, log.INFO, \"exiting reader goroutine\", nil)\n\n\treader(esaclient, respMsgChan, stopChan)\n}",
"func (client *Client) BeginVnDialogueWithChan(request *BeginVnDialogueRequest) (<-chan *BeginVnDialogueResponse, <-chan error) {\n\tresponseChan := make(chan *BeginVnDialogueResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.BeginVnDialogue(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetNerChMedicalWithChan(request *GetNerChMedicalRequest) (<-chan *GetNerChMedicalResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerChMedicalResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerChMedical(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) ConfigDdosWithChan(request *ConfigDdosRequest) (<-chan *ConfigDdosResponse, <-chan error) {\nresponseChan := make(chan *ConfigDdosResponse, 1)\nerrChan := make(chan error, 1)\nerr := client.AddAsyncTask(func() {\ndefer close(responseChan)\ndefer close(errChan)\nresponse, err := client.ConfigDdos(request)\nif err != nil {\nerrChan <- err\n} else {\nresponseChan <- response\n}\n})\nif err != nil {\nerrChan <- err\nclose(responseChan)\nclose(errChan)\n}\nreturn responseChan, errChan\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetNerCustomizedSeaEcomWithCallback invokes the alinlp.GetNerCustomizedSeaEcom API asynchronously
|
func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *GetNerCustomizedSeaEcomResponse
var err error
defer close(result)
response, err = client.GetNerCustomizedSeaEcom(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
|
[
"func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerCustomizedSeaEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {\n\tresponse = CreateGetNerCustomizedSeaEcomResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (client *Client) GetWsCustomizedChEcomContentWithCallback(request *GetWsCustomizedChEcomContentRequest, callback func(response *GetWsCustomizedChEcomContentResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChEcomContentResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChEcomContent(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {\n\tresponse = &GetNerCustomizedSeaEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *Client) UpdateEnterpriseCustomInfoWithCallback(request *UpdateEnterpriseCustomInfoRequest, callback func(response *UpdateEnterpriseCustomInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *UpdateEnterpriseCustomInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.UpdateEnterpriseCustomInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *Client) OemSitingSelctionWithCallback(request *OemSitingSelctionRequest, callback func(response *OemSitingSelctionResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *OemSitingSelctionResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.OemSitingSelction(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetCustomerAccountInfoWithCallback(request *GetCustomerAccountInfoRequest, callback func(response *GetCustomerAccountInfoResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *GetCustomerAccountInfoResponse\nvar err error\ndefer close(result)\nresponse, err = client.GetCustomerAccountInfo(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}",
"func Callback(c *gin.Context) {\n\tprovider := c.Param(\"provider\")\n\n\tvar logincode vo.LoginReq\n\tif err := c.ShouldBindQuery(&logincode); err != nil {\n\t\tfmt.Println(\"xxxx\", err)\n\t}\n\n\tfmt.Println(\"provider\", provider, logincode)\n\n\tuserInfo := vo.GetUserInfoFromOauth(provider, logincode.Code, logincode.State)\n\tfmt.Println(\"get user info\", userInfo)\n\n\tif userInfo == nil {\n\t\tc.JSON(http.StatusOK, sailor.HTTPAirdbResponse{\n\t\t\tCode: enum.AirdbSuccess,\n\t\t\tSuccess: true,\n\t\t\tData: vo.LoginResp{\n\t\t\t\tNickname: \"xxx\",\n\t\t\t\tHeadimgurl: \"xxx.png\",\n\t\t\t},\n\t\t})\n\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, sailor.HTTPAirdbResponse{\n\t\tCode: enum.AirdbSuccess,\n\t\tSuccess: true,\n\t\tData: vo.LoginResp{\n\t\t\tNickname: userInfo.Login,\n\t\t\tHeadimgurl: userInfo.AvatarURL,\n\t\t},\n\t})\n}",
"func (client *Client) UnAssociateEnsEipAddressWithCallback(request *UnAssociateEnsEipAddressRequest, callback func(response *UnAssociateEnsEipAddressResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *UnAssociateEnsEipAddressResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.UnAssociateEnsEipAddress(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) ConfigDdosWithCallback(request *ConfigDdosRequest, callback func(response *ConfigDdosResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *ConfigDdosResponse\nvar err error\ndefer close(result)\nresponse, err = client.ConfigDdos(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}",
"func (client *Client) GetWsCustomizedChO2OWithCallback(request *GetWsCustomizedChO2ORequest, callback func(response *GetWsCustomizedChO2OResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChO2OResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChO2O(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetIndustryCommerceInfoWithCallback(request *GetIndustryCommerceInfoRequest, callback func(response *GetIndustryCommerceInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetIndustryCommerceInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetIndustryCommerceInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetKeywordChEcomWithCallback(request *GetKeywordChEcomRequest, callback func(response *GetKeywordChEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetKeywordChEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetKeywordChEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) QueryCustomerAddressListWithCallback(request *QueryCustomerAddressListRequest, callback func(response *QueryCustomerAddressListResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *QueryCustomerAddressListResponse\nvar err error\ndefer close(result)\nresponse, err = client.QueryCustomerAddressList(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}",
"func (client *Client) GetEMapWithCallback(request *GetEMapRequest, callback func(response *GetEMapResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetEMapResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetEMap(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetNerChMedicalWithCallback(request *GetNerChMedicalRequest, callback func(response *GetNerChMedicalResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerChMedicalResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerChMedical(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetUserNetProfileDescriptionWithCallback(request *GetUserNetProfileDescriptionRequest, callback func(response *GetUserNetProfileDescriptionResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetUserNetProfileDescriptionResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetUserNetProfileDescription(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) CoreEngineWithCallback(request *CoreEngineRequest, callback func(response *CoreEngineResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CoreEngineResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CoreEngine(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CreateGetNerCustomizedSeaEcomRequest creates a request to invoke GetNerCustomizedSeaEcom API
|
func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {
request = &GetNerCustomizedSeaEcomRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("alinlp", "2020-06-29", "GetNerCustomizedSeaEcom", "alinlp", "openAPI")
request.Method = requests.POST
return
}
|
[
"func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {\n\tresponse = CreateGetNerCustomizedSeaEcomResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {\n\tresponse = &GetNerCustomizedSeaEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetWsCustomizedChEcomContentRequest() (request *GetWsCustomizedChEcomContentRequest) {\n\trequest = &GetWsCustomizedChEcomContentRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChEcomContent\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerCustomizedSeaEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerCustomizedSeaEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerCustomizedSeaEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func CreateGetWsCustomizedChO2ORequest() (request *GetWsCustomizedChO2ORequest) {\n\trequest = &GetWsCustomizedChO2ORequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChO2O\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateOemSitingSelctionRequest() (request *OemSitingSelctionRequest) {\n\trequest = &OemSitingSelctionRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cloudwf\", \"2017-03-28\", \"OemSitingSelction\", \"cloudwf\", \"openAPI\")\n\treturn\n}",
"func CreateUpdateEnterpriseCustomInfoRequest() (request *UpdateEnterpriseCustomInfoRequest) {\n\trequest = &UpdateEnterpriseCustomInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"finmall\", \"2018-07-23\", \"UpdateEnterpriseCustomInfo\", \"finmall\", \"openAPI\")\n\treturn\n}",
"func CreateGetIndustryCommerceInfoRequest() (request *GetIndustryCommerceInfoRequest) {\n\trequest = &GetIndustryCommerceInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2020-10-22\", \"GetIndustryCommerceInfo\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}",
"func CreateGetNerChMedicalRequest() (request *GetNerChMedicalRequest) {\n\trequest = &GetNerChMedicalRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerChMedical\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateGetKeywordChEcomRequest() (request *GetKeywordChEcomRequest) {\n\trequest = &GetKeywordChEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetKeywordChEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateCoreEngineRequest() (request *CoreEngineRequest) {\n\trequest = &CoreEngineRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"amp\", \"2020-07-08\", \"CoreEngine\", \"/getVersion/demo\", \"ServiceCode\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateGetEMapRequest() (request *GetEMapRequest) {\n\trequest = &GetEMapRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cusanalytic_sc_online\", \"2019-05-24\", \"GetEMap\", \"\", \"\")\n\treturn\n}",
"func CreateGetServiceRequest() (request *GetServiceRequest) {\n\trequest = &GetServiceRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ComputeNestSupplier\", \"2021-05-21\", \"GetService\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *ManagersClient) getExtendedInfoCreateRequest(ctx context.Context, resourceGroupName string, managerName string, options *ManagersClientGetExtendedInfoOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/extendedInformation/vaultExtendedInfo\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managerName == \"\" {\n\t\treturn nil, errors.New(\"parameter managerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managerName}\", url.PathEscape(managerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2016-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *AssociationsClient) getCreateRequest(ctx context.Context, scope string, associationName string, options *AssociationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif associationName == \"\" {\n\t\treturn nil, errors.New(\"parameter associationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{associationName}\", url.PathEscape(associationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreateDescribeExposedInstanceCriteriaRequest() (request *DescribeExposedInstanceCriteriaRequest) {\n\trequest = &DescribeExposedInstanceCriteriaRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"DescribeExposedInstanceCriteria\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateDescribeReservedInstancesRequest() (request *DescribeReservedInstancesRequest) {\n\trequest = &DescribeReservedInstancesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ecs\", \"2014-05-26\", \"DescribeReservedInstances\", \"ecs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func NewCreateanewNcosLevelRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/ncoslevels\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CreateGetNerCustomizedSeaEcomResponse creates a response to parse from GetNerCustomizedSeaEcom response
|
func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {
response = &GetNerCustomizedSeaEcomResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
|
[
"func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {\n\tresponse = CreateGetNerCustomizedSeaEcomResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func CreateGetWsCustomizedChEcomContentResponse() (response *GetWsCustomizedChEcomContentResponse) {\n\tresponse = &GetWsCustomizedChEcomContentResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetNerChMedicalResponse() (response *GetNerChMedicalResponse) {\n\tresponse = &GetNerChMedicalResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetWsCustomizedChO2OResponse() (response *GetWsCustomizedChO2OResponse) {\n\tresponse = &GetWsCustomizedChO2OResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateUpdateEnterpriseCustomInfoResponse() (response *UpdateEnterpriseCustomInfoResponse) {\n\tresponse = &UpdateEnterpriseCustomInfoResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerCustomizedSeaEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerCustomizedSeaEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}",
"func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerCustomizedSeaEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}",
"func CreateOemSitingSelctionResponse() (response *OemSitingSelctionResponse) {\n\tresponse = &OemSitingSelctionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateUnAssociateEnsEipAddressResponse() (response *UnAssociateEnsEipAddressResponse) {\n\tresponse = &UnAssociateEnsEipAddressResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func newCustomsResponse(answers string) (customsResponse, error) {\n\tresult := customsResponse{}\n\tfor _, cc := range answers {\n\t\tif cc < 'a' || cc > 'z' {\n\t\t\terr := errors.Errorf(\"could not parse `%s`: invalid character `%c`\", answers, cc)\n\t\t\treturn customsResponse{}, err\n\t\t}\n\t\tresult[cc-'a'] = true\n\t}\n\treturn result, nil\n}",
"func CreateGetIndustryCommerceInfoResponse() (response *GetIndustryCommerceInfoResponse) {\n\tresponse = &GetIndustryCommerceInfoResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateDetachCenResponse() (response *DetachCenResponse) {\n\tresponse = &DetachCenResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func ParseCreateanewNcosLevelResponse(rsp *http.Response) (*CreateanewNcosLevelResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateanewNcosLevelResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 201:\n\t\tvar dest []Thenewlycreateditemorempty32\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON201 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 422:\n\t\tvar dest Anerror\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON422 = &dest\n\n\t}\n\n\treturn response, nil\n}",
"func CreateCoreEngineResponse() (response *CoreEngineResponse) {\n\tresponse = &CoreEngineResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateConfigNetworkResponse() (response *ConfigNetworkResponse) {\n\tresponse = &ConfigNetworkResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateGetEMapResponse() (response *GetEMapResponse) {\n\tresponse = &GetEMapResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateIntelligentCompositionResponse() (response *IntelligentCompositionResponse) {\n\tresponse = &IntelligentCompositionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateCreateCustomCallTaggingResponse() (response *CreateCustomCallTaggingResponse) {\n\tresponse = &CreateCustomCallTaggingResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithHeimdallClient sets the client
|
func WithHeimdallClient(client heimdall.Client) Option {
return func(s *Storage) {
s.client = client
}
}
|
[
"func (c *Client) SetClient(client *http.Client) {\n c.client.SetClient(client)\n}",
"func (s *Service) SetClient(client util.HTTPClient) { s.httpClient = client }",
"func (c *RetryClient) SetClient(ctx context.Context, cli Client) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.cli = cli\n\n\tif c.chTask != nil {\n\t\treturn\n\t}\n\n\tc.chTask = make(chan func(ctx context.Context, cli Client))\n\tgo func() {\n\t\tctx := context.Background()\n\t\tfor task := range c.chTask {\n\t\t\tc.mu.Lock()\n\t\t\tcli := c.cli\n\t\t\tc.mu.Unlock()\n\t\t\ttask(ctx, cli)\n\t\t}\n\t}()\n}",
"func (_m *esClientInterface) setClient(_a0 *elastic.Client) {\n\t_m.Called(_a0)\n}",
"func (c *RetryClient) SetClient(ctx context.Context, cli *BaseClient) {\n\tc.mu.Lock()\n\tc.cli = cli\n\tc.mu.Unlock()\n\n\tif c.chTask != nil {\n\t\treturn\n\t}\n\n\tc.chTask = make(chan struct{}, 1)\n\tgo func() {\n\t\tctx := context.Background()\n\t\tfor {\n\t\t\tc.mu.Lock()\n\t\t\tif len(c.taskQueue) == 0 {\n\t\t\t\tc.mu.Unlock()\n\t\t\t\t_, ok := <-c.chTask\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttask := c.taskQueue[0]\n\t\t\tc.taskQueue = c.taskQueue[1:]\n\t\t\tcli := c.cli\n\t\t\tc.mu.Unlock()\n\n\t\t\ttask(ctx, cli)\n\t\t}\n\t}()\n}",
"func (api *RESTAPI) SetClient(c *rpc.Client) {\n\tapi.rpcClient = c\n\tapi.rpcReady <- struct{}{}\n}",
"func SetClient(client *http.Client) {\n\thttpClient = client\n}",
"func (handler *RestHandler) SetClient(client *http.Client) {\n\thandler.Client = client\n}",
"func (rest *RESTAPI) SetClient(c *rpc.Client) {\n\trest.rpcClient = c\n\trest.rpcReady <- struct{}{}\n}",
"func InitHarborClient(hc *HarborClient) {\n\thbrCli = hc\n}",
"func (m *MqttClientBase) SetClientBase(host string, qos byte, clientID string, channel chan *models.PublishMessage, username string, password string, keepAlive time.Duration, pingTimeout time.Duration) {\n\tm.Qos = qos\n\tm.Host = host\n\tm.Username = username\n\tm.Password = password\n\tm.KeepAlive = keepAlive\n\tm.PingTimeout = pingTimeout\n\tm.Connecting = false\n\tm.Client = createPahoClient(host, clientID, username, password, keepAlive, pingTimeout)\n\tm.PublishChannel = channel\n}",
"func (g *HttpGetter) SetClient(c *http.Client) {\n\tif c == nil {\n\t\tg.client = http.DefaultClient\n\t} else {\n\t\tg.client = c\n\t}\n}",
"func (r *Handler) SetRedigoClient(conn clients.RedigoClientConn) {\n\tr.clientName = \"redigo\"\n\tr.implementation = &clients.Redigo{Conn: conn}\n}",
"func (bot *Bot) SetClient(c *http.Client) {\n\tbot.client = c\n}",
"func SetClient(client *Client, ctx *fiber.Ctx) {\n\tctx.Locals(\"nats_client\", client)\n}",
"func (service *BaseService) SetHTTPClient(client *http.Client) {\n\tsetMinimumTLSVersion(client)\n\n\tif isRetryableClient(service.Client) {\n\t\t// If \"service\" is currently holding a retryable client,\n\t\t// then set \"client\" as the embedded client used for individual requests.\n\t\ttr := service.Client.Transport.(*retryablehttp.RoundTripper)\n\t\ttr.Client.HTTPClient = client\n\t} else {\n\t\t// Otherwise, just hang \"client\" directly off the base service.\n\t\tservice.Client = client\n\t}\n}",
"func SetClient(c client.Client) {\n\tdefaultK.client = c\n}",
"func (t *StaticTokenGenerator) SetClient(c *Client) {\n\tt.client = c\n}",
"func (c *Client) SetHTTPClient(h *http.Client) {\n\tc.httpClient = NewHTTPClient(h, nil, c.debug)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CreateAuthMiddleware creates the middleware for authtication
|
// CreateAuthMiddleware creates the JWT middleware used to authenticate API
// requests. It first loads the token signing keys via the variables package
// and returns an error if they cannot be loaded.
//
// Returns a configured *jwt.Middleware on success, or a nil middleware and
// the key-loading error on failure.
func CreateAuthMiddleware() (*jwt.Middleware, error) {
	if err := variables.LoadTokenKeys(); err != nil {
		return nil, err
	}
	return &jwt.Middleware{
		Realm:            "numapp",
		SigningAlgorithm: variables.SigningAlgorithm,
		Key:              variables.TokenSignKey,
		VerifyKey:        &variables.TokenSignKey.PublicKey,
		// Issued tokens are valid for one hour and may be refreshed for up
		// to 24 hours after issue.
		Timeout:    time.Hour,
		MaxRefresh: time.Hour * 24,
		// Authenticator delegates credential checking to the login service;
		// a nil return means the user is authenticated.
		Authenticator: func(username string, password string) error {
			return login.Login(username, password)
		},
	}, nil
}
|
[
"func NewAuthMiddleware(svc interfaces.Service, r interfaces.Repository) interfaces.Service {\n\treturn &authMiddleware{\n\t\tnext: svc,\n\t\trepository: r,\n\t}\n}",
"func NewAuthMiddleware(up UserProvider) echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tctx := c.Request().Context()\n\n\t\t\taccountID := c.Param(\"account_id\")\n\t\t\tif accountID == \"\" || len(accountID) <= 10 {\n\t\t\t\treturn errors.New(\"account id missing or invalid\")\n\t\t\t}\n\n\t\t\tauth := c.Request().Header.Get(\"authorization\")\n\t\t\tif auth == \"\" || !strings.HasPrefix(auth, \"Bearer \") || len(auth) <= 10 {\n\t\t\t\treturn errors.New(\"token invalid\")\n\t\t\t}\n\n\t\t\ttoken := auth[7:]\n\n\t\t\tu, err := up.GetByToken(ctx, token)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"token invalid\")\n\t\t\t}\n\n\t\t\tif domain.ID(accountID) != u.AccountID {\n\t\t\t\treturn errors.New(\"account invalid\")\n\t\t\t}\n\n\t\t\tif u.TokenExpiresAt.Before(time.Now()) {\n\t\t\t\treturn errors.New(\"token expired\")\n\t\t\t}\n\n\t\t\t// Replace current request object\n\t\t\tc.SetRequest(c.Request().WithContext(domain.ContextWithSession(ctx, u)))\n\n\t\t\treturn next(c)\n\t\t}\n\t}\n}",
"func authMiddleware(tokenMaker token.Maker) gin.HandlerFunc {\n\treturn func(ctx *gin.Context) {\n\t\t// TODO: authen here\n\t\tctx.Next()\n\t}\n}",
"func AuthMiddleware() *Middleware {\n\tm := make([]*Middleware, 0, 1+len(extraAuthMiddlewares))\n\tm = append(m, RequireAuthMiddleware)\n\tm = append(m, extraAuthMiddlewares...)\n\treturn composeMiddleware(m...)\n}",
"func SetMiddlewareAuth(next http.HandlerFunc, authReq bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tresponse, err := auth.ValidateToken(r)\n\t\tif !authReq && err != nil {\n\t\t\tctx := context.WithValue(r.Context(), contextkey.ContextKeyUsernameCaller, \"\")\n\t\t\tctx2 := context.WithValue(ctx, contextkey.ContextKeyUserIDCaller, \"\")\n\t\t\tnext(w, r.WithContext(ctx2))\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\tresponses.ERROR(w, http.StatusUnauthorized, err)\n\t\t\treturn\n\t\t}\n\t\tnext(w, response)\n\t}\n}",
"func New() iris2.HandlerFunc {\n\tl := &authMiddleware{}\n\treturn l.Serve\n}",
"func (m JWTAuthMiddleware) Setup() {}",
"func (route routerConfig) authMiddleware(deviceDB *devices.Database) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {\n\t\t\tif route.routeAuth[request.URL.Path] {\n\t\t\t\tnext.ServeHTTP(writer, request)\n\t\t\t} else {\n\t\t\t\tdev, err := auth.VerifyAccessToken(request, deviceDB)\n\t\t\t\tif err != nil {\n\t\t\t\t\terr.Encode(&writer)\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tcontext.Set(request, \"localpart\", dev.UserID)\n\t\t\t\t\tnext.ServeHTTP(writer, request)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}",
"func NewAuthMiddleware() gopress.MiddlewareFunc {\n\treturn func(next gopress.HandlerFunc) gopress.HandlerFunc {\n\t\treturn func(c gopress.Context) error {\n\t\t\tcookie, err := c.Cookie(\"uid\")\n\t\t\tif err != nil {\n\t\t\t\treturn c.Redirect(http.StatusFound, \"/login?cookie=err\")\n\t\t\t}\n\n\t\t\tif cookie.Value == \"\" {\n\t\t\t\tdropCookie(c, cookie)\n\t\t\t\treturn c.Redirect(http.StatusFound, \"/login?cookie=nil\")\n\t\t\t}\n\n\t\t\tcontainer := gopress.AppFromContext(c).Services\n\n\t\t\tdbs := container.Get(services.DBServerName).(*services.DBService)\n\t\t\tuid, err := strconv.Atoi(cookie.Value)\n\t\t\tif err != nil {\n\t\t\t\tdropCookie(c, cookie)\n\t\t\t\treturn c.Redirect(http.StatusFound, \"/login?cookie=invalid\")\n\t\t\t}\n\t\t\tuser := &models.User{}\n\t\t\tif dbs.ORM.First(user, uid).RecordNotFound() {\n\t\t\t\tdropCookie(c, cookie)\n\t\t\t\treturn c.Redirect(http.StatusFound, \"/login?cookie=nosuchuser\")\n\t\t\t}\n\n\t\t\tmessageNum := getMessageNum(dbs.ORM, user.ID)\n\t\t\tc.Set(\"messageNum\", messageNum)\n\t\t\tc.Set(\"haveMessage\", messageNum > 0)\n\t\t\tc.Set(\"user\", user)\n\n\t\t\treturn next(c)\n\t\t}\n\t}\n}",
"func GetAuthMiddleware(cfg *types.Config) gin.HandlerFunc {\n\tif !cfg.OIDCEnable {\n\t\treturn gin.BasicAuth(gin.Accounts{\n\t\t\t// Use the config's username and password for basic auth\n\t\t\tcfg.Username: cfg.Password,\n\t\t})\n\t}\n\treturn CustomAuth(cfg)\n}",
"func (j *GinJwt) AuthMiddleware() gin.HandlerFunc {\n\tif j._ginJwtMiddleware != nil {\n\t\treturn j._ginJwtMiddleware.MiddlewareFunc()\n\t} else {\n\t\treturn nil\n\t}\n}",
"func (m *middlewareCreator) CreateGinMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tauthHeader := c.GetHeader(\"Authorization\")\n\t\trx := regexp.MustCompile(\"^[B|b]earer\\\\s*\")\n\t\tjwt := rx.ReplaceAllString(authHeader, \"\")\n\t\tresult := m.Validator.ValidateToken(jwt)\n\n\t\tif !result.Valid || result.Expired {\n\t\t\tif m.FailureHook != nil {\n\t\t\t\tm.FailureHook(c)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Per RFC responding with a 401 whether invalid or expired\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, m.notAuthorizedError())\n\t\t\treturn\n\t\t}\n\n\t\tc.Set(\"UserPayload\", result)\n\n\t\tif m.SuccessHook != nil {\n\t\t\tm.SuccessHook(c)\n\t\t}\n\t}\n}",
"func SetupAuth(router *mux.Router) {\n\n\trouter.Use(authMiddleware)\n}",
"func (s *Setup) AuthMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// Capturing Authorizathion header.\n\t\ttokenHeader := r.Header.Get(\"Authorization\")\n\n\t\t// Checking if the value is empty.\n\t\tif tokenHeader == \"\" {\n\t\t\terrhandler.DecodeError(w, r, s.logger, errEmptyToken, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t// Checking if the header contains Bearer string and if the token exists.\n\t\tif !strings.Contains(tokenHeader, \"Bearer\") || len(strings.Split(tokenHeader, \"Bearer \")) == 1 {\n\t\t\terrhandler.DecodeError(w, r, s.logger, errMalformedToken, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// Capturing the token.\n\t\tjwtString := strings.Split(tokenHeader, \"Bearer \")[1]\n\n\t\t// Parsing the token to verify its authenticity.\n\t\ttoken, err := jwt.Parse(jwtString, func(token *jwt.Token) (interface{}, error) {\n\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t\t}\n\t\t\tcfg, err := config.Load()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn []byte(cfg.JWTSecret), nil\n\t\t})\n\n\t\t// Returning parsing errors.\n\t\tif err != nil {\n\t\t\terrhandler.DecodeError(w, r, s.logger, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// If the token is valid.\n\t\tif token.Valid {\n\t\t\tnext.ServeHTTP(w, r)\n\t\t} else {\n\t\t\terrhandler.DecodeError(w, r, s.logger, errInvalidJWTToken, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t})\n}",
"func (b *BearerGenerator) NewMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// Be pessimistic\n\t\tok := false\n\n\t\t// Check that the token is present and valid\n\t\tif tokenStr, err := FromAuthHeader(c.Request); err == nil {\n\t\t\ttoken, err := b.Finder.FindByToken(tokenStr)\n\n\t\t\tif err == nil && !b.Validator.Expired(token) {\n\t\t\t\tok = true\n\t\t\t\tc.Set(\"token\", token)\n\t\t\t}\n\t\t}\n\n\t\tif !ok {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\t\"status\": \"error\",\n\t\t\t\t\"messages\": []string{\"Invalid or expired authorization token\"},\n\t\t\t})\n\t\t\tc.Abort()\n\t\t} else {\n\t\t\tc.Next()\n\t\t}\n\t}\n}",
"func GetAuthMiddleware() echo.MiddlewareFunc {\n\treturn middleware.JWTWithConfig(middleware.JWTConfig{\n\t\tSigningKey: []byte(\"secret\"),\n\t})\n}",
"func NewAuthPrepare() Middleware {\n\treturn Chain(\n\t\tBasicParamsMap,\n\t\tMapHeader,\n\t\tFilterAuthReqParams,\n\t)\n}",
"func DiCreateAuthHandler(jwtService *user.JwtService) *middleware.AuthHandler {\n\treturn middleware.CreateAuthHandler(jwtService)\n}",
"func authMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// Get the client secret key\n\t\terr := jwtMiddleWare.CheckJWT(c.Writer, c.Request)\n\t\tif err != nil {\n\t\t\t// Token not found\n\t\t\tfmt.Println(err)\n\t\t\tc.Abort()\n\t\t\tc.Writer.WriteHeader(http.StatusUnauthorized)\n\t\t\tc.Writer.Write([]byte(\"Unauthorized\"))\n\t\t\treturn\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
MakeHandler creates the api request handler
|
// MakeHandler creates the API request handler, placing the JWT auth
// middleware in front of every route except the login and register
// endpoints (which must be reachable without a token).
//
// Setup errors terminate the process via log.Fatal — previously the
// middleware-creation failure used panic while the router failure used
// log.Fatal; both now fail the same way. This function is intended to be
// called once at startup.
func MakeHandler() *http.Handler {
	api := rest.NewApi()
	authMiddleware, err := CreateAuthMiddleware()
	if err != nil {
		// The server cannot run without its token signing keys.
		log.Fatal(err)
	}
	api.Use(&rest.IfMiddleware{
		// Only authenticate non login or register requests.
		Condition: func(request *rest.Request) bool {
			return request.URL.Path != variables.APIPathLoginUserServer &&
				request.URL.Path != variables.APIPathRegisterUserServer
		},
		IfTrue: authMiddleware,
	})
	api.Use(rest.DefaultProdStack...)
	router, err := rest.MakeRouter(
		rest.Post(variables.APIPathLoginUserServer, authMiddleware.LoginHandler),
		rest.Get(variables.APIPathRefreshUserServer, authMiddleware.RefreshHandler),
		rest.Post(variables.APIPathRegisterUserServer, PostRegister),
		rest.Get(variables.APIPathUserServer, GetUser),
		rest.Post(variables.APIPathUserServer, PostUser),
	)
	if err != nil {
		log.Fatal(err)
	}
	api.SetApp(router)
	handler := api.MakeHandler()
	// NOTE(review): returning a pointer to an interface (*http.Handler) is
	// unusual in Go; the signature is kept unchanged for caller
	// compatibility.
	return &handler
}
|
[
"func apiMakeHandler(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tm := apiValidPath.FindStringSubmatch(r.URL.Path)\n\t\tif len(m) == 0 {\n\t\t\tAPIError(w, \"Not found\", http.StatusNotFound)\n\t\t}\n\t\t// fn => apiCandleHandler\n\t\tfn(w, r)\n\t}\n}",
"func MakeHttpHandler(ctx context.Context, endpoints endpoint.Endpoints, logger log.Logger,) http.Handler {\n\tr := mux.NewRouter()\n\t// 链路追踪\n//\tzipkinServer := zipkin.HTTPServerTrace(zipkinTracer, zipkin.Name(\"http-transport\"))\n\toptions := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(kithttp.DefaultErrorEncoder),\n\t\tkithttp.ServerErrorEncoder(func(ctx context.Context, err error, w http.ResponseWriter) {\n\t\t\tlogger.Log(fmt.Sprint(ctx.Value(ContextReqUUid)))\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tjson.NewEncoder(w).Encode(err)\n\t\t}),\n\t\tkithttp.ServerBefore(func(ctx context.Context, request *http.Request) context.Context {\n\t\t\tUUID := uuid.NewV5(uuid.Must(uuid.NewV4(),nil), \"req_uuid\").String()\n\t\t\tlogger.Log(\"给请求添加uuid\", zap.Any(\"UUID\", UUID))\n\t\t\tctx = context.WithValue(ctx, ContextReqUUid, UUID)\n\t\t\treturn ctx\n\t\t}),\n\t//\tzipkinServer,\n\t}\n\n // 暴露具体的 endpoint\n\tr.Methods(\"POST\").Path(\"/register\").Handler(kithttp.NewServer(\n\t\tendpoints.RegistAccount,\n\t\tdecodeRegisterRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\n\tr.Methods(\"POST\").Path(\"/login\").Handler(kithttp.NewServer(\n\t\tendpoints.LoginAccount,\n\t\tdecodeLoginRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\tr.Methods(\"POST\").Path(\"/userInfo\").Handler(kithttp.NewServer(\n\t\tendpoints.GetUserInfoByToken,\n\t\tdecodeGetTokenRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\treturn r\n}",
"func MakeHttpHandler(ctx context.Context, endpoints endpoint.Endpoints, logger log.Logger, zipkinTracer *gozipkin.Tracer,) http.Handler {\n\tr := mux.NewRouter()\n\t// 链路追踪\n\tzipkinServer := zipkin.HTTPServerTrace(zipkinTracer, zipkin.Name(\"http-transport\"))\n\toptions := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(kithttp.DefaultErrorEncoder),\n\t\tkithttp.ServerErrorEncoder(func(ctx context.Context, err error, w http.ResponseWriter) {\n\t\t\tlogger.Log(fmt.Sprint(ctx.Value(ContextReqUUid)))\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tjson.NewEncoder(w).Encode(err)\n\t\t}),\n\t\tkithttp.ServerBefore(func(ctx context.Context, request *http.Request) context.Context {\n\t\t\tUUID := uuid.NewV5(uuid.Must(uuid.NewV4(),nil), \"req_uuid\").String()\n\t\t\tlogger.Log(\"给请求添加uuid\", zap.Any(\"UUID\", UUID))\n\t\t\tctx = context.WithValue(ctx, ContextReqUUid, UUID)\n\t\t\treturn ctx\n\t\t}),\n\t\tzipkinServer,\n\t}\n\tr.Methods(\"POST\").Path(\"/movie-tags\").Handler(kithttp.NewServer(\n\t\tendpoints.MovieTags,\n\t\tdecodeMoviesTagsRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n // 暴露具体的 endpoint\n\tr.Methods(\"POST\").Path(\"/movie-list\").Handler(kithttp.NewServer(\n\t\tendpoints.MoviesList,\n\t\tdecodeHotPlayMoviesrRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\n\tr.Methods(\"POST\").Path(\"/movie-detail\").Handler(kithttp.NewServer(\n\t\tendpoints.MovieDetail,\n\t\tdecodMovieDetailRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\tr.Methods(\"POST\").Path(\"/credits\").Handler(kithttp.NewServer(\n\t\tendpoints.MovieCreditsWithTypes,\n\t\tdecodeMovieCreditsWithTypes, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\treturn r\n}",
"func MakeHandler(service Service, logger log.Logger) http.Handler {\n\topts := []khttp.ServerOption{\n\t\tkhttp.ServerErrorLogger(logger),\n\t\tkhttp.ServerErrorEncoder(kit.EncodeError),\n\t}\n\n\troutes := kit.Routes{\n\t\tkit.Route{\n\t\t\tName: \"UUID\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/one/v1/uuid\",\n\t\t\tHandler: khttp.NewServer(\n\t\t\t\tmakeUuidEndpoint(service),\n\t\t\t\tkit.Decode(nil),\n\t\t\t\tkit.Encode,\n\t\t\t\topts...,\n\t\t\t),\n\t\t},\n\t\tkit.Route{\n\t\t\tName: \"BULK\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/one/v1/bulk/{amount}\",\n\t\t\tHandler: khttp.NewServer(\n\t\t\t\tmakeBulkEndpoint(service),\n\t\t\t\tdecodeBulkRequest,\n\t\t\t\tkit.Encode,\n\t\t\t\topts...,\n\t\t\t),\n\t\t},\n\t}\n\n\treturn kit.AddRoutes(routes...)\n}",
"func MakeHandler(ctx context.Context, us Service, logger kitlog.Logger) http.Handler {\n\tr := mux.NewRouter()\n\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t\tkithttp.ServerBefore(kithttp.PopulateRequestContext, func(c context.Context, r *http.Request) context.Context {\n\t\t\tvar scheme = \"http\"\n\t\t\tif r.TLS != nil {\n\t\t\t\tscheme = \"https\"\n\t\t\t}\n\t\t\tc = context.WithValue(c, contextKeyHTTPAddress, scheme+\"://\"+r.Host+\"/\")\n\t\t\treturn c\n\t\t}),\n\t}\n\n\tURLHealthzHandler := kithttp.NewServer(\n\t\tmakeURLHealthzEndpoint(us),\n\t\tfunc(c context.Context, r *http.Request) (interface{}, error) {\n\t\t\treturn nil, nil\n\t\t},\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\tURLShortifyHandler := kithttp.NewServer(\n\t\tmakeURLShortifyEndpoint(us),\n\t\tdecodeURLShortenerRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\tURLRedirectHandler := kithttp.NewServer(\n\t\tmakeURLRedirectEndpoint(us),\n\t\tdecodeURLRedirectRequest,\n\t\tencodeRedirectResponse,\n\t\topts...,\n\t)\n\tURLInfoHandler := kithttp.NewServer(\n\t\tmakeURLInfoEndpoint(us),\n\t\tdecodeURLInfoRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tr.Handle(\"/\", URLShortifyHandler).Methods(\"POST\")\n\tr.Handle(\"/healthz\", URLHealthzHandler).Methods(\"GET\")\n\tr.Handle(\"/{shortURL}\", URLRedirectHandler).Methods(\"GET\")\n\tr.Handle(\"/info/{shortURL}\", URLInfoHandler).Methods(\"GET\")\n\n\treturn r\n}",
"func makeHandler(server *ServerContext, privs handlerPrivs, method handlerMethod) http.Handler {\n\treturn http.HandlerFunc(func(r http.ResponseWriter, rq *http.Request) {\n\t\th := newHandler(server, privs, r, rq)\n\t\terr := h.invoke(method)\n\t\th.writeError(err)\n\t\th.logDuration(true) \n\t})\n}",
"func MakeHTTPHandler(s Service) http.Handler {\n\tr := chi.NewRouter()\n\n\tListPostsHandler := kithttp.NewServer(\n\t\tmakeListPostsEndpoint(s),\n\t\tlistPostsRequestDecoder,\n\t\tresthttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodGet, \"/\", ListPostsHandler)\n\n\tGetPostHandler := kithttp.NewServer(\n\t\tmakeGetPostEndpoint(s),\n\t\tgetPostRequestDecoder,\n\t\tresthttp.EncodeJSONResponse,\n\t\t[]kithttp.ServerOption{\n\t\t\tkithttp.ServerErrorEncoder(resthttp.EncodeError),\n\t\t}...,\n\t)\n\tr.Method(http.MethodGet, \"/{id}\", GetPostHandler)\n\n\treturn r\n}",
"func CreateHTTPAPIHandler(iManager integration.IntegrationManager, cManager clientapi.DevOpsClientManager,\n\tsManager settings.SettingsManager,\n\tsbManager systembanner.SystemBannerManager,\n\ttpManager thirdpartyapi.ThirdPartyManager) (http.Handler, error) {\n\n\tmw := NewLicenseMiddlewareFactory(false)\n\tapiHandler := APIHandler{iManager: iManager, cManager: cManager, sManager: &sManager, groupValidator: mw}\n\twsContainer := restful.NewContainer()\n\twsContainer.EnableContentEncoding(true)\n\n\tapiV1Ws := new(restful.WebService)\n\n\tInstallFilters(apiV1Ws, cManager, mw)\n\n\tapiV1Ws.Path(\"/api/v1\").\n\t\tConsumes(restful.MIME_JSON).\n\t\tProduces(restful.MIME_JSON).\n\t\tParam(restful.HeaderParameter(\"Authorization\", \"Given Bearer token will use this as authorization for the API\"))\n\n\twsContainer.Add(apiV1Ws)\n\n\tapiV2Ws := new(restful.WebService)\n\tapiV2Ws.Path(\"\").\n\t\tConsumes(restful.MIME_JSON).\n\t\tProduces(restful.MIME_JSON).\n\t\tParam(restful.HeaderParameter(\"Authorization\", \"Given Bearer token will use this as authorization for the API\"))\n\twsContainer.Add(apiV2Ws)\n\n\tintegrationHandler := integration.NewIntegrationHandler(iManager)\n\tintegrationHandler.Install(apiV1Ws)\n\tintegrationHandler.Install(apiV2Ws)\n\n\tsettingsHandler := settings.NewSettingsHandler(sManager)\n\tsettingsHandler.Install(apiV1Ws)\n\tsettingsHandler.Install(apiV2Ws)\n\n\tsystemBannerHandler := systembanner.NewSystemBannerHandler(sbManager)\n\tsystemBannerHandler.Install(apiV1Ws)\n\tsystemBannerHandler.Install(apiV2Ws)\n\n\tthirPartyHandler := thirdparty.NewThirdPartyHandler(&sManager, cManager, tpManager)\n\tthirPartyHandler.Install(apiV1Ws)\n\tthirPartyHandler.Install(apiV2Ws)\n\n\tconfigurationHandler := 
thandler.NewAPIHandler(\"configuration\")\n\tconfigurationHandler.Install(apiV1Ws)\n\tconfigurationHandler.Install(apiV2Ws)\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/namespaces\").\n\t\t\tTo(apiHandler.handleGetNamespaces).\n\t\t\tWrites(ns.NamespaceList{}).\n\t\t\tDoc(\"get namespaces list\").\n\t\t\tReturns(200, \"OK\", ns.NamespaceList{}))\n\n\tapiV2Ws.Route(\n\t\tapiV2Ws.GET(\"/apis/v1/projects/{name}/clusters/{cluster}/namespaces\").\n\t\t\tTo(apiHandler.handleNewGetNamespaces).\n\t\t\tWrites(v1.NamespaceList{}).\n\t\t\tDoc(\"new get project list\").\n\t\t\tReturns(200, \"OK\", v1.NamespaceList{}))\n\n\tapiV2Ws.Route(\n\t\tapiV2Ws.GET(\"/project/v1/projects/{name}/clusters/{cluster}/namespaces\").\n\t\t\tTo(apiHandler.handleNewGetNamespaces).\n\t\t\tWrites(v1.NamespaceList{}).\n\t\t\tDoc(\"new get project list\").\n\t\t\tReturns(200, \"OK\", v1.NamespaceList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/appdeployment\").\n\t\t\tTo(apiHandler.handleDeploy).\n\t\t\tReads(deployment.AppDeploymentSpec{}).\n\t\t\tWrites(deployment.AppDeploymentSpec{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configuration\").\n\t\t\tTo(apiHandler.handleGetPlatformConfiguration).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/cani\").\n\t\t\tTo(apiHandler.handleCanI).\n\t\t\tReads(authv1.SelfSubjectAccessReviewSpec{}).\n\t\t\tWrites(common.CanIResponse{}).\n\t\t\tDoc(\"Validates access for user\").\n\t\t\tReturns(200, \"OK\", common.CanIResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/caniadmin\").\n\t\t\tTo(apiHandler.handleCanIAdmin).\n\t\t\tWrites(common.CanIResponse{}).\n\t\t\tDoc(\"Validates access for admin user\").\n\t\t\tReturns(200, \"OK\", common.CanIResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/apis\").\n\t\t\tTo(apiHandler.handleGetAPIGroups).\n\t\t\tWrites(metav1.APIGroupList{}).\n\t\t\tDoc(\"Fetches a list of API groups available\").\n\t\t\tReturns(200, \"OK\", 
metav1.APIGroupList{}))\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/appdeployment/validate/name\").\n\t//\t\tTo(apiHandler.handleNameValidity).\n\t//\t\tReads(validation.AppNameAppNameValiditySpecValiditySpec{}).\n\t//\t\tWrites(validation.AppNameValidity{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/appdeployment/validate/imagereference\").\n\t//\t\tTo(apiHandler.handleImageReferenceValidity).\n\t//\t\tReads(validation.ImageReferenceValiditySpec{}).\n\t//\t\tWrites(validation.ImageReferenceValidity{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/appdeployment/validate/protocol\").\n\t//\t\tTo(apiHandler.handleProtocolValidity).\n\t//\t\tReads(validation.ProtocolValiditySpec{}).\n\t//\t\tWrites(validation.ProtocolValidity{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/appdeployment/protocols\").\n\t//\t\tTo(apiHandler.handleGetAvailableProcotols).\n\t//\t\tWrites(deployment.Protocols{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/appdeploymentfromfile\").\n\t//\t\tTo(apiHandler.handleDeployFromFile).\n\t//\t\tReads(deployment.AppDeploymentFromFileSpec{}).\n\t//\t\tWrites(deployment.AppDeploymentFromFileResponse{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerList).\n\t//\t\tWrites(replicationcontroller.ReplicationControllerList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerList).\n\t//\t\tWrites(replicationcontroller.ReplicationControllerList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerDetail).\n\t//\t\tWrites(replicationcontroller.ReplicationControllerDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/replicationcontroller/{namespace}/{replicationController}/update/pod\").\n\t//\t\tTo(apiHandler.handleUpdateReplicasCount).\n\t//\t\tReads(replicationcontroller.Replication
ControllerSpec{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/pod\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/event\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/service\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerServices).\n\t//\t\tWrites(resourceService.ServiceList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/workload\").\n\t//\t\tTo(apiHandler.handleGetWorkloads).\n\t//\t\tWrites(workload.Workloads{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/workload/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetWorkloads).\n\t//\t\tWrites(workload.Workloads{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cluster\").\n\t//\t\tTo(apiHandler.handleGetCluster).\n\t//\t\tWrites(cluster.Cluster{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/discovery\").\n\t//\t\tTo(apiHandler.handleGetDiscovery).\n\t//\t\tWrites(discovery.Discovery{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/discovery/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetDiscovery).\n\t//\t\tWrites(discovery.Discovery{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/config\").\n\t//\t\tTo(apiHandler.handleGetConfig).\n\t//\t\tWrites(config.Config{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/config/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetConfig).\n\t//\t\tWrites(config.Config{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset\").\n\t//\t\tTo(apiHandler.handleGetReplicaSets).\n\t//\t\tWrites(replicaset.ReplicaSetList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetReplicaSets).\n\t//\t\tWrites(replicaset.ReplicaSetList{}))\n\t//apiV1Ws.Route(\n\t//\ta
piV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}\").\n\t//\t\tTo(apiHandler.handleGetReplicaSetDetail).\n\t//\t\tWrites(replicaset.ReplicaSetDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}/pod\").\n\t//\t\tTo(apiHandler.handleGetReplicaSetPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}/event\").\n\t//\t\tTo(apiHandler.handleGetReplicaSetEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod\").\n\t//\t\tTo(apiHandler.handleGetPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod/{namespace}/{pod}\").\n\t//\t\tTo(apiHandler.handleGetPodDetail).\n\t//\t\tWrites(pod.PodDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/container\").\n\t\t\tTo(apiHandler.handleGetPodContainers).\n\t\t\tWrites(pod.PodDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/event\").\n\t//\t\tTo(apiHandler.handleGetPodEvents).\n\t//\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/shell/{container}\").\n\t\t\tTo(apiHandler.handleExecShell).\n\t\t\tWrites(TerminalResponse{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/persistentvolumeclaim\").\n\t//\t\tTo(apiHandler.handleGetPodPersistentVolumeClaims).\n\t//\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\t//\n\n\t// region 
Deployment\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment\").\n\t\t\tTo(apiHandler.handleGetDeployments).\n\t\t\tWrites(deployment.DeploymentList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}\").\n\t\t\tTo(apiHandler.handleGetDeployments).\n\t\t\tWrites(deployment.DeploymentList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}\").\n\t\t\tTo(apiHandler.handleGetDeploymentDetail).\n\t\t\tWrites(deployment.DeploymentDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentDetail).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/start\").\n\t\t\tTo(apiHandler.handleStartStopDeployment).\n\t\t\tDoc(\"start deployment\").\n\t\t\tReturns(http.StatusNoContent, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/stop\").\n\t\t\tTo(apiHandler.handleStartStopDeployment).\n\t\t\tDoc(\"stop deployment\").\n\t\t\tReturns(http.StatusNoContent, \"OK\", 
struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/yaml\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentDetailYaml).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/replicas\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentReplicas).\n\t\t\tWrites(deployment.DeploymentReplica{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/network\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentNetwork).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/container/{container}/\").\n\t\t\tTo(apiHandler.handlePutDeploymentContainer).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/container/{container}/image\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentContainerImage).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/container/{container}/resources\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentContainerResources).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/container/{container}/env\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentContainerEnv).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/deployment/{namespace}/{deployment}/container/{container}/volumeMount/\").\n\t\t\tTo(apiHandler.handleCreateDeploymentVolumeMount).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/event\").\n\t\t\tTo(apiHandler.handleGetDeploymentEvents).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/pods\").\n\t\t\tTo(apiHandler.handleGetDeploymentPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/oldreplicaset\").\n\t\t\tTo(apiHandler.ha
ndleGetDeploymentOldReplicaSets).\n\t\t\tWrites(replicaset.ReplicaSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/deployment/{namespace}/{deployment}/actions/rollback\").\n\t\t\tTo(apiHandler.handleRollBackDeploymentToRevision).\n\t\t\tReads(common.RevisionDetail{}).\n\t\t\tWrites(appsv1.Deployment{}).\n\t\t\tDoc(\"rollback deployment to special revision\").\n\t\t\tReturns(200, \"OK\", appsv1.Deployment{}))\n\n\t// endregion\n\n\t// region Scale\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.PUT(\"/scale/{kind}/{namespace}/{name}/\").\n\t//\t\tTo(apiHandler.handleScaleResource).\n\t//\t\tWrites(scaling.ReplicaCounts{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/scale/{kind}/{namespace}/{name}\").\n\t//\t\tTo(apiHandler.handleGetReplicaCount).\n\t//\t\tWrites(scaling.ReplicaCounts{}))\n\t// endregion\n\n\t// region Deamonset\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/daemonset\").\n\t//\t\tTo(apiHandler.handleGetDaemonSetList).\n\t//\t\tWrites(daemonset.DaemonSetList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/daemonset/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetDaemonSetList).\n\t//\t\tWrites(daemonset.DaemonSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonset}\").\n\t\t\tTo(apiHandler.handleGetDaemonSetDetail).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetDetail).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/yaml\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetDetail).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonset}/pods\").\n\t\t\tTo(apiHandler.handleGetDaemonSetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/container/{container}/\").\n\t\t\tTo(apiHandler.handlePutDaemonSetContainer).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapi
V1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/container/{container}/image\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetContainerImage).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/container/{container}/env\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetContainerEnv).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/container/{container}/resources\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetContainerResource).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/daemonset/{namespace}/{daemonset}/container/{container}/volumeMount/\").\n\t\t\tTo(apiHandler.handleCreateDaemonSetVolumeMount).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonSet}/service\").\n\t//\t\tTo(apiHandler.handleGetDaemonSetServices).\n\t//\t\tWrites(resourceService.ServiceList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonSet}/event\").\n\t//\t\tTo(apiHandler.handleGetDaemonSetEvents).\n\t//\t\tWrites(common.EventList{}))\n\n\t// 
endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerList).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler/{namespace}\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerList).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler/{namespace}/{horizontalpodautoscaler}\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerDetail).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/horizontalpodautoscaler/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateHorizontalPodAutoscaler).\n\t\t\tReads(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/horizontalpodautoscaler/{namespace}/{horizontalpodautoscaler}\").\n\t\t\tTo(apiHandler.handleUpdateHorizontalPodAutoscaler).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/horizontalpodautoscaler/{namespace}/{horizontalpodautoscaler}\").\n\t\t\tTo(apiHandler.handleDeleteHorizontalPodAutoscaler).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job\").\n\t//\t\tTo(apiHandler.handleGetJobList).\n\t//\t\tWrites(job.JobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetJobList).\n\t//\t\tWrites(job.JobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job/{namespace}/{name}\").\n\t//\t\tTo(apiHandler.handleGetJobDetail).\n\t//\t\tWrites(job.JobDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job/{namespace}/{name}/pod\").\n\t//\t\tTo(apiHandler.handleGetJobPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1
Ws.GET(\"/job/{namespace}/{name}/event\").\n\t//\t\tTo(apiHandler.handleGetJobEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob\").\n\t//\t\tTo(apiHandler.handleGetCronJobList).\n\t//\t\tWrites(cronjob.CronJobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetCronJobList).\n\t//\t\tWrites(cronjob.CronJobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}\").\n\t//\t\tTo(apiHandler.handleGetCronJobDetail).\n\t//\t\tWrites(cronjob.CronJobDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}/job\").\n\t//\t\tTo(apiHandler.handleGetCronJobJobs).\n\t//\t\tWrites(job.JobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}/event\").\n\t//\t\tTo(apiHandler.handleGetCronJobEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//\n\n\t// region Namespace\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/namespace\").\n\t//\t\tTo(apiHandler.handleCreateNamespace).\n\t//\t\tReads(ns.NamespaceSpec{}).\n\t//\t\tWrites(ns.NamespaceSpec{}))\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/namespace/{name}\").\n\t//\t\tTo(apiHandler.handleGetNamespaceDetail).\n\t//\t\tWrites(ns.NamespaceDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/namespace/{name}/event\").\n\t//\t\tTo(apiHandler.handleGetNamespaceEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//\n\t// endregion\n\n\t// region 
Secret\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret\").\n\t\t\tTo(apiHandler.handleGetSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{namespace}\").\n\t\t\tTo(apiHandler.handleGetSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetSecretDetail).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetSecretRelatedResources).\n\t\t\tWrites(secret.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{namespace}/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetSecretRelatedResources).\n\t\t\tWrites(secret.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/secret/{name}\").\n\t\t\tTo(apiHandler.handleUpdateSecret).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/secret/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateSecret).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/secret\").\n\t\t\tTo(apiHandler.handleCreateSecret).\n\t\t\tReads(secret.SecretDetail{}).\n\t\t\tWrites(secret.Secret{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/secret/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateSecret).\n\t\t\tReads(secret.SecretDetail{}).\n\t\t\tWrites(secret.Secret{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/secret/{name}\").\n\t\t\tTo(apiHandler.handleDeleteSecret).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/secret/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteSecret).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/secret/{namespace}/{name}/actions/tradeapp\").\n\t\t\tTo(apiHandler.handleUpdateSecretBelongApp).\n\t\t\tReads(common.AppNameDetail{}).\n\t\t\tWrites(secret.SecretDetail{}).\n\t\t\tDoc(\"update secret belongs app\").\n\t\t\tReturns(200, \"OK\", secret.SecretDetail{}))\n\t// endregion\n\n\t// 
region Configmap\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap\").\n\t\t\tTo(apiHandler.handleGetConfigMapList).\n\t\t\tWrites(configmap.ConfigMapList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap/{namespace}\").\n\t\t\tTo(apiHandler.handleGetConfigMapList).\n\t\t\tWrites(configmap.ConfigMapList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap/{namespace}/{configmap}\").\n\t\t\tTo(apiHandler.handleGetConfigMapDetail).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/configmap/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateConfigMap).\n\t\t\tReads(configmap.ConfigMapDetail{}).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/configmap/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateConfigMap).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/configmap/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteConfigMap).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/configmap/{namespace}/{name}/actions/tradeapp\").\n\t\t\tTo(apiHandler.handleUpdateConfigMapBelongApp).\n\t\t\tReads(common.AppNameDetail{}).\n\t\t\tWrites(configmap.ConfigMapDetail{}).\n\t\t\tDoc(\"update configmap belongs app\").\n\t\t\tReturns(200, \"OK\", configmap.ConfigMapDetail{}))\n\t// 
endregion\n\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/service\").\n\t//\t\tTo(apiHandler.handleGetServiceList).\n\t//\t\tWrites(resourceService.ServiceList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/service/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetServiceList).\n\t//\t\tWrites(resourceService.ServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}/{service}\").\n\t\t\tTo(apiHandler.handleGetServiceDetail))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/service/{namespace}/{service}/pod\").\n\t//\t\tTo(apiHandler.handleGetServicePods).\n\t//\t\tWrites(pod.PodList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/ingress\").\n\t//\t\tTo(apiHandler.handleGetIngressList).\n\t//\t\tWrites(ingress.IngressList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/ingress/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetIngressList).\n\t//\t\tWrites(ingress.IngressList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/ingress/{namespace}/{name}\").\n\t//\t\tTo(apiHandler.handleGetIngressDetail).\n\t//\t\tWrites(ingress.IngressDetail{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/statefulset\").\n\t//\t\tTo(apiHandler.handleGetStatefulSetList).\n\t//\t\tWrites(statefulset.StatefulSetList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/statefulset/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetStatefulSetList).\n\t//\t\tWrites(statefulset.StatefulSetList{}))\n\n\t// region Statefulset\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}\").\n\t\t\tTo(apiHandler.handleGetStatefulSetDetail).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetDetail).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/start\").\n\t\t\tTo(apiHandler.handleStartStopStatefulSet).\n\t\t\tDoc(\"start statefulset\").\n\t\t\tReturns(http.StatusNoContent, \"OK\", 
struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/stop\").\n\t\t\tTo(apiHandler.handleStartStopStatefulSet).\n\t\t\tDoc(\"stop statefulset\").\n\t\t\tReturns(http.StatusNoContent, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/yaml\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetDetail).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/replicas\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetReplicas).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}/pods\").\n\t\t\tTo(apiHandler.handleGetStatefulSetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/container/{container}/\").\n\t\t\tTo(apiHandler.handlePutStatefulSetContainer).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/container/{container}/image\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetContainerImage).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/container/{container}/env\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetContainerEnv).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/container/{container}/resources\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetContainerResource).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/statefulset/{namespace}/{statefulset}/container/{container}/volumeMount/\").\n\t\t\tTo(apiHandler.handleCreateStatefulSetVolumeMount).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}/event\").\n\t//\t\tTo(apiHandler.handleGetStatefulSetEvents).\n\t//\t\tWrites(common.EventList{}))\n\n\t// 
endregion\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/node\").\n\t//\t\tTo(apiHandler.handleGetNodeList).\n\t//\t\tWrites(node.NodeList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/node/{name}\").\n\t//\t\tTo(apiHandler.handleGetNodeDetail).\n\t//\t\tWrites(node.NodeDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/node/{name}/event\").\n\t//\t\tTo(apiHandler.handleGetNodeEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/node/{name}/pod\").\n\t//\t\tTo(apiHandler.handleGetNodePods).\n\t//\t\tWrites(pod.PodList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.DELETE(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t//\t\tTo(apiHandler.handleDeleteResource))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t//\t\tTo(apiHandler.handleGetResource))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.PUT(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t//\t\tTo(apiHandler.handlePutResource))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.DELETE(\"/_raw/{kind}/name/{name}\").\n\t//\t\tTo(apiHandler.handleDeleteResource))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/_raw/{kind}/name/{name}\").\n\t//\t\tTo(apiHandler.handleGetResource))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.PUT(\"/_raw/{kind}/name/{name}\").\n\t//\t\tTo(apiHandler.handlePutResource))\n\t//\n\n\t// region RBAC\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/rbac/role\").\n\t\t\tTo(apiHandler.handleGetRbacRoleList).\n\t\t\tWrites(rbacroles.RbacRoleList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/rbac/rolebinding\").\n\t\t\tTo(apiHandler.handleGetRbacRoleBindingList).\n\t\t\tWrites(rbacrolebindings.RbacRoleBindingList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/rolebinding/{namespace}\").\n\t\t\tTo(apiHandler.handleListRoleBindingsOriginal).\n\t\t\tWrites(rolebinding.RoleBindingList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/rolebinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateRoleBinding).\n\t\t\tDoc(\"creates a 
rolebinding\").\n\t\t\tWrites(rbacv1.RoleBinding{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/rolebinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteRoleBindingsOriginal).\n\t\t\tDoc(\"delete a rolebinding\").\n\t\t\tWrites(rbacv1.RoleBinding{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/rbac/status\").\n\t//\t\tTo(apiHandler.handleRbacStatus).\n\t//\t\tWrites(validation.RbacStatus{}))\n\n\t// endregion\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/persistentvolume\").\n\t//\t\tTo(apiHandler.handleGetPersistentVolumeList).\n\t//\t\tWrites(persistentvolume.PersistentVolumeList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/persistentvolume/{persistentvolume}\").\n\t//\t\tTo(apiHandler.handleGetPersistentVolumeDetail).\n\t//\t\tWrites(persistentvolume.PersistentVolumeDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/persistentvolume/namespace/{namespace}/name/{persistentvolume}\").\n\t//\t\tTo(apiHandler.handleGetPersistentVolumeDetail).\n\t//\t\tWrites(persistentvolume.PersistentVolumeDetail{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/persistentvolumeclaim/\").\n\t//\t\tTo(apiHandler.handleGetPersistentVolumeClaimList).\n\t//\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolumeclaim/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeClaimList).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolumeclaim/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeClaimDetail).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/persistentvolumeclaim/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePersistentVolumeClaim).\n\t\t\tReads(persistentvolumeclaim.PersistentVolumeClaimDetail{}).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/persistentvolumeclaim/{namespace}/{name}\").\n\t\t\tTo
(apiHandler.handleUpdatePersistentVolumeClaim).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/persistentvolumeclaim/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePersistentVolumeClaim).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/persistentvolumeclaim/{namespace}/{name}/actions/tradeapp\").\n\t\t\tTo(apiHandler.handleUpdatePersistentVolumeClaimBelongApp).\n\t\t\tReads(common.AppNameDetail{}).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}).\n\t\t\tDoc(\"update persistentvolumeclaim belongs app\").\n\t\t\tReturns(200, \"OK\", persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/storageclass\").\n\t\t\tTo(apiHandler.handleGetStorageClassList).\n\t\t\tWrites(storageclass.StorageClassList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/storageclass/{storageclass}\").\n\t\t\tTo(apiHandler.handleGetStorageClass).\n\t\t\tWrites(storageclass.StorageClass{}))\n\n\t// apiV1Ws.Route(\n\t// \tapiV1Ws.GET(\"/storageclass/{storageclass}/persistentvolume\").\n\t// \t\tTo(apiHandler.handleGetStorageClassPersistentVolumes).\n\t// \t\tWrites(persistentvolume.PersistentVolumeList{}))\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/log/source/{namespace}/{resourceName}/{resourceType}\").\n\t//\t\tTo(apiHandler.handleLogSource).\n\t//\t\tWrites(controller.LogSources{}))\n\n\t// region log\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/{namespace}/{pod}\").\n\t\t\tTo(apiHandler.handleLogs).\n\t\t\tWrites(logs.LogDetails{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/{namespace}/{pod}/{container}\").\n\t\t\tTo(apiHandler.handleLogs).\n\t\t\tWrites(logs.LogDetails{}))\n\t//\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/file/{namespace}/{pod}/{container}\").\n\t\t\tTo(apiHandler.handleLogFile).\n\t\t\tWrites(logs.LogDetails{}))\n\t// 
endregion\n\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/overview/\").\n\t//\t\tTo(apiHandler.handleOverview).\n\t//\t\tWrites(overview.Overview{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/overview/{namespace}\").\n\t//\t\tTo(apiHandler.handleOverview).\n\t//\t\tWrites(overview.Overview{}))\n\t//\n\n\t// region others\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/others\").\n\t\t\tTo(apiHandler.handleOtherResourcesList).\n\t\t\tWrites(other.ResourceList{}).\n\t\t\tDoc(\"get all resources\").\n\t\t\tParam(restful.QueryParameter(\"filterBy\", \"filter option separated by comma. For example parameter1,value1,parameter2,value2 - means that the data should be filtered by parameter1 equals value1 and parameter2 equals value2\").\n\t\t\t\tDataType(\"string\").\n\t\t\t\tAllowableValues(map[string]string{\n\t\t\t\t\t\"name\": \"search by name partial match\",\n\t\t\t\t\t\"namespace\": \"filter by namespace\",\n\t\t\t\t\t\"kind\": \"filter by kind\",\n\t\t\t\t\t\"scope\": \"allowed value `namespaced` and `clustered` filter by if a resource is namespaced\",\n\t\t\t\t})).\n\t\t\tParam(restful.QueryParameter(\"sortBy\", \"sort option separated by comma. 
For example a,parameter1,d,parameter2 - means that the data should be sorted by parameter1 (ascending) and later sort by parameter2 (descending)\").\n\t\t\t\tDataType(\"string\").\n\t\t\t\tAllowableValues(map[string]string{\n\t\t\t\t\t\"name\": \"\",\n\t\t\t\t\t\"namespace\": \"\",\n\t\t\t\t\t\"kind\": \"\",\n\t\t\t\t\t\"creationTimestamp\": \"\",\n\t\t\t\t})).\n\t\t\tParam(restful.QueryParameter(\"itemsPerPage\", \"items per page\").\n\t\t\t\tDataType(\"integer\")).\n\t\t\tParam(restful.QueryParameter(\"page\", \"page number\").DataType(\"integer\")).\n\t\t\tReturns(200, \"OK\", other.ResourceList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/others\").\n\t\t\tTo(apiHandler.handleOtherResourceCreate).\n\t\t\tDoc(\"create a resource\").\n\t\t\tReads([]unstructured.Unstructured{}).\n\t\t\tConsumes(restful.MIME_JSON).\n\t\t\tReturns(200, \"OK\", CreateResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/releases\").\n\t\t\tTo(apiHandler.handleReleaseCreate).\n\t\t\tDoc(\"create a release\").\n\t\t\tReads([]unstructured.Unstructured{}).\n\t\t\tConsumes(restful.MIME_JSON).\n\t\t\tReturns(200, \"OK\", []unstructured.Unstructured{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/releases/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetReleaseDetail).\n\t\t\tDoc(\"get a release\").\n\t\t\tReads(release.ReleaseDetails{}).\n\t\t\tConsumes(restful.MIME_JSON).\n\t\t\tReturns(200, \"OK\", release.ReleaseDetails{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/others/{group}/{version}/{kind}/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleOtherResourceDetail).\n\t\t\tWrites(other.OtherResourceDetail{}).\n\t\t\tDoc(\"get a resource detail with events\").\n\t\t\tReturns(200, \"OK\", other.OtherResourceDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/others/{group}/{version}/{kind}/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleOtherResourceDetail).\n\t\t\tDoc(\"delete a 
resource\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/others/{group}/{version}/{kind}/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleOtherResourceDetail).\n\t\t\tDoc(\"update a resource with whole resource json\").\n\t\t\tReads(unstructured.Unstructured{}).\n\t\t\tConsumes(restful.MIME_JSON))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PATCH(\"/others/{group}/{version}/{kind}/{namespace}/{name}/{field}\").\n\t\t\tTo(apiHandler.handleOtherResourcePatch).\n\t\t\tDoc(\"update resource annotations or labels\").\n\t\t\tReads(other.FieldPayload{}).\n\t\t\tConsumes(restful.MIME_JSON))\n\t// endregion\n\n\t// ---- DEVOPS APIS ----\n\n\t// region Jenkins\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinses\").\n\t\t\tTo(apiHandler.handleGetJenkins).\n\t\t\tWrites(jenkins.JenkinsList{}).\n\t\t\tDoc(\"get jenkins list\").\n\t\t\tReturns(200, \"OK\", jenkins.JenkinsList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinses/{name}\").\n\t\t\tTo(apiHandler.handleRetriveJenkins).\n\t\t\tWrites(v1alpha1.Jenkins{}).\n\t\t\tDoc(\"retrieve jenkins config\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Jenkins{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinses/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetJenkinsResources).\n\t\t\tWrites(common.ResourceList{}).\n\t\t\tDoc(\"retrieve resources associated with jenkins\").\n\t\t\tReturns(200, \"OK\", common.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/jenkinses/{name}\").\n\t\t\tTo(apiHandler.handleDeleteJenkins).\n\t\t\tWrites(jenkins.Jenkins{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/jenkinses/{name}\").\n\t\t\tTo(apiHandler.handlePutJenkins).\n\t\t\tWrites(v1alpha1.Jenkins{}).\n\t\t\tDoc(\"update jenkins config\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Jenkins{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/jenkinses\").\n\t\t\tTo(apiHandler.handleCreateJenkins).\n\t\t\tWrites(v1alpha1.Jenkins{}).\n\t\t\tDoc(\"update jenkins config\").\n\t\t\tReturns(200, \"OK\", 
v1alpha1.Jenkins{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding\").\n\t\t\tTo(apiHandler.handleGetJenkinsBindingList).\n\t\t\tWrites(jenkinsbinding.JenkinsBindingList{}).\n\t\t\tDoc(\"get jenkinsbinding list\").\n\t\t\tReturns(200, \"OK\", jenkinsbinding.JenkinsBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetJenkinsBindingList).\n\t\t\tWrites(jenkinsbinding.JenkinsBindingList{}).\n\t\t\tDoc(\"get namespaced jenkinsbinding list\").\n\t\t\tReturns(200, \"OK\", jenkinsbinding.JenkinsBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetJenkinsBinding).\n\t\t\tDoc(\"get jenkinsbinding details\").\n\t\t\tWrites(v1alpha1.JenkinsBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding/{namespace}/{name}/croncheck\").\n\t\t\tTo(apiHandler.handleCronCheck).\n\t\t\tDoc(\"cron syntax check\").\n\t\t\tWrites(jenkinsbinding.CronCheckResult{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding/{namespace}/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetJenkinsBindingResources).\n\t\t\tWrites(common.ResourceList{}).\n\t\t\tDoc(\"retrieve resources associated with jenkinsbinding\").\n\t\t\tReturns(200, \"OK\", common.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/jenkinsbinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteJenkinsBinding).\n\t\t\tWrites(v1alpha1.JenkinsBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/jenkinsbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateJenkinsBinding).\n\t\t\tWrites(v1alpha1.JenkinsBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/jenkinsbinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateJenkinsBinding).\n\t\t\tWrites(v1alpha1.JenkinsBinding{}))\n\t// endregion\n\n\t//domain\n\t// region 
DomainBinding\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/domainbinding\").\n\t\t\tTo(apiHandler.handleGetDomainBindingList).\n\t\t\tWrites(domainbinding.DomainBindingList{}).\n\t\t\tDoc(\"get domianbinding list\"))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/domainbinding\").\n\t\t\tTo(apiHandler.handleCreateDomainBinding).\n\t\t\tWrites(domainbinding.DomainBindingDetail{}).\n\t\t\tDoc(\"create domainbinding\"))\n\tdomainBindDetailURI := \"/domainbinding/{name}\"\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(domainBindDetailURI).\n\t\t\tTo(apiHandler.handleGetDomainBindingDetail).\n\t\t\tWrites(domainbinding.DomainBindingDetail{}).\n\t\t\tDoc(\"get domainbinding detail\"))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(domainBindDetailURI).\n\t\t\tTo(apiHandler.handleUpdateDomainBindingDetail).\n\t\t\tWrites(domainbinding.DomainBindingDetail{}).\n\t\t\tDoc(\"update domainbinding detail\"))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(domainBindDetailURI).\n\t\t\tTo(apiHandler.handleDeleteDomainBindingDetail).\n\t\t\tDoc(\"delete domainbinding detailt\"))\n\t// endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/chart/{name}\").\n\t\t\tTo(apiHandler.handleGetChartDetail).\n\t\t\tWrites(catalog.Chart{}).\n\t\t\tDoc(\"get chart detail\"))\n\n\t// region PipelineTemplate\n\t// PipelineTemplateSync\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatesync/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateSyncList).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSyncList{}).\n\t\t\tDoc(\"get pipelineTemplateSync list\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSyncList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"get detail of specific PipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", 
pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelinetemplatesync/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"create a pipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"update a pipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePipelineTemplateSync).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a PipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\t// PipelineTaskTemplate\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetasktemplate/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTaskTemplateList).\n\t\t\tWrites(pipelinetasktemplate.PipelineTaskTemplateList{}).\n\t\t\tDoc(\"get a list of PipelineTaskTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetasktemplate.PipelineTaskTemplate{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetasktemplate/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTaskTemplate).\n\t\t\tWrites(pipelinetasktemplate.PipelineTaskTemplate{}).\n\t\t\tDoc(\"get a PipelineTaskTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetasktemplate.PipelineTaskTemplate{}))\n\n\t// ClusterPipelineTemplate\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterpipelinetemplate\").\n\t\t\tTo(apiHandler.handleGetClusterPipelineTemplateList).\n\t\t\tWrites(clusterpipelinetemplate.ClusterPipelineTemplateList{}).\n\t\t\tDoc(\"get a list of ClusterPipelineTemplate\").\n\t\t\tReturns(200, \"OK\", 
clusterpipelinetemplate.ClusterPipelineTemplateList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterpipelinetemplate/{name}\").\n\t\t\tTo(apiHandler.handleGetClusterPipelineTemplate).\n\t\t\tWrites(clusterpipelinetemplate.ClusterPipelineTemplate{}).\n\t\t\tDoc(\"get a ClusterPipelineTemplate\").\n\t\t\tReturns(200, \"OK\", clusterpipelinetemplate.ClusterPipelineTemplate{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/clusterpipelinetemplate/{name}/preview\").\n\t\t\tTo(apiHandler.handlePreviewClusterPipelineTemplate).\n\t\t\tWrites(clusterpipelinetemplate.PreviewOptions{}).\n\t\t\tDoc(\"preview a ClusterPipelineTemplate\").\n\t\t\tReturns(200, \"OK\", \"\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterpipelinetemplate/{name}/exports\").\n\t\t\tTo(apiHandler.handlerExportsClusterPiplineTemplate).\n\t\t\tWrites(clusterpipelinetemplate.PipelineExportedVariables{}).\n\t\t\tDoc(\"get the exports in clusterpipelinetemplate\").\n\t\t\tReturns(200, \"OK\", clusterpipelinetemplate.PipelineExportedVariables{}))\n\n\t// PipelineTemplateSync\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatesync/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateSyncList).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSyncList{}).\n\t\t\tDoc(\"get pipelineTemplateSync list\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSyncList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"get detail of specific PipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelinetemplatesync/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"create a pipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", 
pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"update a pipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePipelineTemplateSync).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a PipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\t// PipelineTaskTemplate\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetasktemplate/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTaskTemplateList).\n\t\t\tWrites(pipelinetasktemplate.PipelineTaskTemplateList{}).\n\t\t\tDoc(\"get a list of PipelineTaskTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetasktemplate.PipelineTaskTemplate{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetasktemplate/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTaskTemplate).\n\t\t\tWrites(pipelinetasktemplate.PipelineTaskTemplate{}).\n\t\t\tDoc(\"get a PipelineTaskTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetasktemplate.PipelineTaskTemplate{}))\n\n\t// PipelineTemplate\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplate/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateList).\n\t\t\tWrites(pipelinetemplate.PipelineTemplateList{}).\n\t\t\tDoc(\"get a list of PipelineTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetemplate.PipelineTemplateList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplate/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplate).\n\t\t\tWrites(pipelinetemplate.PipelineTemplate{}).\n\t\t\tDoc(\"get a PipelineTemplate\").\n\t\t\tReturns(200, \"OK\", 
pipelinetemplate.PipelineTemplate{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"pipelinetemplate/{namespace}/{name}/preview\").\n\t\t\tTo(apiHandler.handlePreviewPipelineTemplate).\n\t\t\tWrites(pipelinetemplate.PreviewOptions{}).\n\t\t\tDoc(\"jenkinsfile preview from PipelineTemplate\").\n\t\t\tReturns(200, \"OK\", \"\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplate/{namespace}/{name}/exports\").\n\t\t\tTo(apiHandler.handlerExportsPiplineTemplate).\n\t\t\tWrites(clusterpipelinetemplate.PipelineExportedVariables{}).\n\t\t\tDoc(\"get the exports in pipelinetemplate\").\n\t\t\tReturns(200, \"OK\", clusterpipelinetemplate.PipelineExportedVariables{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatecategories/{namespace}\").\n\t\t\tTo(apiHandler.handlePipelinetemplatecategories).\n\t\t\tWrites(pipelinetemplate.PipelineTemplateCategoryList{}).\n\t\t\tDoc(\"get a PipelineTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetemplate.PipelineTemplateCategoryList{}))\n\n\t// endregion\n\n\t// region Pipeline\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelineconfig/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineConfigList).\n\t\t\tWrites(pipelineconfig.PipelineConfigList{}).\n\t\t\tDoc(\"get namespaced pipelineconfig list\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineConfigList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelineconfig/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePipelineConfig).\n\t\t\tWrites(pipelineconfig.PipelineConfigDetail{}).\n\t\t\tDoc(\"creates namespaced pipelineconfig\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineConfigDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelineconfig/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineConfigDetail).\n\t\t\tWrites(pipelineconfig.PipelineConfig{}).\n\t\t\tDoc(\"get pipeline config details\").\n\t\t\tReturns(200, \"OK\", 
pipelineconfig.PipelineConfigDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/pipelineconfig/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePipelineConfig).\n\t\t\tWrites(pipelineconfig.PipelineConfig{}).\n\t\t\tDoc(\"update pipeline config\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineConfigDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/pipelineconfig/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePipelineConfig).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"deletes a pipeline config\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelineconfig/{namespace}/{name}/trigger\").\n\t\t\tTo(apiHandler.handleTriggerPipelineConfig).\n\t\t\tWrites(pipelineconfig.PipelineConfigTrigger{}).\n\t\t\tDoc(\"triggers pipeline\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineTriggerResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelineconfig/{namespace}/{name}/preview\").\n\t\t\tTo(apiHandler.handlePreviewPipelineConfig).\n\t\t\tWrites(pipelineconfig.PipelineConfigDetail{}).\n\t\t\tDoc(\"jenkinsfile preview\").\n\t\t\tReturns(200, \"OK\", \"\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelineconfig/{namespace}/{name}/scan\").\n\t\t\tTo(apiHandler.handleScanPipelineConfig).\n\t\t\tDoc(\"scan multi-branch\").\n\t\t\tReturns(200, \"OK\", \"\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelineconfig/{namespace}/{name}/logs\").\n\t\t\tParam(restful.PathParameter(\"namespace\", \"Namespace to use\")).\n\t\t\tParam(restful.PathParameter(\"name\", \"Pipeline name to filter scope\")).\n\t\t\tParam(restful.QueryParameter(\"start\", \"Start offset to fetch logs\")).\n\t\t\tTo(apiHandler.handlePipelineConfigLogs).\n\t\t\tDoc(\"gets scan logs for multi-branch pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.PipelineConfigLog{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineList).\n\t\t\tWrites(pipeline.PipelineList{}).\n\t\t\tDoc(\"get namespaced 
pipeline list\").\n\t\t\tReturns(200, \"OK\", pipeline.PipelineList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}/{name}\").\n\t\t\tParam(restful.QueryParameter(\"withFreshStages\", \"Whether to retrieve newest stages from Jenkins\")).\n\t\t\tTo(apiHandler.handleGetPipelineDetail).\n\t\t\tWrites(pipeline.Pipeline{}).\n\t\t\tDoc(\"get pipeline details\").\n\t\t\tReturns(200, \"OK\", pipeline.Pipeline{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/pipeline/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePipeline).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"deletes a pipeline\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipeline/{namespace}/{name}/retry\").\n\t\t\tTo(apiHandler.handleRetryPipelineDetail).\n\t\t\tWrites(pipeline.RetryRequest{}).\n\t\t\tDoc(\"retries a pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Pipeline{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/pipeline/{namespace}/{name}/abort\").\n\t\t\tTo(apiHandler.handleAbortPipeline).\n\t\t\tWrites(pipeline.AbortRequest{}).\n\t\t\tDoc(\"aborts a pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Pipeline{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}/{name}/logs\").\n\t\t\tParam(restful.PathParameter(\"namespace\", \"Namespace to use\")).\n\t\t\tParam(restful.PathParameter(\"name\", \"Pipeline name to filter scope\")).\n\t\t\tParam(restful.QueryParameter(\"start\", \"Start offset to fetch logs\")).\n\t\t\tParam(restful.QueryParameter(\"stage\", \"Stage to fetch logs from\")).\n\t\t\tParam(restful.QueryParameter(\"step\", \"Step to fetch logs from. 
Can be combined with stage\")).\n\t\t\tTo(apiHandler.handlePipelineLogs).\n\t\t\tDoc(\"gets logs for pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.PipelineLog{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}/{name}/tasks\").\n\t\t\tParam(restful.PathParameter(\"namespace\", \"Namespace to use\")).\n\t\t\tParam(restful.PathParameter(\"name\", \"Pipeline name to filter scope\")).\n\t\t\tParam(restful.QueryParameter(\"stage\", \"Stage to fetch steps from. If not provided will return all stages\")).\n\t\t\tTo(apiHandler.handlePipelineTasks).\n\t\t\tDoc(\"gets steps for pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.PipelineTask{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipeline/{namespace}/{name}/inputs\").\n\t\t\tTo(apiHandler.handlePipelineInput).\n\t\t\tWrites(pipeline.InputOptions{}).\n\t\t\tDoc(\"response a input request which in a pipeline\").\n\t\t\tReturns(200, \"OK\", pipeline.InputResponse{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}/{name}/testreports\").\n\t\t\tParam(restful.QueryParameter(\"start\", \"Start offset to fetch test report items\")).\n\t\t\tParam(restful.QueryParameter(\"limit\", \"Limit of number to fetch test report items\")).\n\t\t\tTo(apiHandler.handlePipelineTestReports).\n\t\t\tDoc(\"response a input request which in a pipeline\").\n\t\t\tReturns(200, \"OK\", pipeline.PipelineTestReports{}))\n\n\t// endregion\n\n\t// region 
CodeRepository\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/codereposervice\").\n\t\t\tTo(apiHandler.handleCreateCodeRepoService).\n\t\t\tWrites(codereposervice.CodeRepoServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/codereposervice/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCodeRepoService).\n\t\t\tWrites(codereposervice.CodeRepoService{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/codereposervice/{name}\").\n\t\t\tTo(apiHandler.handleUpdateCodeRepoService).\n\t\t\tWrites(v1alpha1.CodeRepoService{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervice\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceList).\n\t\t\tWrites(codereposervice.CodeRepoServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervices\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceList).\n\t\t\tWrites(codereposervice.CodeRepoServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervice/{name}\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceDetail).\n\t\t\tWrites(v1alpha1.CodeRepoService{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervice/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceResourceList).\n\t\t\tWrites(common.ResourceList{}).\n\t\t\tDoc(\"retrieve resources associated with codereposervice\").\n\t\t\tReturns(200, \"OK\", 
common.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervice/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/coderepobinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateCodeRepoBinding).\n\t\t\tWrites(v1alpha1.CodeRepoBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/coderepobinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCodeRepoBinding).\n\t\t\tWrites(v1alpha1.CodeRepoBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/coderepobinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateCodeRepoBinding).\n\t\t\tWrites(struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingList).\n\t\t\tWrites(coderepobinding.CodeRepoBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingList).\n\t\t\tWrites(coderepobinding.CodeRepoBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingDetail).\n\t\t\tWrites(v1alpha1.CodeRepoBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingResources).\n\t\t\tWrites(common.ResourceList{}).\n\t\t\tDoc(\"retrieve resources associated with coderepobinding\").\n\t\t\tReturns(200, \"OK\", 
common.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}/repositories\").\n\t\t\tTo(apiHandler.handleGetCodeRepositoryListInBinding).\n\t\t\tWrites(coderepository.CodeRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}/remote-repositories\").\n\t\t\tTo(apiHandler.handleGetRemoteRepositoryList).\n\t\t\tWrites(coderepository.CodeRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepository/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeRepositoryList).\n\t\t\tWrites(coderepository.CodeRepositoryList{}).\n\t\t\tDoc(\"get namespaced coderepository list\").\n\t\t\tReturns(200, \"OK\", coderepository.CodeRepositoryList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepository/{namespace}/{name}/branches\").\n\t\t\tParam(restful.PathParameter(\"sortBy\", \"sort option. The choices are creationTime\")).\n\t\t\tParam(restful.PathParameter(\"sortMode\", \"sort option. 
The choices are desc or asc\")).\n\t\t\tTo(apiHandler.HandleGetCodeRepositoryBranches).\n\t\t\tReturns(200, \"Get coderepo branch Successful\", v1alpha1.CodeRepoBranchResult{}))\n\n\t// endregion\n\n\t// region ToolChain\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/toolchain\").\n\t\t\tTo(apiHandler.handleGetToolChains).\n\t\t\tWrites(toolchain.ToolChainList{}).\n\t\t\tDoc(\"get namespaced coderepository list\").\n\t\t\tReturns(200, \"OK\", coderepository.CodeRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/toolchain/bindings\").\n\t\t\tTo(apiHandler.handleGetToolChainBindings).\n\t\t\tWrites(toolchain.ToolChainBindingList{}).\n\t\t\tDoc(\"get toolchain binding list\").\n\t\t\tReturns(200, \"OK\", coderepository.CodeRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/toolchain/bindings/{namespace}\").\n\t\t\tTo(apiHandler.handleGetToolChainBindings).\n\t\t\tWrites(toolchain.ToolChainBindingList{}).\n\t\t\tDoc(\"get namespaced toolchain binding list\").\n\t\t\tReturns(200, \"OK\", coderepository.CodeRepositoryList{}))\n\t// endregion\n\n\t// region callback\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/callback/oauth/{namespace}/secret/{secretNamespace}/{secretName}/codereposervice/{serviceName}\").\n\t\t\tTo(apiHandler.handleOAuthCallback).\n\t\t\tWrites(struct{}{}))\n\t// endregion\n\n\t// region 
ImageRegistry\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/imageregistry\").\n\t\t\tTo(apiHandler.handleCreateImageRegistry).\n\t\t\tWrites(imageregistry.ImageRegistryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/imageregistry/{name}\").\n\t\t\tTo(apiHandler.handleDeleteImageRegsitry).\n\t\t\tWrites(imageregistry.ImageRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/imageregistry/{name}\").\n\t\t\tTo(apiHandler.handleUpdateImageRegistry).\n\t\t\tWrites(v1alpha1.ImageRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistry\").\n\t\t\tTo(apiHandler.handleGetImageRegistryList).\n\t\t\tWrites(imageregistry.ImageRegistryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistry/{name}\").\n\t\t\tTo(apiHandler.handleGetImageRegistryDetail).\n\t\t\tWrites(v1alpha1.ImageRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistry/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetImageRegistrySecretList).\n\t\t\tWrites(secret.SecretList{}))\n\t// endregion\n\n\t// region ImageRegistryBinding\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/imageregistrybinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateImageRegistryBinding).\n\t\t\tWrites(v1alpha1.ImageRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLIMAGEREGISTRYBINDINGDETAIL).\n\t\t\tTo(apiHandler.handleUpdateImageRegistryBinding).\n\t\t\tWrites(v1alpha1.ImageRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLIMAGEREGISTRYBINDINGDETAIL).\n\t\t\tTo(apiHandler.handleDeleteImageRegistryBinding).\n\t\t\tWrites(v1alpha1.ImageRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding\").\n\t\t\tTo(apiHandler.handleGetImageRegistryBindingList).\n\t\t\tWrites(imageregistrybinding.ImageRegistryBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetImageRegistryBindingList).\n\t\t\tWrites(imageregistrybinding.ImageRegistryBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLIMAGEREGISTRYBINDINGDETAIL).\n\t\t\tTo(apiHand
ler.handleGetImageRegistryBindingDetail).\n\t\t\tWrites(v1alpha1.ImageRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetImageRegistryBindingSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}/{name}/repositories\").\n\t\t\tTo(apiHandler.handleGetImageRepositoryListInBinding).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}/{name}/remote-repositories\").\n\t\t\tTo(apiHandler.handleGetImageOriginRepositoryList).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}/{name}/remote-repositories-project\").\n\t\t\tTo(apiHandler.handleGetImageOriginRepositoryProjectList).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}))\n\t// endregion\n\n\t// region ImageRepository\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imagerepository/{namespace}\").\n\t\t\tTo(apiHandler.handleGetImageRepositoryList).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}).\n\t\t\tDoc(\"get namespaced imagerepository list\").\n\t\t\tReturns(200, \"OK\", imagerepository.ImageRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imagerepositoryproject/{namespace}\").\n\t\t\tTo(apiHandler.handleGetImageRepositoryProjectList).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}).\n\t\t\tDoc(\"get namespaced imagerepository list\").\n\t\t\tReturns(200, \"OK\", imagerepository.ImageRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imagerepository/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetImageRepositoryDetail).\n\t\t\tWrites(v1alpha1.ImageRepository{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imagerepository/{namespace}/{name}/tags\").\n\t\t\tParam(restful.PathParameter(\"sortBy\", \"sort option. 
The choices are creationTime\")).\n\t\t\tParam(restful.PathParameter(\"sortMode\", \"sort option. The choices are desc or asc\")).\n\t\t\tTo(apiHandler.HandleGetImageTags).\n\t\t\tReturns(200, \"Get Image tags Successful\", v1alpha1.ImageTagResult{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"imagerepository/{namespace}/{name}/security\").\n\t\t\tParam(restful.PathParameter(\"tag\", \"Scan image tag name\")).\n\t\t\tTo(apiHandler.HandleScanImage).\n\t\t\tReturns(200, \"Create Scan Image Job Successful.\", v1alpha1.ImageResult{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"imagerepository/{namespace}/{name}/security\").\n\t\t\tParam(restful.PathParameter(\"tag\", \"Get image vulnerability tag name\")).\n\t\t\tTo(apiHandler.HandleGetVulnerability).\n\t\t\tReturns(200, \"Get Image Vulnerability Successful\", v1alpha1.VulnerabilityList{}))\n\t// endregion\n\n\t// region microservicesenvironments\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesenvironments\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handleMicroservicesEnvironmentList).\n\t\t\tWrites(asfClient.MicroservicesEnvironmentList{}).\n\t\t\tDoc(\"get microservicesenvironment list\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesEnvironmentList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesenvironments/{name}\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tWrites(microservicesenvironment.MicroservicesEnvironmentDetail{}).\n\t\t\tTo(apiHandler.handleGetMicroservicesEnviromentDetail).\n\t\t\tDoc(\"get microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", microservicesenvironment.MicroservicesEnvironmentDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/microservicescomponent/{namespace}/{name}\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handlePutMicroservicesComponent).\n\t\t\tWrites(asfClient.MicroservicesComponent{}).\n\t\t\tDoc(\"install component\").\n\t\t\tReturns(200, \"OK\", 
asfClient.MicroservicesComponent{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/microservicescomponent/{namespace}/{name}\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handlePutMicroservicesComponent).\n\t\t\tWrites(asfClient.MicroservicesComponent{}).\n\t\t\tDoc(\"update component\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesComponent{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/microservicescomponent/{namespace}/{name}/start\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handlePutMicroservicesComponentStart).\n\t\t\tWrites(asfClient.MicroservicesComponent{}).\n\t\t\tDoc(\"start component\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesComponent{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/microservicescomponent/{namespace}/{name}/stop\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handlePutMicroservicesComponentStop).\n\t\t\tWrites(asfClient.MicroservicesComponentList{}).\n\t\t\tDoc(\"stop component\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesComponent{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesapps\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tWrites(microservicesapplication.MicroservicesApplicationList{}).\n\t\t\tTo(apiHandler.handleGetMicroservicesApps).\n\t\t\tDoc(\"get microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", microservicesapplication.MicroservicesApplicationList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesconfigs\").\n\t\t\tWrites(microservicesconfiguration.MicroservicesConfigurationList{}).\n\t\t\tTo(apiHandler.handleGetMicroservicesConfigs).\n\t\t\tDoc(\"get microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", microservicesconfiguration.MicroservicesConfigurationList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/domains\").\n\t\t\tWrites(domain.DomainList{}).\n\t\t\tTo(apiHandler.handleGetDomainList).\n\t\t\tDoc(\"get microservicesenvironments detail by 
name\").\n\t\t\tReturns(200, \"OK\", domain.DomainList{}))\n\n\t// endregion\n\n\t// region ProjectManagement\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/projectmanagement\").\n\t\t\tTo(apiHandler.handleCreateProjectManagement).\n\t\t\tWrites(v1alpha1.ProjectManagement{}).\n\t\t\tDoc(\"create a projectmanagement\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagement{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLProjectManagementDetails).\n\t\t\tTo(apiHandler.handleDeleteProjectManagement).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a projectmanagement\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLProjectManagementDetails).\n\t\t\tTo(apiHandler.handleUpdateProjectManagement).\n\t\t\tWrites(v1alpha1.ProjectManagement{}).\n\t\t\tDoc(\"update a projectmanagement\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagement{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/projectmanagement\").\n\t\t\tTo(apiHandler.handleGetProjectManagementList).\n\t\t\tWrites(projectmanagement.ProjectManagementList{}).\n\t\t\tDoc(\"get projectmanagement list\").\n\t\t\tReturns(200, \"OK\", projectmanagement.ProjectManagementList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLProjectManagementDetails).\n\t\t\tTo(apiHandler.handleGetProjectManagementDetail).\n\t\t\tWrites(v1alpha1.ProjectManagement{}).\n\t\t\tDoc(\"get a projectmanagement\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagement{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/projectmanagementbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateProjectManagementBinding).\n\t\t\tWrites(v1alpha1.ProjectManagementBinding{}).\n\t\t\tDoc(\"create a projectmanagementbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagementBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLProjectManagementBindingDetails).\n\t\t\tTo(apiHandler.handleDeleteProjectManagementBinding).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a projectmanagementbinding\").\n\t\t\tReturns(200, \"OK\", 
struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLProjectManagementBindingDetails).\n\t\t\tTo(apiHandler.handleUpdateProjectManagementBinding).\n\t\t\tWrites(v1alpha1.ProjectManagementBinding{}).\n\t\t\tDoc(\"update a projectmanagementbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagementBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/projectmanagementbinding\").\n\t\t\tTo(apiHandler.handleGetProjectManagementBindingList).\n\t\t\tWrites(projectmanagementbinding.ProjectManagementBindingList{}).\n\t\t\tDoc(\"get projectmanagementbinding list in all namespaces\").\n\t\t\tReturns(200, \"OK\", projectmanagementbinding.ProjectManagementBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/projectmanagementbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetProjectManagementBindingList).\n\t\t\tWrites(projectmanagementbinding.ProjectManagementBindingList{}).\n\t\t\tDoc(\"get projectmanagementbinding list in one namespace\").\n\t\t\tReturns(200, \"OK\", projectmanagementbinding.ProjectManagementBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLProjectManagementBindingDetails).\n\t\t\tTo(apiHandler.handleGetProjectManagementBindingDetail).\n\t\t\tWrites(v1alpha1.ProjectManagementBinding{}).\n\t\t\tDoc(\"get a projectmanagementbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagementBinding{}))\n\t// endregion\n\n\t// region TestTool\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/testtool\").\n\t\t\tTo(apiHandler.handleCreateTestTool).\n\t\t\tWrites(v1alpha1.TestTool{}).\n\t\t\tDoc(\"create a testtool\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLTestToolDetails).\n\t\t\tTo(apiHandler.handleDeleteTestTool).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a testtool\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLTestToolDetails).\n\t\t\tTo(apiHandler.handleUpdateTestTool).\n\t\t\tWrites(v1alpha1.TestTool{}).\n\t\t\tDoc(\"update a testtool\").\n\t\t\tReturns(200, \"OK\", 
v1alpha1.TestTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/testtool\").\n\t\t\tTo(apiHandler.handleGetTestToolList).\n\t\t\tWrites(testtool.TestToolList{}).\n\t\t\tDoc(\"get testtool list\").\n\t\t\tReturns(200, \"OK\", testtool.TestToolList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLTestToolDetails).\n\t\t\tTo(apiHandler.handleGetTestToolDetail).\n\t\t\tWrites(v1alpha1.TestTool{}).\n\t\t\tDoc(\"get a testtool\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/testtoolbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateTestToolBinding).\n\t\t\tWrites(v1alpha1.TestToolBinding{}).\n\t\t\tDoc(\"create a testtoolbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestToolBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLTestToolBindingDetails).\n\t\t\tTo(apiHandler.handleDeleteTestToolBinding).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a testtoolbinding\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLTestToolBindingDetails).\n\t\t\tTo(apiHandler.handleUpdateTestToolBinding).\n\t\t\tWrites(v1alpha1.TestToolBinding{}).\n\t\t\tDoc(\"update a testtoolbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestToolBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/testtoolbinding\").\n\t\t\tTo(apiHandler.handleGetTestToolBindingList).\n\t\t\tWrites(testtoolbinding.TestToolBindingList{}).\n\t\t\tDoc(\"get testtoolbinding list in all namespaces\").\n\t\t\tReturns(200, \"OK\", testtoolbinding.TestToolBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/testtoolbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetTestToolBindingList).\n\t\t\tWrites(testtoolbinding.TestToolBindingList{}).\n\t\t\tDoc(\"get testtoolbinding list in one namespace\").\n\t\t\tReturns(200, \"OK\", testtoolbinding.TestToolBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLTestToolBindingDetails).\n\t\t\tTo(apiHandler.handleGetTestToolBindingDetail).\n\t\t\tWrites(v1alpha1.TestToolBinding{}).\n\t\t\tDoc(\"get a 
testtoolbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestToolBinding{}))\n\t// endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesconfigs\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tWrites(microservicesconfiguration.MicroservicesConfigurationList{}).\n\t\t\tTo(apiHandler.handleGetMicroservicesConfigs).\n\t\t\tDoc(\"get microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", microservicesconfiguration.MicroservicesConfigurationList{}))\n\n\t// region Statistics\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statistics/pipeline/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineStatistics).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"get the statistics info of pipeline\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statistics/stage/{namespace}\").\n\t\t\tTo(apiHandler.handleGetStageStatistics).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"get the statistics info of stage\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statistics/codequality/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityStatistics).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"get the statistics info of stage\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\t// endregion\n\n\t// region CodeQualityTool\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/codequalitytool\").\n\t\t\tTo(apiHandler.handleCreateCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"create a code quality tool\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLCODEQUALITYTOOLDETAIL).\n\t\t\tTo(apiHandler.handleDeleteCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"delete a code quality tool with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLCODEQUALITYTOOLDETAIL).\n\t\t\tTo(apiHandler.handleUpdateCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"update a 
code quality tool with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLCODEQUALITYTOOLDETAIL).\n\t\t\tTo(apiHandler.handleGetCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"get a code quality tool with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitytool\").\n\t\t\tTo(apiHandler.handleListCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"list code quality tools\").\n\t\t\tReturns(200, \"OK\", codequalitytool.CodeQualityToolList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/codequalitybinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateCodeQualityBinding).\n\t\t\tWrites(v1alpha1.CodeQualityBinding{}).\n\t\t\tDoc(\"create a code quality binding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/codequalitybinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateCodeQualityBinding).\n\t\t\tDoc(\"update a code quality binding with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityBindingList).\n\t\t\tDoc(\"get namespaced code quality binding list\").\n\t\t\tReturns(200, \"OK\", codequalitybinding.CodeQualityBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding\").\n\t\t\tTo(apiHandler.handleGetCodeQualityBindingList).\n\t\t\tDoc(\"get all code quality binding list\").\n\t\t\tReturns(200, \"OK\", codequalitybinding.CodeQualityBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityBindingDetail).\n\t\t\tDoc(\"get code quality binding with name\").\n\t\t\tReturns(200, \"OK\", 
v1alpha1.CodeQualityBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding/{namespace}/{name}/projects\").\n\t\t\tTo(apiHandler.handleGetCodeQualityProjectListInBinding).\n\t\t\tDoc(\"get code quality project list in binding\").\n\t\t\tReturns(200, \"OK\", codequalityproject.CodeQualityProjectList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding/{namespace}/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetCodeQualityBindingSecretList).\n\t\t\tDoc(\"get bind secret list\").\n\t\t\tReturns(200, \"OK\", secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/codequalitybinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCodeQualityBinding).\n\t\t\tDoc(\"delete code quality binding with name\").\n\t\t\tReturns(200, \"OK\", common.ResourceList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/codequalityproject/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateCodeQualityProject).\n\t\t\tDoc(\"create a code quality project\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityProject{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/codequalityproject/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateCodeQualityProject).\n\t\t\tDoc(\"update a code quality project with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityProject{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalityproject/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityProjectList).\n\t\t\tDoc(\"create a code quality project\").\n\t\t\tReturns(200, \"OK\", codequalityproject.CodeQualityProjectList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalityproject/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityProjectDetail).\n\t\t\tDoc(\"create a code quality project\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityProject{}))\n\n\t//region asm\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}\").\n\t\t\tTo(apiHandler.handleGetServiceListByProject).\n\t\t\tWrites(resourceService.ServiceNameList{}))\n\t//endregion\n\n\t// region 
asm\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/servicemesh/graphs/{namespace}\").\n\t\t\tTo(apiHandler.handleGetNamespaceGraph).\n\t\t\tDoc(\"get namespace service graph\").\n\t\t\tReturns(200, \"OK\", servicegraph.Graph{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/servicemesh/metrics\").\n\t\t\tTo(apiHandler.handleGetMetrics).\n\t\t\tDoc(\"get metrics from given options\").\n\t\t\tReturns(200, \"ok\", \"\"))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/servicemesh/nodegraphs\").\n\t\t\tTo(apiHandler.handleGetNodeGraph).\n\t\t\tDoc(\"get namespace service graph\").\n\t\t\tReturns(200, \"OK\", servicegraph.Graph{}))\n\t//endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservice/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetMicroserviceRelation).\n\t\t\tDoc(\"get microservice deployment and svc relation\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/microservice/{namespace}/{name}/service\").\n\t\t\tTo(apiHandler.handleCreateMicroserviceSvc))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/microservice/{namespace}/{name}/service/{servicename}\").\n\t\t\tTo(apiHandler.handleUpdateMicroserviceSvc))\n\n\t// destinationrule\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/destinationrule/{namespace}\").\n\t\t\tTo(apiHandler.handleListDestinationRule).\n\t\t\tDoc(\"get namespace destination rule\"),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/destinationrule/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetDestinationRuleDetail),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/destinationruleinfohost/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetDestinationRuleInfoHost),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/destinationrule/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateDestinationRule))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/destinationrule/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteDestinationRule),\n\t)\n\n\t// 
virtualservice\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/virtualservice/{namespace}\").\n\t\t\tTo(apiHandler.handleListVirtualService))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/virtualservice/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetVirtualService))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/virtualservicehost/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetVirtualServiceByHost))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/virtualservice/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateVirtualService),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/virtualservice/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateVirtualService))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/virtualservice/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteVirtualService),\n\t)\n\n\t// Policy\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/policy/{namespace}\").\n\t\t\tTo(apiHandler.handleListPolicy))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/policy/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPolicy))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/policy/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePolicy),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/policy/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePolicy))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/policy/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePolicy),\n\t)\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/gateway/{namespace}\").\n\t\t\tTo(apiHandler.handleListGateways))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/gateway/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetGateway))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/gateway/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateGateway))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/gateway/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateGateway))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/gateway/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteGateway))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/asmclusterconfig/{name}\").\n\t\t\tTo(apiHandler.handleGetASMClusterConfig))\n\
tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/asmclusterconfig/{name}\").\n\t\t\tTo(apiHandler.handleUpdateASMClusterConfig))\n\t// endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/artifactregistrymanagers\").\n\t\t\tTo(apiHandler.handleCreateArtifactRegistryManager).\n\t\t\tWrites(artifactregistrymanager.ArtifactRegistryManager{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/artifactregistrymanagers/{name}\").\n\t\t\tTo(apiHandler.handleDeleteArtifactRegistryManager).\n\t\t\tWrites(artifactregistrymanager.ArtifactRegistryManager{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/artifactregistrymanagers/{name}\").\n\t\t\tTo(apiHandler.handleUpdateArtifactRegistryManager).\n\t\t\tWrites(v1alpha1.ArtifactRegistryManager{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistrymanagers\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryManagerList).\n\t\t\tWrites(artifactregistrymanager.ArtifactRegistryManagerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistrymanagers/{name}\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryManagerDetail).\n\t\t\tWrites(v1alpha1.ArtifactRegistryManager{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/artifactregistries\").\n\t\t\tTo(apiHandler.handleCreateArtifactRegistry).\n\t\t\tWrites(artifactregistry.ArtifactRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/artifactregistries/{name}\").\n\t\t\tTo(apiHandler.handleDeleteArtifactRegistry).\n\t\t\tWrites(artifactregistry.ArtifactRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/artifactregistries/{name}\").\n\t\t\tTo(apiHandler.handleUpdateArtifactRegistry).\n\t\t\tWrites(v1alpha1.ArtifactRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistries\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryList).\n\t\t\tWrites(artifactregistry.ArtifactRegistryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistries/{name}\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryDetail).\n\t\t\tWrites(v1alpha1.ArtifactRegistry{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/artifactregistrybi
ndings\").\n\t\t\tTo(apiHandler.handleCreateArtifactRegistryBinding).\n\t\t\tWrites(artifactregistrybinding.ArtifactRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/artifactregistrybindings/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateArtifactRegistryBinding).\n\t\t\tWrites(artifactregistrybinding.ArtifactRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/artifactregistrybindings/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteArtifactRegistryBinding).\n\t\t\tWrites(artifactregistrybinding.ArtifactRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/artifactregistrybindings/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateArtifactRegistryBinding).\n\t\t\tWrites(v1alpha1.ArtifactRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistrybindings/{namespace}\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryBindingList).\n\t\t\tWrites(artifactregistrybinding.ArtifactRegistryBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistrybindings/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryBindingDetail).\n\t\t\tWrites(v1alpha1.ArtifactRegistryBinding{}))\n\n\t//common 
route\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/common/{resource}\").\n\t\t\tTo(apiHandler.handlePostCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/common/{resource}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/common/{resource}/{name}\").\n\t\t\tTo(apiHandler.handlePutCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/{resource}\").\n\t\t\tTo(apiHandler.handleGetCommonResourceList).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/{resource}/{name}\").\n\t\t\tTo(apiHandler.handleGetCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/common/namespace/{namespace}/{resource}\").\n\t\t\tTo(apiHandler.handlePostCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/common/namespace/{namespace}/{resource}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/common/namespace/{namespace}/{resource}/{name}\").\n\t\t\tTo(apiHandler.handlePutCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/namespace/{namespace}/{resource}\").\n\t\t\tTo(apiHandler.handleGetCommonResourceList).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/namespace/{namespace}/{resource}/{name}\").\n\t\t\tTo(apiHandler.handleGetCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/{resource}/{name}/sub/{sub}\").\n\t\t\tTo(apiHandler.handleGetCommonResourceSub).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/common/{resource}/{name}/sub/{sub}\").\n\t\t\tTo(apiHandler.handlePostCommonResourc
eSub).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/namespace/{namespace}/{resource}/{name}/sub/{sub}\").\n\t\t\tTo(apiHandler.handleGetCommonResourceSub).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/common/namespace/{namespace}/{resource}/{name}/sub/{sub}\").\n\t\t\tTo(apiHandler.handlePostCommonResourceSub).\n\t\t\tWrites(make(map[string]interface{})))\n\n\tAddAppCoreUrl(apiV1Ws, apiHandler)\n\treturn wsContainer, nil\n}",
"func MakeHandler(s Service, logger kitlog.Logger) http.Handler {\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorHandler(transport.NewLogErrorHandler(logger)),\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t\tkithttp.ServerBefore(jwt.HTTPToContext()),\n\t}\n\n\tgetRecipesHandler := kithttp.NewServer(\n\t\tmakeGetRecipesEndpoint(s),\n\t\tnopDecodeRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\tgetRecipeHandler := kithttp.NewServer(\n\t\tmakeGetRecipeEndpoint(s),\n\t\tdecodeGetRecipeRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tr := mux.NewRouter()\n\n\tr.Handle(\"/recipes/recipes\", getRecipesHandler).Methods(\"GET\")\n\tr.Handle(\"/recipes/recipe/{id}\", getRecipeHandler).Methods(\"GET\")\n\n\treturn r\n}",
"func CreateHTTPAPIHandler(iManager integration.IntegrationManager, cManager clientapi.ClientManager,\n\tauthManager authApi.AuthManager, sManager settingsApi.SettingsManager) (http.Handler, error) {\n\tapiHandler := APIHandler{iManager: iManager, cManager: cManager, sManager: sManager}\n\twsContainer := restful.NewContainer()\n\twsContainer.EnableContentEncoding(true)\n\n\tapiV1Ws := new(restful.WebService)\n\n\tInstallFilters(apiV1Ws, cManager)\n\n\tapiV1Ws.Path(\"/api/v1\").\n\t\tConsumes(restful.MIME_JSON).\n\t\tProduces(restful.MIME_JSON)\n\twsContainer.Add(apiV1Ws)\n\n\tintegrationHandler := integration.NewIntegrationHandler(iManager)\n\tintegrationHandler.Install(apiV1Ws)\n\n\tpluginHandler := plugin.NewPluginHandler(cManager)\n\tpluginHandler.Install(apiV1Ws)\n\n\tauthHandler := auth.NewAuthHandler(authManager)\n\tauthHandler.Install(apiV1Ws)\n\n\tsettingsHandler := settings.NewSettingsHandler(sManager, cManager)\n\tsettingsHandler.Install(apiV1Ws)\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"csrftoken/{action}\").\n\t\t\tTo(apiHandler.handleGetCsrfToken).\n\t\t\tWrites(api.CsrfToken{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/appdeployment\").\n\t\t\tTo(apiHandler.handleDeploy).\n\t\t\tReads(deployment.AppDeploymentSpec{}).\n\t\t\tWrites(deployment.AppDeploymentSpec{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/appdeployment/validate/name\").\n\t\t\tTo(apiHandler.handleNameValidity).\n\t\t\tReads(validation.AppNameValiditySpec{}).\n\t\t\tWrites(validation.AppNameValidity{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/appdeployment/validate/imagereference\").\n\t\t\tTo(apiHandler.handleImageReferenceValidity).\n\t\t\tReads(validation.ImageReferenceValiditySpec{}).\n\t\t\tWrites(validation.ImageReferenceValidity{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/appdeployment/validate/protocol\").\n\t\t\tTo(apiHandler.handleProtocolValidity).\n\t\t\tReads(validation.ProtocolValiditySpec{}).\n\t\t\tWrites(validation.ProtocolValidity{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/appde
ployment/protocols\").\n\t\t\tTo(apiHandler.handleGetAvailableProtocols).\n\t\t\tWrites(deployment.Protocols{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/appdeploymentfromfile\").\n\t\t\tTo(apiHandler.handleDeployFromFile).\n\t\t\tReads(deployment.AppDeploymentFromFileSpec{}).\n\t\t\tWrites(deployment.AppDeploymentFromFileResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicationcontroller\").\n\t\t\tTo(apiHandler.handleGetReplicationControllerList).\n\t\t\tWrites(replicationcontroller.ReplicationControllerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicationcontroller/{namespace}\").\n\t\t\tTo(apiHandler.handleGetReplicationControllerList).\n\t\t\tWrites(replicationcontroller.ReplicationControllerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}\").\n\t\t\tTo(apiHandler.handleGetReplicationControllerDetail).\n\t\t\tWrites(replicationcontroller.ReplicationControllerDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/replicationcontroller/{namespace}/{replicationController}/update/pod\").\n\t\t\tTo(apiHandler.handleUpdateReplicasCount).\n\t\t\tReads(replicationcontroller.ReplicationControllerSpec{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/pod\").\n\t\t\tTo(apiHandler.handleGetReplicationControllerPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/event\").\n\t\t\tTo(apiHandler.handleGetReplicationControllerEvents).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/service\").\n\t\t\tTo(apiHandler.handleGetReplicationControllerServices).\n\t\t\tWrites(resourceService.ServiceList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicaset\").\n\t\t\tTo(apiHandler.handleGetReplicaSets).\n\t\t\tWrites(replicaset.ReplicaSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicaset/{namespace}\").\n\
t\t\tTo(apiHandler.handleGetReplicaSets).\n\t\t\tWrites(replicaset.ReplicaSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}\").\n\t\t\tTo(apiHandler.handleGetReplicaSetDetail).\n\t\t\tWrites(replicaset.ReplicaSetDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}/pod\").\n\t\t\tTo(apiHandler.handleGetReplicaSetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}/service\").\n\t\t\tTo(apiHandler.handleGetReplicaSetServices).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}/event\").\n\t\t\tTo(apiHandler.handleGetReplicaSetEvents).\n\t\t\tWrites(common.EventList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod\").\n\t\t\tTo(apiHandler.handleGetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}\").\n\t\t\tTo(apiHandler.handleGetPodDetail).\n\t\t\tWrites(pod.PodDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/container\").\n\t\t\tTo(apiHandler.handleGetPodContainers).\n\t\t\tWrites(pod.PodDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/event\").\n\t\t\tTo(apiHandler.handleGetPodEvents).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/shell/{container}\").\n\t\t\tTo(apiHandler.handleExecShell).\n\t\t\tWrites(TerminalResponse{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/persistentvolumeclaim\").\n\t\t\tTo(apiHandler.handleGetPodPersistentVolumeClaims).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment\").\n\t\t\tTo(apiHandler.handleGetDeployments).\n\t\t\tWrites(deployment.DeploymentList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}\
").\n\t\t\tTo(apiHandler.handleGetDeployments).\n\t\t\tWrites(deployment.DeploymentList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}\").\n\t\t\tTo(apiHandler.handleGetDeploymentDetail).\n\t\t\tWrites(deployment.DeploymentDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/event\").\n\t\t\tTo(apiHandler.handleGetDeploymentEvents).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/oldreplicaset\").\n\t\t\tTo(apiHandler.handleGetDeploymentOldReplicaSets).\n\t\t\tWrites(replicaset.ReplicaSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/newreplicaset\").\n\t\t\tTo(apiHandler.handleGetDeploymentNewReplicaSet).\n\t\t\tWrites(replicaset.ReplicaSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/{kind}/{namespace}/{deployment}/pause\").\n\t\t\tTo(apiHandler.handleDeploymentPause).\n\t\t\tWrites(deployment.DeploymentDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/{kind}/{namespace}/{deployment}/rollback\").\n\t\t\tTo(apiHandler.handleDeploymentRollback).\n\t\t\tReads(deployment.RolloutSpec{}).\n\t\t\tWrites(deployment.RolloutSpec{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/{kind}/{namespace}/{deployment}/restart\").\n\t\t\tTo(apiHandler.handleDeploymentRestart).\n\t\t\tWrites(deployment.RolloutSpec{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/{kind}/{namespace}/{deployment}/resume\").\n\t\t\tTo(apiHandler.handleDeploymentResume).\n\t\t\tWrites(deployment.DeploymentDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/scale/{kind}/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleScaleResource).\n\t\t\tWrites(scaling.ReplicaCounts{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/scale/{kind}/{name}\").\n\t\t\tTo(apiHandler.handleScaleResource).\n\t\t\tWrites(scaling.ReplicaCounts{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/scale/{kind}/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetReplicaCount).\n\t\t\tWrites(scaling.ReplicaCounts{}
))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/scale/{kind}/{name}\").\n\t\t\tTo(apiHandler.handleGetReplicaCount).\n\t\t\tWrites(scaling.ReplicaCounts{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset\").\n\t\t\tTo(apiHandler.handleGetDaemonSetList).\n\t\t\tWrites(daemonset.DaemonSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}\").\n\t\t\tTo(apiHandler.handleGetDaemonSetList).\n\t\t\tWrites(daemonset.DaemonSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonSet}\").\n\t\t\tTo(apiHandler.handleGetDaemonSetDetail).\n\t\t\tWrites(daemonset.DaemonSetDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonSet}/pod\").\n\t\t\tTo(apiHandler.handleGetDaemonSetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonSet}/service\").\n\t\t\tTo(apiHandler.handleGetDaemonSetServices).\n\t\t\tWrites(resourceService.ServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonSet}/event\").\n\t\t\tTo(apiHandler.handleGetDaemonSetEvents).\n\t\t\tWrites(common.EventList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerList).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler/{namespace}\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerList).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/{kind}/{namespace}/{name}/horizontalpodautoscaler\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerListForResource).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler/{namespace}/{horizontalpodautoscaler}\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerDetail).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\n\tapiV1
Ws.Route(\n\t\tapiV1Ws.GET(\"/job\").\n\t\t\tTo(apiHandler.handleGetJobList).\n\t\t\tWrites(job.JobList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/job/{namespace}\").\n\t\t\tTo(apiHandler.handleGetJobList).\n\t\t\tWrites(job.JobList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/job/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetJobDetail).\n\t\t\tWrites(job.JobDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/job/{namespace}/{name}/pod\").\n\t\t\tTo(apiHandler.handleGetJobPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/job/{namespace}/{name}/event\").\n\t\t\tTo(apiHandler.handleGetJobEvents).\n\t\t\tWrites(common.EventList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/cronjob\").\n\t\t\tTo(apiHandler.handleGetCronJobList).\n\t\t\tWrites(cronjob.CronJobList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/cronjob/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCronJobList).\n\t\t\tWrites(cronjob.CronJobList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetCronJobDetail).\n\t\t\tWrites(cronjob.CronJobDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}/job\").\n\t\t\tTo(apiHandler.handleGetCronJobJobs).\n\t\t\tWrites(job.JobList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}/event\").\n\t\t\tTo(apiHandler.handleGetCronJobEvents).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/cronjob/{namespace}/{name}/trigger\").\n\t\t\tTo(apiHandler.handleTriggerCronJob))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/namespace\").\n\t\t\tTo(apiHandler.handleCreateNamespace).\n\t\t\tReads(ns.NamespaceSpec{}).\n\t\t\tWrites(ns.NamespaceSpec{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/namespace\").\n\t\t\tTo(apiHandler.handleGetNamespaces).\n\t\t\tWrites(ns.NamespaceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/namespace/{name}\").\n\t\t\tTo(apiHandler.handleGetNamespaceDetail).\n\t\t\tWrites(ns.NamespaceDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/
namespace/{name}/event\").\n\t\t\tTo(apiHandler.handleGetNamespaceEvents).\n\t\t\tWrites(common.EventList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/event\").\n\t\t\tTo(apiHandler.handleGetEventList).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/event/{namespace}\").\n\t\t\tTo(apiHandler.handleGetEventList).\n\t\t\tWrites(common.EventList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret\").\n\t\t\tTo(apiHandler.handleGetSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{namespace}\").\n\t\t\tTo(apiHandler.handleGetSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetSecretDetail).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/secret\").\n\t\t\tTo(apiHandler.handleCreateImagePullSecret).\n\t\t\tReads(secret.ImagePullSecretSpec{}).\n\t\t\tWrites(secret.Secret{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap\").\n\t\t\tTo(apiHandler.handleGetConfigMapList).\n\t\t\tWrites(configmap.ConfigMapList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap/{namespace}\").\n\t\t\tTo(apiHandler.handleGetConfigMapList).\n\t\t\tWrites(configmap.ConfigMapList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap/{namespace}/{configmap}\").\n\t\t\tTo(apiHandler.handleGetConfigMapDetail).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service\").\n\t\t\tTo(apiHandler.handleGetServiceList).\n\t\t\tWrites(resourceService.ServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}\").\n\t\t\tTo(apiHandler.handleGetServiceList).\n\t\t\tWrites(resourceService.ServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}/{service}\").\n\t\t\tTo(apiHandler.handleGetServiceDetail).\n\t\t\tWrites(resourceService.ServiceDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}/{service}/event\").\n\t\t\tTo(apiHandler.handleG
etServiceEvent).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}/{service}/pod\").\n\t\t\tTo(apiHandler.handleGetServicePods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}/{service}/ingress\").\n\t\t\tTo(apiHandler.handleGetServiceIngressList).\n\t\t\tWrites(ingress.IngressList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/serviceaccount\").\n\t\t\tTo(apiHandler.handleGetServiceAccountList).\n\t\t\tWrites(serviceaccount.ServiceAccountList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/serviceaccount/{namespace}\").\n\t\t\tTo(apiHandler.handleGetServiceAccountList).\n\t\t\tWrites(serviceaccount.ServiceAccountList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/serviceaccount/{namespace}/{serviceaccount}\").\n\t\t\tTo(apiHandler.handleGetServiceAccountDetail).\n\t\t\tWrites(serviceaccount.ServiceAccountDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/serviceaccount/{namespace}/{serviceaccount}/secret\").\n\t\t\tTo(apiHandler.handleGetServiceAccountSecrets).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/serviceaccount/{namespace}/{serviceaccount}/imagepullsecret\").\n\t\t\tTo(apiHandler.handleGetServiceAccountImagePullSecrets).\n\t\t\tWrites(secret.SecretList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/ingress\").\n\t\t\tTo(apiHandler.handleGetIngressList).\n\t\t\tWrites(ingress.IngressList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/ingress/{namespace}\").\n\t\t\tTo(apiHandler.handleGetIngressList).\n\t\t\tWrites(ingress.IngressList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/ingress/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetIngressDetail).\n\t\t\tWrites(ingress.IngressDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/ingress/{namespace}/{ingress}/event\").\n\t\t\tTo(apiHandler.handleGetIngressEvent).\n\t\t\tWrites(common.EventList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/networkpolicy\").\n\t\t\tTo(apiHandler.handleGetNetworkPolicyList).\n\t\t\tWrites(netwo
rkpolicy.NetworkPolicyList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/networkpolicy/{namespace}\").\n\t\t\tTo(apiHandler.handleGetNetworkPolicyList).\n\t\t\tWrites(networkpolicy.NetworkPolicyList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/networkpolicy/{namespace}/{networkpolicy}\").\n\t\t\tTo(apiHandler.handleGetNetworkPolicyDetail).\n\t\t\tWrites(networkpolicy.NetworkPolicyDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset\").\n\t\t\tTo(apiHandler.handleGetStatefulSetList).\n\t\t\tWrites(statefulset.StatefulSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset/{namespace}\").\n\t\t\tTo(apiHandler.handleGetStatefulSetList).\n\t\t\tWrites(statefulset.StatefulSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}\").\n\t\t\tTo(apiHandler.handleGetStatefulSetDetail).\n\t\t\tWrites(statefulset.StatefulSetDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}/pod\").\n\t\t\tTo(apiHandler.handleGetStatefulSetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}/event\").\n\t\t\tTo(apiHandler.handleGetStatefulSetEvents).\n\t\t\tWrites(common.EventList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/node\").\n\t\t\tTo(apiHandler.handleGetNodeList).\n\t\t\tWrites(node.NodeList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/node/{name}\").\n\t\t\tTo(apiHandler.handleGetNodeDetail).\n\t\t\tWrites(node.NodeDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/node/{name}/event\").\n\t\t\tTo(apiHandler.handleGetNodeEvents).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/node/{name}/pod\").\n\t\t\tTo(apiHandler.handleGetNodePods).\n\t\t\tWrites(pod.PodList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t\t\tTo(apiHandler.handleDeleteResource))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t\t\tTo(apiHandler.handleGetResource))\n\tapi
V1Ws.Route(\n\t\tapiV1Ws.PUT(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t\t\tTo(apiHandler.handlePutResource))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/_raw/{kind}/name/{name}\").\n\t\t\tTo(apiHandler.handleDeleteResource))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/_raw/{kind}/name/{name}\").\n\t\t\tTo(apiHandler.handleGetResource))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/_raw/{kind}/name/{name}\").\n\t\t\tTo(apiHandler.handlePutResource))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterrole\").\n\t\t\tTo(apiHandler.handleGetClusterRoleList).\n\t\t\tWrites(clusterrole.ClusterRoleList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterrole/{name}\").\n\t\t\tTo(apiHandler.handleGetClusterRoleDetail).\n\t\t\tWrites(clusterrole.ClusterRoleDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterrolebinding\").\n\t\t\tTo(apiHandler.handleGetClusterRoleBindingList).\n\t\t\tWrites(clusterrolebinding.ClusterRoleBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterrolebinding/{name}\").\n\t\t\tTo(apiHandler.handleGetClusterRoleBindingDetail).\n\t\t\tWrites(clusterrolebinding.ClusterRoleBindingDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/role/{namespace}\").\n\t\t\tTo(apiHandler.handleGetRoleList).\n\t\t\tWrites(role.RoleList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/role/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetRoleDetail).\n\t\t\tWrites(role.RoleDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/rolebinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetRoleBindingList).\n\t\t\tWrites(rolebinding.RoleBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/rolebinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetRoleBindingDetail).\n\t\t\tWrites(rolebinding.RoleBindingDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolume\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeList).\n\t\t\tWrites(persistentvolume.PersistentVolumeList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolume/{persistentvolume}\").\n\t\t\tTo(apiHan
dler.handleGetPersistentVolumeDetail).\n\t\t\tWrites(persistentvolume.PersistentVolumeDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolume/namespace/{namespace}/name/{persistentvolume}\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeDetail).\n\t\t\tWrites(persistentvolume.PersistentVolumeDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolumeclaim/\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeClaimList).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolumeclaim/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeClaimList).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolumeclaim/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeClaimDetail).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/crd\").\n\t\t\tTo(apiHandler.handleGetCustomResourceDefinitionList).\n\t\t\tWrites(types.CustomResourceDefinitionList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/crd/{crd}\").\n\t\t\tTo(apiHandler.handleGetCustomResourceDefinitionDetail).\n\t\t\tWrites(types.CustomResourceDefinitionDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/crd/{namespace}/{crd}/object\").\n\t\t\tTo(apiHandler.handleGetCustomResourceObjectList).\n\t\t\tWrites(types.CustomResourceObjectList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/crd/{namespace}/{crd}/{object}\").\n\t\t\tTo(apiHandler.handleGetCustomResourceObjectDetail).\n\t\t\tWrites(types.CustomResourceObjectDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/crd/{namespace}/{crd}/{object}/event\").\n\t\t\tTo(apiHandler.handleGetCustomResourceObjectEvents).\n\t\t\tWrites(common.EventList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/storageclass\").\n\t\t\tTo(apiHandler.handleGetStorageClassList).\n\t\t\tWrites(storageclass.StorageClassList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/storageclass/{stor
ageclass}\").\n\t\t\tTo(apiHandler.handleGetStorageClass).\n\t\t\tWrites(storageclass.StorageClass{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/storageclass/{storageclass}/persistentvolume\").\n\t\t\tTo(apiHandler.handleGetStorageClassPersistentVolumes).\n\t\t\tWrites(persistentvolume.PersistentVolumeList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/ingressclass\").\n\t\t\tTo(apiHandler.handleGetIngressClassList).\n\t\t\tWrites(ingressclass.IngressClassList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/ingressclass/{ingressclass}\").\n\t\t\tTo(apiHandler.handleGetIngressClass).\n\t\t\tWrites(ingressclass.IngressClass{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/source/{namespace}/{resourceName}/{resourceType}\").\n\t\t\tTo(apiHandler.handleLogSource).\n\t\t\tWrites(controller.LogSources{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/{namespace}/{pod}\").\n\t\t\tTo(apiHandler.handleLogs).\n\t\t\tWrites(logs.LogDetails{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/{namespace}/{pod}/{container}\").\n\t\t\tTo(apiHandler.handleLogs).\n\t\t\tWrites(logs.LogDetails{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/file/{namespace}/{pod}/{container}\").\n\t\t\tTo(apiHandler.handleLogFile).\n\t\t\tWrites(logs.LogDetails{}))\n\n\treturn wsContainer, nil\n}",
"func NewAPIHandler() (handler APIHandler) {\n\n\thandler = &handlerExecution{}\n\n\thandler = &handlerACL{BaseHandler: BaseHandler{\n\t\tNext: handler,\n\t}}\n\n\thandler = &handlerParamsValidator{\n\t\tBaseHandler{Next: handler},\n\t}\n\n\treturn\n}",
"func MakeHTTPSHandler(s Service) http.Handler {\n\tr := chi.NewRouter()\n\n\t//Obtener personas por su identificador\n\tgetPersonByHandler := kithttp.NewServer(\n\t\tmakeGetPersonByIDEndPoint(s),\n\t\tgetPersonByIDRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodGet, \"/id/{id}\", getPersonByHandler)\n\n\t//Obtener personas paginadas\n\tgetPersonHandler := kithttp.NewServer(\n\t\tmakeGetPersonsEndPoint(s),\n\t\tgetPersonsRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPost, \"/paginated\", getPersonHandler)\n\n\t//Agregar a una persona\n\taddPersonHandler := kithttp.NewServer(\n\t\tmakeAddPersonEndpoint(s),\n\t\taddPersonRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPost, \"/insert\", addPersonHandler)\n\n\t//Actualizar personas\n\tupdatePersonHandler := kithttp.NewServer(\n\t\tmakeUpdatePersonEndpoint(s),\n\t\tupdatePersonRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPut, \"/update\", updatePersonHandler)\n\n\t//Eliminar PERSONA\n\tdeletePersonHandler := kithttp.NewServer(\n\t\tmakeDeletePersonEndPoint(s),\n\t\tdeletePersonRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodDelete, \"/delete/{id}\", deletePersonHandler)\n\treturn r\n}",
"func MakeHttpHandler(ctx context.Context, endpoints endpoint.StringEndpoints, zipkinTracer *gozipkin.Tracer, logger log.Logger) http.Handler {\n\tr := mux.NewRouter()\n\n\tzipkinServer := zipkin.HTTPServerTrace(zipkinTracer, zipkin.Name(\"http-transport\"))\n\n\toptions := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(kithttp.DefaultErrorEncoder),\n\t\tzipkinServer,\n\t}\n\n\tr.Methods(\"POST\").Path(\"/op/{type}/{a}/{b}\").Handler(kithttp.NewServer(\n\t\tendpoints.StringEndpoint,\n\t\tdecodeStringRequest,\n\t\tencodeStringResponse,\n\t\toptions...,\n\t))\n\n\tr.Path(\"/metrics\").Handler(promhttp.Handler())\n\n\t// create health check handler\n\tr.Methods(\"GET\").Path(\"/health\").Handler(kithttp.NewServer(\n\t\tendpoints.HealthCheckEndpoint,\n\t\tdecodeHealthCheckRequest,\n\t\tencodeStringResponse,\n\t\toptions...,\n\t))\n\n\treturn r\n}",
"func makeHandler(server *ServerContext, privs handlerPrivs, method handlerMethod) http.Handler {\n\treturn http.HandlerFunc(func(r http.ResponseWriter, rq *http.Request) {\n\t\trunOffline := false\n\t\th := newHandler(server, privs, r, rq, runOffline)\n\t\terr := h.invoke(method)\n\t\th.writeError(err)\n\t\th.logDuration(true)\n\t})\n}",
"func CreateHandler(config *Config) http.Handler {\n\trouter := httprouter.New()\n\trouter.RedirectTrailingSlash = false\n\n\trouter.GET(\"/\", indexHandler{config: config}.Handle)\n\n\tfor name, pkg := range config.Packages {\n\t\thandle := packageHandler{\n\t\t\tpkgName: name,\n\t\t\tpkg: pkg,\n\t\t\tconfig: config,\n\t\t}.Handle\n\t\trouter.GET(fmt.Sprintf(\"/%s\", name), handle)\n\t\trouter.GET(fmt.Sprintf(\"/%s/*path\", name), handle)\n\t}\n\n\treturn router\n}",
"func MakeHandler(action interface{}) http.HandlerFunc {\n\treturn func(rsp http.ResponseWriter, req *http.Request) {\n\n\t\t// find the type of the controller and create a new one\n\t\tcontrollerType := reflect.TypeOf(action).In(0)\n\t\tnewController := reflect.New(controllerType)\n\n\t\t// find the action of the controller\n\t\tactionName := runtime.FuncForPC(reflect.ValueOf(action).Pointer()).Name()\n\t\tactionName = actionName[strings.LastIndex(actionName, \".\")+1 : len(actionName)]\n\n\t\t// fill the base controller\n\t\tbaseController := new(Controller)\n\t\tbaseController.Response = rsp\n\t\tbaseController.Request = req\n\t\tbaseController.Name = controllerType.Name()\n\t\tbaseController.Context = appengine.NewContext(req)\n\t\tbaseController.Action = actionName\n\t\t// inject the base controller\n\t\tnewController.Elem().FieldByName(\"Controller\").Set(reflect.ValueOf(baseController))\n\n\t\t// execute the action\n\t\treflect.ValueOf(action).Call([]reflect.Value{newController.Elem()})\n\t}\n}",
"func MakeHandler(ts Service, logger kitlog.Logger) http.Handler {\n\tr := mux.NewRouter()\n\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t}\n\n\ttrackCargoHandler := kithttp.NewServer(\n\t\tmakeTrackCargoEndpoint(ts),\n\t\tdecodeTrackCargoRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tr.Handle(\"/tracking/v1/cargos/{id}\", trackCargoHandler).Methods(\"GET\")\n\n\treturn r\n}",
"func makeHandler(h HandleFunc) HandleFunc {\n\tLog.Debug(\"converting func(http.ResponseWriter, *Request) to Frodo.HandleFunc\")\n\treturn h\n}",
"func SetupHTTPHandler(svc auth.DeviceAPI, router *mux.Router, tokenSvc auth.TokenService, logger log.Logger, lmt httpapi.LimiterFactory) {\n\tvar handler httpapi.JSONAPIHandler\n\t{\n\t\thandler = httpapi.AuthMiddleware(svc.Create, tokenSvc, auth.JWTAuthorized)\n\t\thandler = httpapi.RateLimitMiddleware(handler, lmt.NewLimiter(\n\t\t\t\"DeviceAPI.Create\", httpapi.PerMinute, int64(20),\n\t\t))\n\t\thandler = httpapi.ErrorLoggingMiddleware(handler, logger)\n\t\thttpHandler := httpapi.ToHandlerFunc(handler, http.StatusOK)\n\t\trouter.HandleFunc(\"/api/v1/device\", httpHandler).Methods(\"Post\")\n\t}\n\t{\n\t\thandler = httpapi.AuthMiddleware(svc.Verify, tokenSvc, auth.JWTAuthorized)\n\t\thandler = httpapi.RateLimitMiddleware(handler, lmt.NewLimiter(\n\t\t\t\"DeviceAPI.Verify\", httpapi.PerMinute, int64(20),\n\t\t))\n\t\thandler = httpapi.ErrorLoggingMiddleware(handler, logger)\n\t\thttpHandler := httpapi.ToHandlerFunc(handler, http.StatusCreated)\n\t\trouter.HandleFunc(\"/api/v1/device/verify\", httpHandler).Methods(\"Post\")\n\t}\n\t{\n\t\thandler = httpapi.AuthMiddleware(svc.Remove, tokenSvc, auth.JWTAuthorized)\n\t\thandler = httpapi.RateLimitMiddleware(handler, lmt.NewLimiter(\n\t\t\t\"DeviceAPI.Remove\", httpapi.PerMinute, int64(20),\n\t\t))\n\t\thandler = httpapi.ErrorLoggingMiddleware(handler, logger)\n\t\thttpHandler := httpapi.ToHandlerFunc(handler, http.StatusOK)\n\t\trouter.HandleFunc(\"/api/v1/device/{deviceID}\", httpHandler).Methods(\"Delete\")\n\t}\n\t{\n\t\thandler = httpapi.AuthMiddleware(svc.Rename, tokenSvc, auth.JWTAuthorized)\n\t\thandler = httpapi.RateLimitMiddleware(handler, lmt.NewLimiter(\n\t\t\t\"DeviceAPI.Rename\", httpapi.PerMinute, int64(20),\n\t\t))\n\t\thandler = httpapi.ErrorLoggingMiddleware(handler, logger)\n\t\thttpHandler := httpapi.ToHandlerFunc(handler, http.StatusOK)\n\t\trouter.HandleFunc(\"/api/v1/device/{deviceID}\", httpHandler).Methods(\"Patch\")\n\t}\n\t{\n\t\thandler = httpapi.AuthMiddleware(svc.List, tokenSvc, 
auth.JWTAuthorized)\n\t\thandler = httpapi.RateLimitMiddleware(handler, lmt.NewLimiter(\n\t\t\t\"DeviceAPI.List\", httpapi.PerMinute, int64(60),\n\t\t))\n\t\thandler = httpapi.ErrorLoggingMiddleware(handler, logger)\n\t\thttpHandler := httpapi.ToHandlerFunc(handler, http.StatusOK)\n\t\trouter.HandleFunc(\"/api/v1/device\", httpHandler).Methods(\"Get\")\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a case in the AWS Support Center. This operation is similar to how you create a case in the AWS Support Center Create Case page. The AWS Support API doesn't support requesting service limit increases. You can submit a service limit increase in the following ways: Submit a request from the AWS Support Center Create Case page. Use the Service Quotas RequestServiceQuotaIncrease operation. A successful CreateCase request returns an AWS Support case number. You can use the DescribeCases operation and specify the case number to get existing AWS Support cases. After you create a case, use the AddCommunicationToCase operation to add additional communication or attachments to an existing case. The caseId is separate from the displayId that appears in the AWS Support Center. Use the DescribeCases operation to get the displayId. You must have a Business or Enterprise support plan to use the AWS Support API. If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.
|
// CreateCase submits a new AWS Support case and returns the service
// response, which carries the case number assigned by AWS Support.
//
// A nil params is treated as an empty request. The call runs through the
// client's shared invokeOperation pipeline with the CreateCase middleware
// stack; optFns may customize the per-call Options.
func (c *Client) CreateCase(ctx context.Context, params *CreateCaseInput, optFns ...func(*Options)) (*CreateCaseOutput, error) {
	in := params
	if in == nil {
		in = &CreateCaseInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "CreateCase", in, optFns, addOperationCreateCaseMiddlewares)
	if err != nil {
		return nil, err
	}

	output := result.(*CreateCaseOutput)
	output.ResultMetadata = metadata
	return output, nil
}
|
[
"func (c *Client) AddCase(sectionID int, newCase SendableCase) (Case, error) {\n\tcreatedCase := Case{}\n\terr := c.sendRequest(\"POST\", fmt.Sprintf(\"add_case/%d\", sectionID), newCase, &createdCase)\n\treturn createdCase, err\n}",
"func CaseNew(c echo.Context) error {\n\tdefer c.Request().Body.Close()\n\n\t//TODO: fix this generals function validatePermission and others\n\terr := validatePermission(c, \"cases_new\")\n\tif err != nil {\n\t\treturn returnInvalidResponse(http.StatusForbidden, err, err.Error())\n\t}\n\t_, info := cases.New(c, nil)\n\treturn info\n}",
"func (c *Client) AddCommunicationToCase(ctx context.Context, params *AddCommunicationToCaseInput, optFns ...func(*Options)) (*AddCommunicationToCaseOutput, error) {\n\tif params == nil {\n\t\tparams = &AddCommunicationToCaseInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"AddCommunicationToCase\", params, optFns, addOperationAddCommunicationToCaseMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*AddCommunicationToCaseOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}",
"func NewCase(condition string, statements ...Statement) *Case {\n\treturn &Case{\n\t\tcondition: condition,\n\t\tstatements: statements,\n\t\tcaller: fetchClientCallerLine(),\n\t}\n}",
"func (mr *MockClientMockRecorder) CreateCase(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateCase\", reflect.TypeOf((*MockClient)(nil).CreateCase), arg0)\n}",
"func NewCase(evaluator evaluatorFunc, command commandFunc) Case {\n\treturn Case{\n\t\tevaluator: evaluator,\n\t\tcommand: command,\n\t}\n}",
"func NewCase(x constant.Constant, target *Block) *Case {\n\treturn &Case{X: x, Target: target}\n}",
"func ceCreateScenario(w http.ResponseWriter, r *http.Request) {\n\tlog.Debug(\"ceCreateScenario\")\n\n\t// Get scenario name from request parameters\n\tvars := mux.Vars(r)\n\tscenarioName := vars[\"name\"]\n\tlog.Debug(\"Scenario name: \", scenarioName)\n\n\t// Retrieve scenario from request body\n\tvar scenario Scenario\n\tdecoder := json.NewDecoder(r.Body)\n\terr := decoder.Decode(&scenario)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Add new scenario to DB\n\trev, err := addScenario(db, scenarioName, scenario)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Debug(\"Scenario added with rev: \", rev)\n\n\t// OK\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n}",
"func NewCase(p core.Value, v interface{}) Case {\n\treturn Case{p, v}\n}",
"func Case(what ...interface{}) *CaseBuilder {\n\tb := &CaseBuilder{}\n\n\tswitch len(what) {\n\tcase 0:\n\tcase 1:\n\t\tb = b.what(what[0])\n\tdefault:\n\t\tb = b.what(newPart(what[0], what[1:]...))\n\n\t}\n\treturn b\n}",
"func (c *Client) DeleteCase(caseID int) error {\n\treturn c.sendRequest(\"POST\", fmt.Sprintf(\"delete_case/%d\", caseID), nil, nil)\n}",
"func (h *SupportAPI) CreateSupportRecord(c *gin.Context) {\n\tctx := context.Background()\n\tuserID := c.MustGet(\"userID\").(uint)\n\tctx = context.WithValue(ctx, entities.UserIDKey, userID)\n\tvar req supportRequest\n\terr := c.BindJSON(&req)\n\tif err != nil {\n\t\tentities.SendParsingError(c, \"There has been an error while parsing your information, please try again\", err)\n\t\treturn\n\t}\n\terr = req.Validate()\n\tif err != nil {\n\t\tentities.SendValidationError(c, err.Error(), err)\n\t\treturn\n\t}\n\tinfo := entities.SupportInfo{\n\t\tEmail: req.Email,\n\t\tMobile: req.Mobile,\n\t}\n\tcreatedInfo, err := h.SupportUsecase.Create(ctx, &info)\n\tif err != nil {\n\t\tentities.SendValidationError(c, \"You are not authrorized to creates support info records\", err)\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"messages\": \"Support Info Created Successfully\",\n\t\t\"info\": createdInfo,\n\t})\n\treturn\n}",
"func NewCaseOperation()(*CaseOperation) {\n m := &CaseOperation{\n Entity: *iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.NewEntity(),\n }\n return m\n}",
"func NewCaseService(repo CaseRepository) *CaseService {\n\treturn &CaseService{CaseRepo: repo}\n}",
"func (s *Service) GrantCase(c context.Context, nwMsg []byte, oldMsg []byte) (err error) {\n\tmr := &model.Case{}\n\tif err = json.Unmarshal(nwMsg, mr); err != nil {\n\t\tlog.Error(\"json.Unmarshal(%s) error(%v)\", string(nwMsg), err)\n\t\treturn\n\t}\n\tif mr.Status != model.CaseStatusGranting {\n\t\treturn\n\t}\n\tstime, err := time.ParseInLocation(time.RFC3339, mr.Stime, time.Local)\n\tif err != nil {\n\t\tstime, err = time.ParseInLocation(\"2006-01-02 15:04:05\", mr.Stime, time.Local)\n\t\tif err != nil {\n\t\t\tlog.Error(\"time.ParseInLocation(%s) error(%v)\", mr.Stime, err)\n\t\t\treturn\n\t\t}\n\t\terr = nil\n\t}\n\tetime, err := time.ParseInLocation(time.RFC3339, mr.Etime, time.Local)\n\tif err != nil {\n\t\tetime, err = time.ParseInLocation(\"2006-01-02 15:04:05\", mr.Etime, time.Local)\n\t\tif err != nil {\n\t\t\tlog.Error(\"time.ParseInLocation(%s) error(%v)\", mr.Etime, err)\n\t\t\treturn\n\t\t}\n\t\terr = nil\n\t}\n\tsimCase := &model.SimCase{\n\t\tID: mr.ID,\n\t\tMid: mr.Mid,\n\t\tVoteRule: mr.Agree,\n\t\tVoteBreak: mr.Against,\n\t\tVoteDelete: mr.VoteDelete,\n\t\tCaseType: mr.CaseType,\n\t\tStime: xtime.Time(stime.Unix()),\n\t\tEtime: xtime.Time(etime.Unix()),\n\t}\n\tmcases := make(map[int64]*model.SimCase)\n\tmcases[mr.ID] = simCase\n\tif err = s.dao.SetGrantCase(c, mcases); err != nil {\n\t\tlog.Error(\"s.dao.SetMIDCaseGrant(%+v) error(%v)\", mr, err)\n\t}\n\treturn\n}",
"func CreateContest(ctx context.Context, contest *models.Contest) error {\n\treturn db.InTransaction(ctx, func(tx *sqlx.Tx) error {\n\t\tquery := `\n INSERT INTO\n contest(short_name, host_name, start_time, selection_window_end_time, duration, title, hidden_scoreboard)\n VALUES($1, $2, $3, $4, $5 * '1 microsecond'::interval, $6, $7)\n RETURNING contest_id`\n\t\tif err := tx.QueryRowContext(ctx, query, contest.ShortName, contest.HostName, contest.StartTime, contest.FlexibleEndTime, contest.Duration/time.Microsecond, contest.Title, contest.HiddenScoreboard).Scan(&contest.ContestID); err != nil {\n\t\t\tif db.PgErrCode(err) == db.UniquenessViolation {\n\t\t\t\treturn ErrShortNameExists\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"failed create contest query: %v\", err)\n\t\t\t}\n\t\t}\n\t\tfor _, p := range contest.Problems {\n\t\t\tp.ContestID = contest.ContestID\n\t\t}\n\t\tif err := insertProblems(ctx, contest, tx); err != nil {\n\t\t\treturn fmt.Errorf(\"failed insert contest problem: %v\", err)\n\t\t}\n\t\treturn nil\n\t})\n}",
"func NewSubmitBusinessCase(\n\tconfig Config,\n\tauthorize func(context.Context, *models.SystemIntake) (bool, error),\n\tfetchOpenBusinessCase func(context.Context, uuid.UUID) (*models.BusinessCase, error),\n\tvalidateForSubmit func(businessCase *models.BusinessCase) error,\n\tsaveAction func(context.Context, *models.Action) error,\n\tupdateIntake func(context.Context, *models.SystemIntake) (*models.SystemIntake, error),\n\tupdateBusinessCase func(context.Context, *models.BusinessCase) (*models.BusinessCase, error),\n\tsendEmail func(ctx context.Context, requestName string, intakeID uuid.UUID) error,\n\tnewIntakeStatus models.SystemIntakeStatus,\n) ActionExecuter {\n\treturn func(ctx context.Context, intake *models.SystemIntake, action *models.Action) error {\n\t\tok, err := authorize(ctx, intake)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !ok {\n\t\t\treturn &apperrors.UnauthorizedError{Err: err}\n\t\t}\n\n\t\tbusinessCase, err := fetchOpenBusinessCase(ctx, intake.ID)\n\t\tif err != nil {\n\t\t\treturn &apperrors.QueryError{\n\t\t\t\tErr: err,\n\t\t\t\tOperation: apperrors.QueryFetch,\n\t\t\t\tModel: intake,\n\t\t\t}\n\t\t}\n\t\t// Uncomment below when UI has changed for unique lifecycle costs\n\t\t//err = appvalidation.BusinessCaseForUpdate(businessCase)\n\t\t//if err != nil {\n\t\t//\treturn &models.BusinessCase{}, err\n\t\t//}\n\t\tupdatedAt := config.clock.Now()\n\t\tbusinessCase.UpdatedAt = &updatedAt\n\n\t\tif businessCase.InitialSubmittedAt == nil {\n\t\t\tbusinessCase.InitialSubmittedAt = &updatedAt\n\t\t}\n\t\tbusinessCase.LastSubmittedAt = &updatedAt\n\t\tif businessCase.SystemIntakeStatus == models.SystemIntakeStatusBIZCASEFINALNEEDED {\n\t\t\terr = validateForSubmit(businessCase)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\terr = saveAction(ctx, action)\n\t\tif err != nil {\n\t\t\treturn &apperrors.QueryError{\n\t\t\t\tErr: err,\n\t\t\t\tModel: action,\n\t\t\t\tOperation: 
apperrors.QueryPost,\n\t\t\t}\n\t\t}\n\n\t\tbusinessCase, err = updateBusinessCase(ctx, businessCase)\n\t\tif err != nil {\n\t\t\treturn &apperrors.QueryError{\n\t\t\t\tErr: err,\n\t\t\t\tModel: businessCase,\n\t\t\t\tOperation: apperrors.QuerySave,\n\t\t\t}\n\t\t}\n\n\t\tintake.Status = newIntakeStatus\n\t\tintake.UpdatedAt = &updatedAt\n\t\tintake, err = updateIntake(ctx, intake)\n\t\tif err != nil {\n\t\t\treturn &apperrors.QueryError{\n\t\t\t\tErr: err,\n\t\t\t\tModel: intake,\n\t\t\t\tOperation: apperrors.QuerySave,\n\t\t\t}\n\t\t}\n\n\t\terr = sendEmail(ctx, businessCase.ProjectName.String, businessCase.SystemIntakeID)\n\t\tif err != nil {\n\t\t\tappcontext.ZLogger(ctx).Error(\"Submit Business Case email failed to send: \", zap.Error(err))\n\t\t}\n\n\t\treturn nil\n\t}\n}",
"func NewCase(t *testing.T, path string) Case {\n\tc := Case{T: t}\n\tc.In = newFile(&c, path)\n\tc.Out = newFile(&c, path+Extension)\n\treturn c\n}",
"func (c *Client) UpdateCase(caseID int, updates SendableCase) (Case, error) {\n\tupdatedCase := Case{}\n\terr := c.sendRequest(\"POST\", fmt.Sprintf(\"update_case/%d\", caseID), updates, &updatedCase)\n\treturn updatedCase, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetAwsSession returns an AWS session for the specified server configuration
|
// GetAwsSession returns an AWS session for the specified server configuration.
//
// Credentials are resolved through a chain, in order: optional static keys
// from the config, environment variables, the shared credentials file, and
// finally the EC2 instance metadata service. When an assume-role ARN, or a
// web-identity token file together with a role ARN, is configured, an
// intermediate session is created first and the chain credentials are
// exchanged for STS credentials.
//
// Custom endpoint URLs configured for RDS, EC2, CloudWatch, or CloudWatch
// Logs override the SDK's default endpoint resolution for those services.
func GetAwsSession(cfg config.ServerConfig) (*session.Session, error) {
	// Resolve each supported service to its configured endpoint override;
	// an empty URL falls through to the SDK's default resolver.
	customResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
		var overrideURL string
		switch service {
		case endpoints.RdsServiceID:
			overrideURL = cfg.AwsEndpointRdsURL
		case endpoints.Ec2ServiceID:
			overrideURL = cfg.AwsEndpointEc2URL
		case endpoints.MonitoringServiceID:
			overrideURL = cfg.AwsEndpointCloudwatchURL
		case endpoints.LogsServiceID:
			overrideURL = cfg.AwsEndpointCloudwatchLogsURL
		}
		if overrideURL != "" {
			return endpoints.ResolvedEndpoint{
				URL:           overrideURL,
				SigningRegion: cfg.AwsEndpointSigningRegion,
			}, nil
		}
		return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
	}
	var providers []credentials.Provider
	// Static keys from the config take precedence when supplied.
	if cfg.AwsAccessKeyID != "" {
		providers = append(providers, &credentials.StaticProvider{
			Value: credentials.Value{
				AccessKeyID:     cfg.AwsAccessKeyID,
				SecretAccessKey: cfg.AwsSecretAccessKey,
				SessionToken:    "",
			},
		})
	}
	// Add default providers: environment variables, then the shared
	// credentials file.
	providers = append(providers, &credentials.EnvProvider{})
	providers = append(providers, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""})
	// Add the metadata service last, with a dedicated IMDS HTTP client and a
	// short retry budget so unreachable metadata fails fast.
	def := defaults.Get()
	def.Config.HTTPClient = config.CreateEC2IMDSHTTPClient(cfg)
	def.Config.MaxRetries = aws.Int(2)
	providers = append(providers, defaults.RemoteCredProvider(*def.Config, def.Handlers))
	creds := credentials.NewChainCredentials(providers)
	if cfg.AwsAssumeRole != "" || (cfg.AwsWebIdentityTokenFile != "" && cfg.AwsRoleArn != "") {
		// An intermediate session is required to mint STS credentials from
		// the base credential chain.
		sess, err := session.NewSession(&aws.Config{
			Credentials:                   creds,
			CredentialsChainVerboseErrors: aws.Bool(true),
			Region:                        aws.String(cfg.AwsRegion),
			HTTPClient:                    cfg.HTTPClient,
			EndpointResolver:              endpoints.ResolverFunc(customResolver),
		})
		if err != nil {
			return nil, err
		}
		if cfg.AwsAssumeRole != "" {
			creds = stscreds.NewCredentials(sess, cfg.AwsAssumeRole)
		} else if cfg.AwsWebIdentityTokenFile != "" && cfg.AwsRoleArn != "" {
			creds = stscreds.NewWebIdentityCredentials(sess, cfg.AwsRoleArn, "", cfg.AwsWebIdentityTokenFile)
		}
	}
	return session.NewSession(&aws.Config{
		Credentials:                   creds,
		CredentialsChainVerboseErrors: aws.Bool(true),
		Region:                        aws.String(cfg.AwsRegion),
		HTTPClient:                    cfg.HTTPClient,
		EndpointResolver:              endpoints.ResolverFunc(customResolver),
	})
}
|
[
"func (u *Utils) GetAWSSession() (*session.Session, error) {\n\tawsRegion := os.Getenv(\"AWS_REGION\")\n\tawsAccessKeyID := os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\tawsSecretAccessKey := os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\tawsProfile := os.Getenv(\"AWS_PROFILE\")\n\tawsToken := \"\"\n\tt := strings.ToUpper(os.Getenv(\"AWS_SESSION_DEBUG\"))\n\tdebug := false\n\tif t == \"TRUE\" {\n\t\tdebug = true\n\t}\n\n\tif debug {\n\t\tlog.Printf(\"Initiating AWS Seesion with AWS_PROFILE = %s, AWS_REGION = %s, AWS_ACCESS_KEY_ID = %s, AWS_SECRET_ACCESS_KEY = %s\", awsProfile, awsRegion, awsAccessKeyID, awsSecretAccessKey)\n\t} else {\n\t\tlog.Printf(\"Initiating AWS Seesion with AWS_PROFILE = %s, AWS_REGION = %s\", awsProfile, awsRegion)\n\t}\n\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(awsRegion),\n\t\tCredentials: credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, awsToken),\n\t})\n\n\treturn sess, err\n}",
"func GetAWSSession(parameters map[string]interface{}, creds []byte, defaultRegion string) (*session.Session, error) {\n\tawsCreds := &AWSCredentials{}\n\tif err := json.Unmarshal(creds, awsCreds); err != nil {\n\t\tlog.Error(err, \"Unmarshalling failed\")\n\t\treturn nil, err\n\t}\n\n\tregion, ok := parameters[\"region\"].(string)\n\tif !ok {\n\t\tlog.Error(nil, \"AWS region parameter missing\")\n\t\treturn nil, fmt.Errorf(\"AWS region parameter missing\")\n\t}\n\n\tif region == \"\" {\n\t\tregion = defaultRegion\n\t}\n\n\treturn session.NewSession(&aws.Config{\n\t\tRegion: aws.String(region),\n\t\tCredentials: credentials.NewStaticCredentials(\n\t\t\tawsCreds.AccessKeyID,\n\t\t\tawsCreds.SecretAccessKey,\n\t\t\tawsCreds.SessionToken),\n\t})\n}",
"func GetSession() (*session.Session, error) {\n\tif awsSession != nil {\n\t\treturn awsSession, nil\n\t}\n\n\tcreds := credentials.NewStaticCredentials(config.AWSSecretID, config.AWSSecretKey, \"\")\n\tsess, err := session.NewSession(&aws.Config{\n\t\tCredentials: creds,\n\t\tRegion: aws.String(\"us-east-1\"),\n\t})\n\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to create aws session: %s\", err.Error())\n\t\treturn nil, fmt.Errorf(\"Unable to create aws session: %s\", err.Error())\n\t}\n\n\tawsSession = sess\n\treturn sess, nil\n}",
"func GetAWSSession() (*session.Session, error) {\n\tsess, err := session.NewSession(&aws.Config{Region: aws.String(\"us-east-1\")})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif sess == nil {\n\t\treturn nil, errors.New(\"can't create aws session\")\n\t}\n\treturn sess, nil\n}",
"func GetSession(region string) (*session.Session, error) {\n\n\tlog.Debug().Msg(\"Retrieving AWS Session\")\n\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(region),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sess, nil\n\n}",
"func (sc *SessionCache) GetSession(region string, s AWSDatasourceSettings) (*session.Session, error) {\n\tif region == \"\" || region == defaultRegion {\n\t\tregion = s.Region\n\t}\n\n\tauthTypeAllowed := false\n\tfor _, provider := range sc.authSettings.AllowedAuthProviders {\n\t\tif provider == s.AuthType.String() {\n\t\t\tauthTypeAllowed = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !authTypeAllowed {\n\t\treturn nil, fmt.Errorf(\"attempting to use an auth type that is not allowed: %q\", s.AuthType.String())\n\t}\n\n\tif s.AssumeRoleARN != \"\" && !sc.authSettings.AssumeRoleEnabled {\n\t\treturn nil, fmt.Errorf(\"attempting to use assume role (ARN) which is disabled in grafana.ini\")\n\t}\n\n\tbldr := strings.Builder{}\n\tfor i, s := range []string{\n\t\ts.AuthType.String(), s.AccessKey, s.Profile, s.AssumeRoleARN, region, s.Endpoint,\n\t} {\n\t\tif i != 0 {\n\t\t\tbldr.WriteString(\":\")\n\t\t}\n\t\tbldr.WriteString(strings.ReplaceAll(s, \":\", `\\:`))\n\t}\n\tcacheKey := bldr.String()\n\n\tsc.sessCacheLock.RLock()\n\tif env, ok := sc.sessCache[cacheKey]; ok {\n\t\tif env.expiration.After(time.Now().UTC()) {\n\t\t\tsc.sessCacheLock.RUnlock()\n\t\t\treturn env.session, nil\n\t\t}\n\t}\n\tsc.sessCacheLock.RUnlock()\n\n\tcfgs := []*aws.Config{\n\t\t{\n\t\t\tCredentialsChainVerboseErrors: aws.Bool(true),\n\t\t},\n\t}\n\n\tvar regionCfg *aws.Config\n\tif region == defaultRegion {\n\t\tplog.Warn(\"Region is set to \\\"default\\\", which is unsupported\")\n\t\tregion = \"\"\n\t}\n\tif region != \"\" {\n\t\tregionCfg = &aws.Config{Region: aws.String(region)}\n\t\tcfgs = append(cfgs, regionCfg)\n\t}\n\n\tif s.Endpoint != \"\" {\n\t\tcfgs = append(cfgs, &aws.Config{Endpoint: aws.String(s.Endpoint)})\n\t}\n\n\tif s.HTTPClient != nil {\n\t\tcfgs = append(cfgs, &aws.Config{HTTPClient: s.HTTPClient})\n\t}\n\n\tswitch s.AuthType {\n\tcase AuthTypeSharedCreds:\n\t\tplog.Debug(\"Authenticating towards AWS with shared credentials\", \"profile\", s.Profile,\n\t\t\t\"region\", 
region)\n\t\tcfgs = append(cfgs, &aws.Config{\n\t\t\tCredentials: credentials.NewSharedCredentials(\"\", s.Profile),\n\t\t})\n\tcase AuthTypeKeys:\n\t\tplog.Debug(\"Authenticating towards AWS with an access key pair\", \"region\", region)\n\t\tcfgs = append(cfgs, &aws.Config{\n\t\t\tCredentials: credentials.NewStaticCredentials(s.AccessKey, s.SecretKey, \"\"),\n\t\t})\n\tcase AuthTypeDefault:\n\t\tplog.Debug(\"Authenticating towards AWS with default SDK method\", \"region\", region)\n\tcase AuthTypeEC2IAMRole:\n\t\tplog.Debug(\"Authenticating towards AWS with IAM Role\", \"region\", region)\n\t\tsess, err := newSession(cfgs...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcfgs = append(cfgs, &aws.Config{Credentials: newEC2RoleCredentials(sess)})\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unrecognized authType: %d\", s.AuthType))\n\t}\n\tsess, err := newSession(cfgs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tduration := stscreds.DefaultDuration\n\texpiration := time.Now().UTC().Add(duration)\n\tif s.AssumeRoleARN != \"\" && sc.authSettings.AssumeRoleEnabled {\n\t\t// We should assume a role in AWS\n\t\tplog.Debug(\"Trying to assume role in AWS\", \"arn\", s.AssumeRoleARN)\n\n\t\tcfgs := []*aws.Config{\n\t\t\t{\n\t\t\t\tCredentialsChainVerboseErrors: aws.Bool(true),\n\t\t\t},\n\t\t\t{\n\t\t\t\tCredentials: newSTSCredentials(sess, s.AssumeRoleARN, func(p *stscreds.AssumeRoleProvider) {\n\t\t\t\t\t// Not sure if this is necessary, overlaps with p.Duration and is undocumented\n\t\t\t\t\tp.Expiry.SetExpiration(expiration, 0)\n\t\t\t\t\tp.Duration = duration\n\t\t\t\t\tif s.ExternalID != \"\" {\n\t\t\t\t\t\tp.ExternalID = aws.String(s.ExternalID)\n\t\t\t\t\t}\n\t\t\t\t}),\n\t\t\t},\n\t\t}\n\t\tif regionCfg != nil {\n\t\t\tcfgs = append(cfgs, regionCfg)\n\t\t}\n\t\tsess, err = newSession(cfgs...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tplog.Debug(\"Successfully created AWS 
session\")\n\n\tsc.sessCacheLock.Lock()\n\tsc.sessCache[cacheKey] = envelope{\n\t\tsession: sess,\n\t\texpiration: expiration,\n\t}\n\tsc.sessCacheLock.Unlock()\n\n\treturn sess, nil\n}",
"func AwsSession() *session.Session {\n\treturn awsSession.Copy()\n}",
"func (c *cloudClients) GetAWSSession(region string) (*awssession.Session, error) {\n\tc.mtx.RLock()\n\tif session, ok := c.awsSessions[region]; ok {\n\t\tc.mtx.RUnlock()\n\t\treturn session, nil\n\t}\n\tc.mtx.RUnlock()\n\treturn c.initAWSSession(region)\n}",
"func (CcsAwsSession *ccsAwsSession) GetAWSSessions() error {\n\tvar err error\n\n\tCcsAwsSession.once.Do(func() {\n\t\tawsProfile := viper.GetString(config.AWSProfile)\n\t\tawsAccessKey := viper.GetString(config.AWSAccessKey)\n\t\tawsSecretAccessKey := viper.GetString(config.AWSSecretAccessKey)\n\n\t\toptions := session.Options{\n\t\t\tConfig: aws.Config{\n\t\t\t\tRegion: aws.String(viper.GetString(config.AWSRegion)),\n\t\t\t},\n\t\t}\n\n\t\tif awsProfile != \"\" {\n\t\t\toptions.Profile = awsProfile\n\t\t} else if awsAccessKey != \"\" || awsSecretAccessKey != \"\" {\n\t\t\toptions.Config.Credentials = credentials.NewStaticCredentials(awsAccessKey, awsSecretAccessKey, \"\")\n\t\t}\n\n\t\tCcsAwsSession.session, err = session.NewSessionWithOptions(options)\n\t\tCcsAwsSession.iam = iam.New(CcsAwsSession.session)\n\t\tCcsAwsSession.ec2 = ec2.New(CcsAwsSession.session)\n\t\tCcsAwsSession.accountId = viper.GetString(config.AWSAccountId)\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"error initializing AWS session: %v\", err)\n\t}\n\n\treturn nil\n}",
"func StartAwsSession() {\n\tsess, err := session.NewSession()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tAwsSession = sess\n}",
"func createSession(accessKeyId string, secretAccessKey string, profileName string) *session.Session {\n return session.Must(session.NewSessionWithOptions(session.Options{\n Config: aws.Config{\n Region: aws.String(\"eu-west-1\"),\n Credentials: credentials.NewStaticCredentials(accessKeyId, secretAccessKey, \"\"),\n },\n Profile: profileName,\n }))\n}",
"func (*SDKGetter) Session(clusterConfig *v1alpha1.AWSClusterProviderConfig) *session.Session {\n\treturn session.Must(session.NewSession(aws.NewConfig().WithRegion(clusterConfig.Region)))\n}",
"func CreateAwsSession(valid bool) *session.Session {\n\n\t// Setup\n\tvar creds AwsCreds\n\tvar err error = nil\n\n\t// Clear the environment variables\n\tos.Unsetenv(EnvAwsKey)\n\tos.Unsetenv(EnvAwsSecret)\n\tos.Unsetenv(EnvAwsDefRegion)\n\tos.Unsetenv(EnvAwsRegion)\n\n\t// Are we creating a valid session?\n\tif valid {\n\n\t\t// Get the credentials\n\t\tcreds, err = LoadAwsCreds()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t// Set the environment variables\n\t\tos.Setenv(EnvAwsKey, creds.Key)\n\t\tos.Setenv(EnvAwsSecret, creds.Secret)\n\t\tos.Setenv(EnvAwsDefRegion, creds.Region)\n\t\tos.Setenv(EnvAwsRegion, creds.Region)\n\t}\n\n\t// Create the session\n\tsess, err := auth.NewSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Return the session\n\treturn sess\n}",
"func GetSessionOptions(c *Config) (*session.Options, error) {\n\toptions := &session.Options{\n\t\tConfig: aws.Config{\n\t\t\tEndpointResolver: c.EndpointResolver(),\n\t\t\tHTTPClient: cleanhttp.DefaultClient(),\n\t\t\tMaxRetries: aws.Int(0),\n\t\t\tRegion: aws.String(c.Region),\n\t\t},\n\t\tProfile: c.Profile,\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t}\n\n\t// get and validate credentials\n\tcreds, err := GetCredentials(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// add the validated credentials to the session options\n\toptions.Config.Credentials = creds\n\n\tif c.Insecure {\n\t\ttransport := options.Config.HTTPClient.Transport.(*http.Transport)\n\t\ttransport.TLSClientConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t}\n\n\tif c.DebugLogging {\n\t\toptions.Config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors)\n\t\toptions.Config.Logger = DebugLogger{}\n\t}\n\n\treturn options, nil\n}",
"func awsSessionFromURL(awsURL *url.URL) (client.ConfigProvider, error) {\n\tif awsURL == nil {\n\t\treturn nil, fmt.Errorf(\"no URL specified for DynamoDB\")\n\t}\n\tpath := strings.TrimPrefix(awsURL.Path, \"/\")\n\tif len(path) > 0 {\n\t\tlevel.Warn(log.Logger).Log(\"msg\", \"ignoring DynamoDB URL path\", \"path\", path)\n\t}\n\tconfig, err := awscommon.ConfigFromURL(awsURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfig = config.WithMaxRetries(0) // We do our own retries, so we can monitor them\n\tconfig = config.WithHTTPClient(&http.Client{Transport: defaultTransport})\n\treturn session.NewSession(config)\n}",
"func GetSSMSession(region string) SSMAPI {\n\tvar svc SSMAPI = ssm.New(getSession(region))\n\treturn svc\n}",
"func GetAmazonCredentials(profile string, assumeRole string) (string, map[string]string, error) {\n\t/* create a new session, which is basically the same as the following, but may also contain a region\n\tcreds := credentials.NewChainCredentials(\n\t\t[]credentials.Provider{\n\t\t\t&credentials.EnvProvider{},\n\t\t\t&credentials.SharedCredentialsProvider{},\n\t\t}) */\n\tvar sess *session.Session\n\tvar err error\n\n\t// use direct auth based on exported env vars\n\tif id, ok := os.LookupEnv(\"AWS_ACCESS_KEY_ID\"); ok {\n\t\tif profile != \"\" {\n\t\t\tlog.Warnf(\"AWS profile `%s` is overridden to `%s` by AWS_ACCESS_KEY_ID env var explicitly\", profile, id)\n\t\t}\n\t\tsess, err = session.NewSession(&aws.Config{})\n\t} else {\n\t\tsess, err = session.NewSessionWithOptions(session.Options{\n\t\t\tProfile: profile,\n\t\t})\n\t}\n\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tvar creds credentials.Value\n\n\tif assumeRole != \"\" {\n\t\toptions := func(p *stscreds.AssumeRoleProvider) {\n\t\t\tp.Duration = 1 * time.Hour\n\t\t}\n\t\tcreds, err = stscreds.NewCredentials(sess, assumeRole, options).Get()\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t} else {\n\t\tcreds, err = sess.Config.Credentials.Get()\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t}\n\n\tout := map[string]string{\n\t\t\"AWS_ACCESS_KEY_ID\": creds.AccessKeyID,\n\t\t\"AWS_SECRET_ACCESS_KEY\": creds.SecretAccessKey,\n\t\t\"AWS_SESSION_TOKEN\": creds.SessionToken,\n\t}\n\n\tif sess.Config.Region != nil {\n\t\tout[AwsRegionKey] = *sess.Config.Region\n\t}\n\n\treturn creds.AccessKeyID, out, nil\n}",
"func New(region string) (*session.Session, error) {\n\tawsSession, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(region),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn awsSession, nil\n}",
"func newAWSSession(accessKeyID, secretAccessKey, region, asssumeRoleArn string) (*session.Session, error) {\n\tvar awsConf *aws.Config\n\tif secretAccessKey != \"\" && accessKeyID != \"\" {\n\t\tcreds := credentials.NewStaticCredentials(accessKeyID, secretAccessKey, \"\")\n\t\tawsConf = &aws.Config{Credentials: creds, Region: ®ion}\n\t} else {\n\t\tawsConf = &aws.Config{Region: ®ion}\n\t}\n\n\t// Optional: Assuming role\n\tif asssumeRoleArn != \"\" {\n\t\tstaticsess, err := session.NewSession(&aws.Config{Credentials: awsConf.Credentials})\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"failed to create new session: %v\", err)\n\t\t}\n\n\t\tawsConf.Credentials = credentials.NewCredentials(&stscreds.AssumeRoleProvider{\n\t\t\tClient: sts.New(staticsess),\n\t\t\tRoleARN: asssumeRoleArn,\n\t\t\tDuration: 15 * time.Minute,\n\t\t})\n\t}\n\n\treturn session.NewSession(awsConf)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewAuthorizer registers a new resource with the given unique name, arguments, and options.
|
func NewAuthorizer(ctx *pulumi.Context,
name string, args *AuthorizerArgs, opts ...pulumi.ResourceOpt) (*Authorizer, error) {
if args == nil || args.RestApi == nil {
return nil, errors.New("missing required argument 'RestApi'")
}
inputs := make(map[string]interface{})
if args == nil {
inputs["authorizerCredentials"] = nil
inputs["authorizerResultTtlInSeconds"] = nil
inputs["authorizerUri"] = nil
inputs["identitySource"] = nil
inputs["identityValidationExpression"] = nil
inputs["name"] = nil
inputs["providerArns"] = nil
inputs["restApi"] = nil
inputs["type"] = nil
} else {
inputs["authorizerCredentials"] = args.AuthorizerCredentials
inputs["authorizerResultTtlInSeconds"] = args.AuthorizerResultTtlInSeconds
inputs["authorizerUri"] = args.AuthorizerUri
inputs["identitySource"] = args.IdentitySource
inputs["identityValidationExpression"] = args.IdentityValidationExpression
inputs["name"] = args.Name
inputs["providerArns"] = args.ProviderArns
inputs["restApi"] = args.RestApi
inputs["type"] = args.Type
}
s, err := ctx.RegisterResource("aws:apigateway/authorizer:Authorizer", name, true, inputs, opts...)
if err != nil {
return nil, err
}
return &Authorizer{s: s}, nil
}
|
[
"func NewAuthorizer(ctx *pulumi.Context,\n\tname string, args *AuthorizerArgs, opts ...pulumi.ResourceOption) (*Authorizer, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RestApi == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RestApi'\")\n\t}\n\tvar resource Authorizer\n\terr := ctx.RegisterResource(\"aws:apigateway/authorizer:Authorizer\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (a *AuthzHandler) CreateAuthorizer() func(*http.Request) error {\n\tif !a.Enabled {\n\t\treturn func(*http.Request) error {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn a.Authorizer\n}",
"func (c *APIGateway) CreateAuthorizerRequest(input *CreateAuthorizerInput) (req *request.Request, output *Authorizer) {\n\top := &request.Operation{\n\t\tName: opCreateAuthorizer,\n\t\tHTTPMethod: \"POST\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers\",\n\t}\n\n\tif input == nil {\n\t\tinput = &CreateAuthorizerInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &Authorizer{}\n\treq.Data = output\n\treturn\n}",
"func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\treturn &dockerAuthorizer{\n\t\tcredentials: f,\n\t\tclient: client,\n\t\tauth: map[string]string{},\n\t}\n}",
"func (scanner) newBasicAuthorizer(username string, password string) *autorest.BasicAuthorizer {\n\treturn autorest.NewBasicAuthorizer(username, password)\n}",
"func (a *AuthorizationsService) Create(params interface{}) (auth *Authorization, result *Result) {\n\tresult = a.client.post(a.URL, params, &auth)\n\treturn\n}",
"func NewAuthorizer(clientset kubernetes.Interface, logger Logger) *Authorizer {\n\tif logger == nil {\n\t\tlogger = log.New(os.Stderr, \"rbac\", log.LstdFlags)\n\t}\n\n\tauthz := &Authorizer{\n\t\tlogger: logger,\n\t\tclientset: clientset,\n\t\tsyncDuration: defaultResyncDuration,\n\t\tselector: labels.NewSelector(),\n\t\tinformerStop: make(chan struct{}),\n\t}\n\tauthz.prepareCache()\n\treturn authz\n}",
"func NewRegistry(ctx context.Context, registrationConfFilepath string, logger logrus.FieldLogger) (*Registry, error) {\n\tregistryData := &RegistryData{}\n\n\tif registrationConfFilepath != \"\" {\n\t\tlogger.Debugf(\"parsing authorities registration conf from %v\", registrationConfFilepath)\n\t\tregistryFile, err := ioutil.ReadFile(registrationConfFilepath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = yaml.Unmarshal(registryFile, registryData)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tr := &Registry{\n\t\tauthorities: make(map[string]*AuthorityRegistration),\n\n\t\tlogger: logger,\n\t}\n\n\tvar defaultAuthority *AuthorityRegistration\n\tfor _, authority := range registryData.Authorities {\n\t\tvalidateErr := authority.Validate()\n\t\tregisterErr := r.Register(authority)\n\t\tfields := logrus.Fields{\n\t\t\t\"id\": authority.ID,\n\t\t\t\"client_id\": authority.ClientID,\n\t\t\t\"with_client_secret\": authority.ClientSecret != \"\",\n\t\t\t\"authority_type\": authority.AuthorityType,\n\t\t\t\"insecure\": authority.Insecure,\n\t\t\t\"default\": authority.Default,\n\t\t\t\"discover\": authority.discover,\n\t\t\t\"alias_required\": authority.IdentityAliasRequired,\n\t\t}\n\n\t\tif validateErr != nil {\n\t\t\tlogger.WithError(validateErr).WithFields(fields).Warnln(\"skipped registration of invalid authority entry\")\n\t\t\tcontinue\n\t\t}\n\t\tif registerErr != nil {\n\t\t\tlogger.WithError(registerErr).WithFields(fields).Warnln(\"skipped registration of invalid authority\")\n\t\t\tcontinue\n\t\t}\n\t\tif authority.Default || defaultAuthority == nil {\n\t\t\tif defaultAuthority == nil || !defaultAuthority.Default {\n\t\t\t\tdefaultAuthority = authority\n\t\t\t} else {\n\t\t\t\tlogger.Warnln(\"ignored default authority flag since already have a default\")\n\t\t\t}\n\t\t} else {\n\t\t\t// TODO(longsleep): Implement authority selection.\n\t\t\tlogger.Warnln(\"non-default additional authorities are not supported yet\")\n\t\t}\n\n\t\tgo 
func() {\n\t\t\tif initializeErr := authority.Initialize(ctx, logger); initializeErr != nil {\n\t\t\t\tlogger.WithError(initializeErr).WithFields(fields).Warnln(\"failed to initialize authority\")\n\t\t\t}\n\t\t}()\n\n\t\tlogger.WithFields(fields).Debugln(\"registered authority\")\n\t}\n\n\tif defaultAuthority != nil {\n\t\tif defaultAuthority.Default {\n\t\t\tr.defaultID = defaultAuthority.ID\n\t\t\tlogger.WithField(\"id\", defaultAuthority.ID).Infoln(\"using external default authority\")\n\t\t} else {\n\t\t\tlogger.Warnln(\"non-default authorities are not supported yet\")\n\t\t}\n\t}\n\n\treturn r, nil\n}",
"func createAuthorization(roleRef, userRef string) string {\n\treturn fmt.Sprintf(`{\n \"type\": \"Authorization\",\n \"user\": \"%s\",\n \"role\": \"%s\",\n \"target\": \"%s\"\n}`, userRef, roleRef, userRef)\n}",
"func NewAuthorizer() Authorizer {\n\treturn &authorizer{}\n}",
"func New(config *Config) (api.Authorize, error) {\n\tif config == nil {\n\t\tconfig = NewDefaultConfig()\n\t}\n\n\treturn &authorizer{\n\t\tconfig: config,\n\t\tresolve: &resolverImp{},\n\t}, nil\n}",
"func NewResource(ctx *pulumi.Context,\n\tname string, args *ResourceArgs, opts ...pulumi.ResourceOption) (*Resource, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Config == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Config'\")\n\t}\n\tif args.ConfigArray == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ConfigArray'\")\n\t}\n\tif args.ConfigMap == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ConfigMap'\")\n\t}\n\tif args.Foo == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Foo'\")\n\t}\n\tif args.FooArray == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'FooArray'\")\n\t}\n\tif args.FooMap == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'FooMap'\")\n\t}\n\tif args.Config != nil {\n\t\targs.Config = pulumi.ToSecret(args.Config).(ConfigInput)\n\t}\n\tif args.ConfigArray != nil {\n\t\targs.ConfigArray = pulumi.ToSecret(args.ConfigArray).(ConfigArrayInput)\n\t}\n\tif args.ConfigMap != nil {\n\t\targs.ConfigMap = pulumi.ToSecret(args.ConfigMap).(ConfigMapInput)\n\t}\n\tif args.Foo != nil {\n\t\targs.Foo = pulumi.ToSecret(args.Foo).(pulumi.StringInput)\n\t}\n\tif args.FooArray != nil {\n\t\targs.FooArray = pulumi.ToSecret(args.FooArray).(pulumi.StringArrayInput)\n\t}\n\tif args.FooMap != nil {\n\t\targs.FooMap = pulumi.ToSecret(args.FooMap).(pulumi.StringMapInput)\n\t}\n\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n\t\t\"config\",\n\t\t\"configArray\",\n\t\t\"configMap\",\n\t\t\"foo\",\n\t\t\"fooArray\",\n\t\t\"fooMap\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Resource\n\terr := ctx.RegisterResource(\"mypkg::Resource\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (scanner) newAuthorizerFromFile() (autorest.Authorizer, error) {\n\tauthorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint)\n\tif err != nil {\n\t\tzap.S().Errorf(\"failed to authorize : %v \", err)\n\t\treturn nil, err\n\t}\n\treturn authorizer, nil\n}",
"func NewAuthorizer(readToken string, writeToken string, allowPublicCreateGenerated bool) *DefaultAuthorizer {\n\treturn &DefaultAuthorizer{\n\t\treadToken: readToken,\n\t\twriteToken: writeToken,\n\t\tallowPublicCreateGenerated: allowPublicCreateGenerated,\n\t}\n}",
"func NewAuthorizerFromCLI() (autorest.Authorizer, error) {\n\tsettings, err := GetSettingsFromEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif settings.Values[Resource] == \"\" {\n\t\tsettings.Values[Resource] = settings.Environment.ResourceManagerEndpoint\n\t}\n\n\treturn NewAuthorizerFromCLIWithResource(settings.Values[Resource])\n}",
"func (c *AuthCnfg) newAuthorizerWithEnvVars(\n\tauthorizerFactory func(resourceBaseURI string) (autorest.Authorizer, error),\n\tresourceBaseURI string,\n\tenvVars map[string]string,\n) (autorest.Authorizer, error) {\n\n\t// Set environment variables\n\tcurEnvVars := map[string]string{}\n\tfor key, val := range c.Env {\n\t\tif curVal, ok := os.LookupEnv(key); ok {\n\t\t\tcurEnvVars[key] = curVal\n\t\t}\n\t\tos.Setenv(key, val)\n\t}\n\n\t// Get authorizer\n\tauthorizer, err := authorizerFactory(resourceBaseURI)\n\n\t// Unset environment variables\n\tfor key := range c.Env {\n\t\tprevVal, ok := curEnvVars[key]\n\t\tif ok {\n\t\t\tos.Setenv(key, prevVal)\n\t\t} else {\n\t\t\tos.Unsetenv(key)\n\t\t}\n\t}\n\n\treturn authorizer, err\n}",
"func (c *myClient) createAuthorization(roleRef, userRef string, wait bool) (results map[string]interface{}, err error) {\n\tnamespace := \"authorization\"\n\n\turl := fmt.Sprintf(\"%s\", namespace)\n\tpostBody := createAuthorization(roleRef, userRef)\n\taction, _, err := c.httpPost(url, postBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif wait {\n\t\tc.jobWaiter(action)\n\t}\n\treturn action, err\n}",
"func (h *DefaultAuthorizer) AuthenticateCreate(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tif h.writeToken != \"\" {\n\t\t\tif !h.allowPublicCreateGenerated {\n\t\t\t\th.AuthenticateWrite(next).ServeHTTP(res, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// If id form field has value check the token\n\t\t\ttoken := h.getToken(req)\n\t\t\terr := req.ParseForm()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Failed to parse form data: %s\", err)\n\t\t\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\treqID := req.Form.Get(\"id\")\n\t\t\tif reqID != \"\" {\n\t\t\t\tif token != h.writeToken {\n\t\t\t\t\tlog.Debugf(\"Failed to authorize create %s\", req.RemoteAddr)\n\t\t\t\t\tres.WriteHeader(http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tnext.ServeHTTP(res, req)\n\t})\n}",
"func CreateAuthor(w http.ResponseWriter, r *http.Request) {\n\tvar req createAuthorRequest\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\tutils.Log.Error(\"Unable to decode request body\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tutils.Log.WithFields(log.Fields{\n\t\t\"name\": req.Name,\n\t}).Debugf(\"controllers/authors.go - CreateAuthor() -\")\n\n\tvalErrors := utils.ValidateStruct(req)\n\tif len(valErrors) != 0 {\n\t\tutils.Log.Errorf(\"Validation errors: %s\", valErrors)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(map[string][]string{\n\t\t\t\"errors\": valErrors,\n\t\t})\n\t\treturn\n\t}\n\n\tresult, err := db.DB.CreateAuthor(r.Context(), req.Name)\n\tif err != nil {\n\t\tutils.Log.Error(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(result)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetAuthorizer gets an existing Authorizer resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
|
func GetAuthorizer(ctx *pulumi.Context,
name string, id pulumi.ID, state *AuthorizerState, opts ...pulumi.ResourceOpt) (*Authorizer, error) {
inputs := make(map[string]interface{})
if state != nil {
inputs["authorizerCredentials"] = state.AuthorizerCredentials
inputs["authorizerResultTtlInSeconds"] = state.AuthorizerResultTtlInSeconds
inputs["authorizerUri"] = state.AuthorizerUri
inputs["identitySource"] = state.IdentitySource
inputs["identityValidationExpression"] = state.IdentityValidationExpression
inputs["name"] = state.Name
inputs["providerArns"] = state.ProviderArns
inputs["restApi"] = state.RestApi
inputs["type"] = state.Type
}
s, err := ctx.ReadResource("aws:apigateway/authorizer:Authorizer", name, id, inputs, opts...)
if err != nil {
return nil, err
}
return &Authorizer{s: s}, nil
}
|
[
"func GetAuthorizer(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AuthorizerState, opts ...pulumi.ResourceOption) (*Authorizer, error) {\n\tvar resource Authorizer\n\terr := ctx.ReadResource(\"aws:apigateway/authorizer:Authorizer\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func GetAuthorizer() autorest.Authorizer {\n\tif authorizer == nil {\n\t\tpanic(\"Failed to initialize authorizer\")\n\t}\n\treturn authorizer\n}",
"func GetAuthorizer(sp *ServicePrincipal, env *azure.Environment) (autorest.Authorizer, error) {\n\toauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, sp.TenantID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttoken, err := adal.NewServicePrincipalToken(*oauthConfig, sp.ClientID, sp.ClientSecret, env.ServiceManagementEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn autorest.NewBearerAuthorizer(token), nil\n}",
"func (o *Rule) GetAuthorizer() RuleHandler {\n\tif o == nil || o.Authorizer == nil {\n\t\tvar ret RuleHandler\n\t\treturn ret\n\t}\n\treturn *o.Authorizer\n}",
"func (c *APIGateway) GetAuthorizerRequest(input *GetAuthorizerInput) (req *request.Request, output *Authorizer) {\n\top := &request.Operation{\n\t\tName: opGetAuthorizer,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers/{authorizer_id}\",\n\t}\n\n\tif input == nil {\n\t\tinput = &GetAuthorizerInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &Authorizer{}\n\treq.Data = output\n\treturn\n}",
"func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {\n\t//1.Client Credentials\n\tif c, e := settings.GetClientCredentials(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using client secret credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t//2. Client Certificate\n\tif c, e := settings.GetClientCertificate(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using client certificate credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t//3. Username Password\n\tif c, e := settings.GetUsernamePassword(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using user name/password credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t// 4. MSI\n\tif !adal.MSIAvailable(context.Background(), nil) {\n\t\treturn nil, errors.New(\"MSI not available\")\n\t}\n\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using MSI authentication\")\n\treturn settings.GetMSI().Authorizer()\n}",
"func (settings FileSettings) GetAuthorizer(resourceBaseURI string) (autorest.Authorizer, error) {\n\tif resourceBaseURI == \"\" {\n\t\tresourceBaseURI = azure.PublicCloud.ServiceManagementEndpoint\n\t}\n\tif a, err := settings.ClientCredentialsAuthorizer(resourceBaseURI); err == nil {\n\t\treturn a, err\n\t}\n\tif a, err := settings.ClientCertificateAuthorizer(resourceBaseURI); err == nil {\n\t\treturn a, err\n\t}\n\treturn nil, errors.New(\"auth file missing client and certificate credentials\")\n}",
"func getAuthorizerFrom(values map[string]string) (autorest.Authorizer, error) {\n\ts, err := getSettingsFrom(values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta, err := s.GetAuthorizer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn a, nil\n}",
"func (c *AuthorizeClient) Get(ctx context.Context, id int) (*Authorize, error) {\n\treturn c.Query().Where(authorize.ID(id)).Only(ctx)\n}",
"func (o *Rule) GetAuthorizerOk() (*RuleHandler, bool) {\n\tif o == nil || o.Authorizer == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Authorizer, true\n}",
"func GetGraphAuthorizer(creds config.Credentials) (autorest.Authorizer, error) {\n\tif graphAuthorizer != nil {\n\t\treturn graphAuthorizer, nil\n\t}\n\n\tvar a autorest.Authorizer\n\tvar err error\n\n\ta, err = getAuthorizerForResource(config.Environment().GraphEndpoint, creds)\n\n\tif err == nil {\n\t\t// cache\n\t\tgraphAuthorizer = a\n\t} else {\n\t\tgraphAuthorizer = nil\n\t}\n\n\treturn graphAuthorizer, err\n}",
"func NewAuthorizer(ctx *pulumi.Context,\n\tname string, args *AuthorizerArgs, opts ...pulumi.ResourceOption) (*Authorizer, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RestApi == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RestApi'\")\n\t}\n\tvar resource Authorizer\n\terr := ctx.RegisterResource(\"aws:apigateway/authorizer:Authorizer\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewAuthorizer(ctx *pulumi.Context,\n\tname string, args *AuthorizerArgs, opts ...pulumi.ResourceOpt) (*Authorizer, error) {\n\tif args == nil || args.RestApi == nil {\n\t\treturn nil, errors.New(\"missing required argument 'RestApi'\")\n\t}\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"authorizerCredentials\"] = nil\n\t\tinputs[\"authorizerResultTtlInSeconds\"] = nil\n\t\tinputs[\"authorizerUri\"] = nil\n\t\tinputs[\"identitySource\"] = nil\n\t\tinputs[\"identityValidationExpression\"] = nil\n\t\tinputs[\"name\"] = nil\n\t\tinputs[\"providerArns\"] = nil\n\t\tinputs[\"restApi\"] = nil\n\t\tinputs[\"type\"] = nil\n\t} else {\n\t\tinputs[\"authorizerCredentials\"] = args.AuthorizerCredentials\n\t\tinputs[\"authorizerResultTtlInSeconds\"] = args.AuthorizerResultTtlInSeconds\n\t\tinputs[\"authorizerUri\"] = args.AuthorizerUri\n\t\tinputs[\"identitySource\"] = args.IdentitySource\n\t\tinputs[\"identityValidationExpression\"] = args.IdentityValidationExpression\n\t\tinputs[\"name\"] = args.Name\n\t\tinputs[\"providerArns\"] = args.ProviderArns\n\t\tinputs[\"restApi\"] = args.RestApi\n\t\tinputs[\"type\"] = args.Type\n\t}\n\ts, err := ctx.RegisterResource(\"aws:apigateway/authorizer:Authorizer\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Authorizer{s: s}, nil\n}",
"func (r *Registry) Get(ctx context.Context, authorityID string) (*AuthorityRegistration, bool) {\n\tif authorityID == \"\" {\n\t\treturn nil, false\n\t}\n\n\t// Lookup authority registration.\n\tr.mutex.RLock()\n\tregistration, ok := r.authorities[authorityID]\n\tr.mutex.RUnlock()\n\n\treturn registration, ok\n}",
"func GetKeyvaultAuthorizer(creds config.Credentials) (autorest.Authorizer, error) {\n\tif keyvaultAuthorizer != nil {\n\t\treturn keyvaultAuthorizer, nil\n\t}\n\n\t// BUG: default value for KeyVaultEndpoint is wrong\n\tvaultEndpoint := strings.TrimSuffix(config.Environment().KeyVaultEndpoint, \"/\")\n\t// BUG: alternateEndpoint replaces other endpoints in the configs below\n\talternateEndpoint, _ := url.Parse(\n\t\t\"https://login.windows.net/\" + creds.TenantID() + \"/oauth2/token\")\n\n\tvar a autorest.Authorizer\n\tvar err error\n\n\tswitch grantType(creds) {\n\tcase OAuthGrantTypeServicePrincipal:\n\t\toauthconfig, err := adal.NewOAuthConfig(\n\t\t\tconfig.Environment().ActiveDirectoryEndpoint, creds.TenantID())\n\t\tif err != nil {\n\t\t\treturn a, err\n\t\t}\n\t\toauthconfig.AuthorizeEndpoint = *alternateEndpoint\n\n\t\ttoken, err := adal.NewServicePrincipalToken(\n\t\t\t*oauthconfig, creds.ClientID(), creds.ClientSecret(), vaultEndpoint)\n\t\tif err != nil {\n\t\t\treturn a, err\n\t\t}\n\n\t\ta = autorest.NewBearerAuthorizer(token)\n\n\tcase OAuthGrantTypeManagedIdentity:\n\t\tMIEndpoint, err := adal.GetMSIVMEndpoint()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttoken, err := adal.NewServicePrincipalTokenFromMSI(MIEndpoint, vaultEndpoint)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ta = autorest.NewBearerAuthorizer(token)\n\n\tcase OAuthGrantTypeDeviceFlow:\n\t\t// TODO: Remove this - it's an interactive authentication\n\t\t// method and doesn't make sense in an operator. 
Maybe it was\n\t\t// useful for early testing?\n\t\tdeviceConfig := auth.NewDeviceFlowConfig(creds.ClientID(), creds.TenantID())\n\t\tdeviceConfig.Resource = vaultEndpoint\n\t\tdeviceConfig.AADEndpoint = alternateEndpoint.String()\n\t\ta, err = deviceConfig.Authorizer()\n\tdefault:\n\t\treturn a, fmt.Errorf(\"invalid grant type specified\")\n\t}\n\n\tif err == nil {\n\t\tkeyvaultAuthorizer = a\n\t} else {\n\t\tkeyvaultAuthorizer = nil\n\t}\n\n\treturn keyvaultAuthorizer, err\n}",
"func (i *Influx) RetrieveAuthorization(authID string) (auth *protocol.Authorization, err error) {\n\tres, err := i.HTTPInstance.Get(context.TODO(), i.HTTPClient, i.GetBasicURL()+\"/authorizations\", map[string]string{\n\t\t\"authID\": authID,\n\t}, nil)\n\n\terr = json.Unmarshal(res, &auth)\n\n\treturn\n}",
"func NewAuthorizer() Authorizer {\n\treturn &authorizer{}\n}",
"func (c Client) authorizer() Authorizer {\n\tif c.Authorizer == nil {\n\t\treturn NullAuthorizer{}\n\t}\n\treturn c.Authorizer\n}",
"func Authorizer() security.Authorizer { return authorizer{} }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
|
func (r *Authorizer) AuthorizerCredentials() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["authorizerCredentials"])
}
|
[
"func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {\n\toauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspToken, err := adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from client credentials: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}",
"func (o *CredentialProviderAPI) Authorizer() runtime.Authorizer {\n\n\treturn nil\n\n}",
"func (a Authorizer) AuthorizeRole(rw http.ResponseWriter, req *http.Request, role string) error {\n\treturn nil\n}",
"func Authorize(requiredRole string) func(http.Handler) http.Handler {\n\trequiredRole = strings.ToLower(requiredRole)\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx := r.Context()\n\n\t\t\tclaims, err := GetJWTClaims(ctx)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check role inside of an array of roles in access token claims.\n\t\t\tvar role bool\n\t\t\tfor _, userRole := range claims.User_roles {\n\t\t\t\tif strings.EqualFold(requiredRole, userRole) {\n\t\t\t\t\trole = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !role {\n\t\t\t\ts := fmt.Sprintf(\n\t\t\t\t\t\"you are not authorized for that action; get roles: %v, expected: %s\",\n\t\t\t\t\tclaims.User_roles,\n\t\t\t\t\trequiredRole,\n\t\t\t\t)\n\t\t\t\thttp.Error(w, s, http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}",
"func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := ups.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from username and password auth: %v\", err)\n\t}\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}",
"func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) {\n\tresource, err := settings.getResourceForToken(baseURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn settings.ClientCredentialsAuthorizerWithResource(resource)\n}",
"func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {\n\n\toauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)\n\n\tspToken, err := adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from username and password auth: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}",
"func (as *authServer) Authorize(credentials Credentials, scopes Scopes, aud string) (*TokenCredentials, error) {\n\tauthorizer, ok := as.authorizers[credentials.Grant]\n\tif !ok {\n\t\treturn nil, InvalidGrant(errors.New(\"invalid grant type\"))\n\t}\n\terr := authorizer(credentials)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = as.checkAudience(aud, credentials)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts, err := as.sProvider(credentials.ID, credentials.Grant, aud, scopes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn as.createCredentials(credentials.ID, s, credentials.Grant, aud)\n}",
"func Authorizer() security.Authorizer { return authorizer{} }",
"func GetAssumeRoleCredentials(awsClient Client, durationSeconds *int64, roleSessionName, roleArn *string) (*sts.Credentials, error) {\n\tassumeRoleOutput, err := awsClient.AssumeRole(&sts.AssumeRoleInput{\n\t\tDurationSeconds: durationSeconds,\n\t\tRoleSessionName: roleSessionName,\n\t\tRoleArn: roleArn,\n\t})\n\tif err != nil {\n\t\t// Get error details\n\t\tklog.Errorf(\"Failed to assume role: %v\", err)\n\n\t\treturn nil, err\n\t}\n\n\tif assumeRoleOutput == nil {\n\t\tklog.Errorf(\"Get assume role output nil %v\", awsv1alpha1.ErrFederationTokenOutputNil)\n\t\treturn nil, awsv1alpha1.ErrFederationTokenOutputNil\n\t}\n\n\treturn assumeRoleOutput.Credentials, nil\n}",
"func (a *authority) Authorize(r *http.Request, act, res string) (Principal, error) {\n\tfor _, verifier := range a.authorizers {\n\t\t// OR logic on multiple credentials\n\t\tverified, err := verifier(r, act, res)\n\t\tif err == nil {\n\t\t\treturn verified, nil\n\t\t}\n\t}\n\n\treturn nil, ErrInvalidCredentials\n}",
"func Authorize(w http.ResponseWriter, r *http.Request, authorizer Authorizer) {\n\tauthReq, err := ParseAuthorizeRequest(r, authorizer.Decoder())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.RequestParam != \"\" && authorizer.RequestObjectSupported() {\n\t\tauthReq, err = ParseRequestObject(r.Context(), authReq, authorizer.Storage(), authorizer.Issuer())\n\t\tif err != nil {\n\t\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\t\treturn\n\t\t}\n\t}\n\tvalidation := ValidateAuthRequest\n\tif validater, ok := authorizer.(AuthorizeValidator); ok {\n\t\tvalidation = validater.ValidateAuthRequest\n\t}\n\tuserID, err := validation(r.Context(), authReq, authorizer.Storage(), authorizer.IDTokenHintVerifier())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.RequestParam != \"\" {\n\t\tAuthRequestError(w, r, authReq, oidc.ErrRequestNotSupported(), authorizer.Encoder())\n\t\treturn\n\t}\n\treq, err := authorizer.Storage().CreateAuthRequest(r.Context(), authReq, userID)\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, oidc.DefaultToServerError(err, \"unable to save auth request\"), authorizer.Encoder())\n\t\treturn\n\t}\n\tclient, err := authorizer.Storage().GetClientByClientID(r.Context(), req.GetClientID())\n\tif err != nil {\n\t\tAuthRequestError(w, r, req, oidc.DefaultToServerError(err, \"unable to retrieve client by id\"), authorizer.Encoder())\n\t\treturn\n\t}\n\tRedirectToLogin(req.GetID(), client, w, r)\n}",
"func (c *STSCredentialsGetter) Get(role string, sessionDuration time.Duration) (*Credentials, error) {\n\troleARN := c.baseRoleARN + role\n\tif strings.HasPrefix(role, c.baseRoleARNPrefix) {\n\t\troleARN = role\n\t}\n\n\troleSessionName, err := normalizeRoleARN(roleARN, c.baseRoleARNPrefix)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := &sts.AssumeRoleInput{\n\t\tRoleArn: aws.String(roleARN),\n\t\tRoleSessionName: aws.String(roleSessionName),\n\t\tDurationSeconds: aws.Int64(int64(sessionDuration.Seconds())),\n\t}\n\n\tresp, err := c.svc.AssumeRole(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Credentials{\n\t\tRoleARN: roleARN,\n\t\tAccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId),\n\t\tSecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),\n\t\tSessionToken: aws.StringValue(resp.Credentials.SessionToken),\n\t\tExpiration: aws.TimeValue(resp.Credentials.Expiration),\n\t}, nil\n}",
"func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) {\n\tif route == nil || !route.HasAuth() {\n\t\treturn nil, nil, nil\n\t}\n\n\tvar rCtx = request.Context()\n\tif v := rCtx.Value(ctxSecurityPrincipal); v != nil {\n\t\treturn v, request, nil\n\t}\n\n\tapplies, usr, err := route.Authenticators.Authenticate(request, route)\n\tif !applies || err != nil || !route.Authenticators.AllowsAnonymous() && usr == nil {\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn nil, nil, errors.Unauthenticated(\"invalid credentials\")\n\t}\n\tif route.Authorizer != nil {\n\t\tif err := route.Authorizer.Authorize(request, usr); err != nil {\n\t\t\tif _, ok := err.(errors.Error); ok {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\treturn nil, nil, errors.New(http.StatusForbidden, err.Error())\n\t\t}\n\t}\n\n\trCtx = request.Context()\n\n\trCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr)\n\trCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes())\n\treturn usr, request.WithContext(rCtx), nil\n}",
"func getRoleCreds(client *sts.STS, roleArn string) (*sts.Credentials, error) {\n\tinput := new(sts.AssumeRoleInput).\n\t\tSetDurationSeconds(duration).\n\t\tSetRoleArn(roleArn).\n\t\tSetRoleSessionName(sessionName)\n\tif mfaToken != \"\" {\n\t\tinput.SetSerialNumber(mfaSerial).SetTokenCode(mfaToken)\n\t}\n\tresult, err := client.AssumeRole(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.Credentials, nil\n}",
"func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := mc.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}",
"func (kv *AzureKeyVault) AuthorizeFromEnvironment() error {\n\tif os.Getenv(\"AZURE_TENANT_ID\") == \"\" {\n\t\treturn errors.New(\"AZURE_TENANT_ID environment variable not found\")\n\t}\n\n\tif os.Getenv(\"AZURE_CLIENT_ID\") == \"\" {\n\t\treturn errors.New(\"AZURE_CLIENT_ID environment variable not found\")\n\t}\n\n\tif os.Getenv(\"AZURE_CLIENT_SECRET\") == \"\" {\n\t\treturn errors.New(\"AZURE_CLIENT_SECRET environment variable not found\")\n\t}\n\n\tauthorizer, err := auth.NewAuthorizerFromEnvironment()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error occurred while authorizing: %v\", err)\n\t}\n\n\tkv.client.Authorizer = authorizer\n\tkv.authenticated = true\n\n\treturn nil\n}",
"func Authorize(loginRequest LoginRequest) error {\n\n\terr := ValidateToken(loginRequest.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// password should be encrypted, and stored in a database.\n\tif loginRequest.Username != \"[email protected]\" || loginRequest.Password != \"#th@nH@rm#y#r!$100%D0p#\" {\n\t\treturn fmt.Errorf(\"Invalid credentials\")\n\t}\n\n\treturn nil\n}",
"func (ecp EnvironmentCredentialProvider) Credentials() (*AWSCredentials, error) {\n\taccessKey := os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\tsecretKey := os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\tsessionToken := os.Getenv(\"AWS_SESSION_TOKEN\")\n\n\tif len(accessKey) > 0 && len(secretKey) > 0 {\n\t\tif len(sessionToken) > 0 {\n\t\t\treturn &AWSCredentials{accessKey, secretKey, sessionToken}, nil\n\t\t}\n\t\treturn &AWSCredentials{AccessKey: accessKey, SecretKey: secretKey}, nil\n\t}\n\treturn nil, errors.New(\"Unable to find credentials in the environment\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Search performs a symbol search on the symbols service.
|
func (c *Client) Search(ctx context.Context, args search.SymbolsParameters) (symbols result.Symbols, err error) {
span, ctx := ot.StartSpanFromContext(ctx, "symbols.Client.Search")
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.LogFields(otlog.Error(err))
}
span.Finish()
}()
span.SetTag("Repo", string(args.Repo))
span.SetTag("CommitID", string(args.CommitID))
resp, err := c.httpPost(ctx, "search", args.Repo, args)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
// best-effort inclusion of body in error message
body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
return nil, errors.Errorf(
"Symbol.Search http status %d: %s",
resp.StatusCode,
string(body),
)
}
var response search.SymbolsResponse
err = json.NewDecoder(resp.Body).Decode(&response)
if err != nil {
return nil, err
}
if response.Err != "" {
return nil, errors.New(response.Err)
}
symbols = response.Symbols
// 🚨 SECURITY: We have valid results, so we need to apply sub-repo permissions
// filtering.
if c.SubRepoPermsChecker == nil {
return symbols, err
}
checker := c.SubRepoPermsChecker()
if !authz.SubRepoEnabled(checker) {
return symbols, err
}
a := actor.FromContext(ctx)
// Filter in place
filtered := symbols[:0]
for _, r := range symbols {
rc := authz.RepoContent{
Repo: args.Repo,
Path: r.Path,
}
perm, err := authz.ActorPermissions(ctx, checker, a, rc)
if err != nil {
return nil, errors.Wrap(err, "checking sub-repo permissions")
}
if perm.Include(authz.Read) {
filtered = append(filtered, r)
}
}
return filtered, nil
}
|
[
"func (_EMABI *EMABITransactor) Search(opts *bind.TransactOpts, ct Struct0) (*types.Transaction, error) {\n\treturn _EMABI.contract.Transact(opts, \"search\", ct)\n}",
"func (_EMABI *EMABISession) Search(ct Struct0) (*types.Transaction, error) {\n\treturn _EMABI.Contract.Search(&_EMABI.TransactOpts, ct)\n}",
"func (_TestABI *TestABISession) Search(ct Struct0) (*types.Transaction, error) {\n\treturn _TestABI.Contract.Search(&_TestABI.TransactOpts, ct)\n}",
"func (r *Registry) Search(ctx context.Context, in gsvc.SearchInput) ([]gsvc.Service, error) {\n\tif in.Prefix == \"\" && in.Name != \"\" {\n\t\tin.Prefix = gsvc.NewServiceWithName(in.Name).GetPrefix()\n\t}\n\n\tres, err := r.kv.Get(ctx, in.Prefix, etcd3.WithPrefix())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tservices, err := extractResponseToServices(res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Service filter.\n\tfilteredServices := make([]gsvc.Service, 0)\n\tfor _, service := range services {\n\t\tif in.Prefix != \"\" && !gstr.HasPrefix(service.GetKey(), in.Prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tif in.Name != \"\" && service.GetName() != in.Name {\n\t\t\tcontinue\n\t\t}\n\t\tif in.Version != \"\" && service.GetVersion() != in.Version {\n\t\t\tcontinue\n\t\t}\n\t\tif len(in.Metadata) != 0 {\n\t\t\tm1 := gmap.NewStrAnyMapFrom(in.Metadata)\n\t\t\tm2 := gmap.NewStrAnyMapFrom(service.GetMetadata())\n\t\t\tif !m1.IsSubOf(m2) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tresultItem := service\n\t\tfilteredServices = append(filteredServices, resultItem)\n\t}\n\treturn filteredServices, nil\n}",
"func Search(query string) ([]Place, error) {\n\treturn DefaultClient.Search(query)\n}",
"func (h *Handlers) Search(w http.ResponseWriter, r *http.Request) {\n\tinput, err := buildSearchInput(r.URL.Query())\n\tif err != nil {\n\t\th.logger.Error().Err(err).Str(\"query\", r.URL.RawQuery).Str(\"method\", \"Search\").Msg(\"invalid query\")\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tdataJSON, err := h.pkgManager.SearchJSON(r.Context(), input)\n\tif err != nil {\n\t\th.logger.Error().Err(err).Str(\"query\", r.URL.RawQuery).Str(\"method\", \"Search\").Send()\n\t\tif errors.Is(err, pkg.ErrInvalidInput) {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t} else {\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\thelpers.RenderJSON(w, dataJSON, helpers.DefaultAPICacheMaxAge)\n}",
"func (s *Service) Search(namespace, pattern string) []runtime.Object {\n\treturn s.repo.Search(namespace, pattern)\n}",
"func Search(cmd *cobra.Command, args []string) {\n\t//*TODO* Update with the new serializer option\n\tlog.Debug(\"Search subcommand run with log level: \", Verbose, \"\\n\")\n\n\tif len(args) > 0 {\n\t\tif args[0] != \"\" {\n\t\t\tsearchTerm = args[0]\n\t\t}\n\t}\n\tif len(args) > 1 {\n\t\tif args[1] != \"\" {\n\t\t\tgobFile = args[1]\n\t\t}\n\t}\n\n\tnp := persister.NewPersistor(persister.GOB)\n\terr := np.Load(gobFile, &data)\n\tif err != nil {\n\t\tlog.Errorf(\"error loading gob file: %v\", err)\n\t}\n\n\tfilters, urls, names := data.F, data.U, data.N\n\n\tvar found []interface{}\n\t// iterate through the filters and return indices of matches\n\tfor i, v := range filters {\n\t\tfilter, _ := cuckoo.Decode(v)\n\t\tif filter.Lookup([]byte(searchTerm)) {\n\t\t\tif len(found) >= rslts {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfound = append(found, Result{\n\t\t\t\tName: names[i],\n\t\t\t\tURL: urls[i],\n\t\t\t})\n\t\t}\n\t}\n\n\tif !nostd {\n\t\t// ** PRETTY OUTPUT FOR USE AT COMMAND LINE **\n\t\tfoundJSON, err := json.MarshalIndent(found, \"\", \" \")\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error in marshalling the found results: %v\", err)\n\t\t}\n\t\tfmt.Printf(\"Search Results:\\n %s\\n\", string(foundJSON))\n\t}\n\n}",
"func (c *Client) Search(ctx context.Context, r *SearchRequest) (*SearchResponse, error) {\n\treq, err := c.requestForSearch(ctx, r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"saucenao search: %w\", err)\n\t}\n\tresp, err := c.C.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"saucenao search: %s\", err)\n\t}\n\tdefer resp.Body.Close()\n\tswitch resp.StatusCode {\n\tcase 200:\n\tcase 429:\n\t\treturn nil, fmt.Errorf(\"saucenao search: %w\", QuotaError{})\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"saucenao search: unexpected status %v\", resp.Status)\n\t}\n\td, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"saucenao search: %s\", err)\n\t}\n\tvar sr SearchResponse\n\tif err := json.Unmarshal(d, &sr); err != nil {\n\t\treturn nil, fmt.Errorf(\"saucenao search: %s\", err)\n\t}\n\treturn &sr, nil\n}",
"func (s *gRPCsrv) Search(q *pb.Query, stream pb.Crawler_SearchServer) error {\n\tif q.Key == \"\" {\n\t\treturn badRequest(\"Key must not be empty\")\n\t}\n\n\t// create query\n\td := make(chan bool)\n\topt := mart.Query{\n\t\tKey: q.Key,\n\t\tOrder: mart.ByPrice,\n\t\tDone: func() { d <- true },\n\t}\n\tif q.Order == pb.Query_POPULAR {\n\t\topt.Order = mart.ByPopular\n\t}\n\n\t// find if mart available\n\tvar ms []*mart.Mart\n\tif q.Mart != \"\" {\n\t\tm, err := mart.Open(q.Mart)\n\t\tif err != nil {\n\t\t\treturn noFound(\"Mart \" + q.Mart + \" not available\")\n\t\t}\n\n\t\tms = append(ms, m)\n\t} else {\n\t\tms = mart.All()\n\t\tif len(ms) == 0 {\n\t\t\treturn noFound(\"No mart available\")\n\t\t}\n\t}\n\n\t// create context and channel; make search request\n\tctx, quit := context.WithCancel(stream.Context())\n\tdefer quit()\n\n\tput := make(chan []mart.Product)\n\tche := make(chan error)\n\tfor i := range ms {\n\t\tms[i].Search(ctx, opt, put, che)\n\t}\n\n\t// listen for search response\n\tvar sent, done int64\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Println(\"Search keyword\", q.Key, \"cancelled\")\n\t\t\treturn nil\n\t\tcase ps := <-put:\n\t\t\tfor i := range ps {\n\t\t\t\tsent++\n\t\t\t\tif q.Num > 0 && sent > q.Num { // reach max number, return\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tif err := stream.Send(&pb.Product{\n\t\t\t\t\tName: ps[i].Name,\n\t\t\t\t\tImage: ps[i].Image,\n\t\t\t\t\tPage: ps[i].Page,\n\t\t\t\t\tPrice: int64(ps[i].Price),\n\t\t\t\t\tMart: ps[i].Mart,\n\t\t\t\t}); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\treturn nil // connection lost?\n\t\t\t\t}\n\t\t\t}\n\t\tcase err := <-che:\n\t\t\tlog.Println(err)\n\t\tcase <-d:\n\t\t\tdone++\n\t\t\tif done == int64(len(ms)) { // all jobs are done\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}",
"func Search(cmdInfo CommandInfo) {\n\tif len(cmdInfo.CmdOps) == 1 {\n\t\treturn\n\t}\n\tif strings.ToLower(cmdInfo.CmdOps[1]) == \"north\" || strings.ToLower(cmdInfo.CmdOps[1]) == \"south\" {\n\t\t// ByMonth search\n\t\tbyMonth(cmdInfo.CmdOps[1:], cmdInfo)\n\t\treturn\n\t}\n\tformatStr := toLowerAndFormat(cmdInfo.CmdOps[1:])\n\tentry, err := cmdInfo.Service.Entry.ByName(formatStr, \"bug_and_fish\")\n\tsearchItem := formatName(cmdInfo.CmdOps[1:])\n\tif err != nil {\n\t\t// If entry was not found in database\n\t\tword := strings.Split(searchItem, \" \")\n\t\tvals := cmdInfo.Service.Entry.FindLike(toLowerAndFormat(word), \"bug_and_fish\")\n\t\tvar fields []*discordgo.MessageEmbedField\n\t\tfor _, val := range vals {\n\t\t\tfields = append(fields, createFields(strings.Title(val.Type), strings.Title(removeUnderscore(val.Name)), true))\n\t\t}\n\t\tif len(fields) == 0 {\n\t\t\t// If no similar entries were found\n\t\t\tmsg := cmdInfo.createMsgEmbed(searchItem, errThumbURL, \"No similar entries found.\", errColor, fields)\n\t\t\tcmdInfo.Ses.ChannelMessageSendEmbed(cmdInfo.BotChID, msg)\n\t\t\treturn\n\t\t}\n\t\tmsg := cmdInfo.createMsgEmbed(searchItem, errThumbURL, \"Entry Not Found in Database... Perhaps you meant...?\", errColor, fields)\n\t\tcmdInfo.Ses.ChannelMessageSendEmbed(cmdInfo.BotChID, msg)\n\t\treturn\n\t}\n\tnHemi, sHemi := parseHemi(entry.NorthSt, entry.NorthEnd, entry.SouthSt, entry.SouthEnd)\n\tfields := format(\n\t\tcreateFields(\"Price\", strconv.Itoa(entry.SellPrice)+\" Bells\", true),\n\t\tcreateFields(\"Location\", removeUnderscore(entry.Location), true),\n\t\tcreateFields(\"Time\", removeUnderscore(entry.Time), false),\n\t\tcreateFields(\"Northern Hemisphere\", nHemi, false),\n\t\tcreateFields(\"Southern Hemisphere\", sHemi, false),\n\t)\n\tmsg := cmdInfo.createMsgEmbed(searchItem, entry.Image, strings.Title(entry.Type), searchColor, fields)\n\tcmdInfo.Ses.ChannelMessageSendEmbed(cmdInfo.BotChID, msg)\n}",
"func (_TestABI *TestABITransactor) Search(opts *bind.TransactOpts, ct Struct0) (*types.Transaction, error) {\n\treturn _TestABI.contract.Transact(opts, \"search\", ct)\n}",
"func Search(w http.ResponseWriter, r *http.Request) {\n\tviewData := BaseViewData(w, r)\n\n\ttermMap := utils.GetSearchTermsForString(r.FormValue(\"q\"), true)\n\tterms := make([]string, len(termMap))\n\ti := 0\n\tfor term := range termMap {\n\t\tterms[i] = term\n\t\ti++\n\t}\n\n\tpageNumStr := \"1\"\n\tif len(r.FormValue(\"page\")) > 0 {\n\t\tpageNumStr = r.FormValue(\"page\")\n\t}\n\n\tpage, err := strconv.Atoi(pageNumStr)\n\tif err != nil {\n\t\tviewData.NotFound(w)\n\t\treturn\n\t}\n\t// Correct for the human readable format for page numbers used\n\t// by the client here\n\tpage = page - 1\n\n\tplaceID := -1\n\tif viewData.Session != nil {\n\t\tplaceID = viewData.Session.User.PlaceID\n\t}\n\n\tlistings := []models.Listing{}\n\tif len(terms) > 0 {\n\t\tlistings, err = models.DoSearchForTerms(Base.Db, terms, page, placeID)\n\t\tif err != nil {\n\t\t\tviewData.InternalError(w)\n\t\t\treturn\n\t\t}\n\t}\n\n\tnumPages := models.GetPageCountForTerms(Base.Db, terms, placeID)\n\n\tviewData.Data = searchViewData{\n\t\tListings: listings,\n\t\tQuery: r.FormValue(\"q\"),\n\t\tPage: page + 1,\n\t\tStartOffset: page*50 + 1,\n\t\tEndOffset: page*50 + len(listings),\n\t\tMaxTotal: numPages * 50,\n\t\tOutOf: numPages,\n\t}\n\tRenderView(w, \"search#search\", viewData)\n}",
"func (s *Server) Search(ctx context.Context, req *querypb.SearchRequest) (*internalpb.SearchResults, error) {\n\treturn s.querynode.Search(ctx, req)\n}",
"func search(query string, ch chan<-string){\n\tgo duckDuckGoSearch(query, ch)\n\tgo googleSearch(query, ch)\n\tgo bingSearch(query, ch)\n}",
"func (s *CatalogService) Search(ctx context.Context, storefront string, opt *SearchOptions) (*Search, *Response, error) {\n\tu := fmt.Sprintf(\"v1/catalog/%s/search\", storefront)\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsearch := &Search{}\n\tresp, err := s.client.Do(ctx, req, search)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn search, resp, nil\n}",
"func (c *Client) Search(query string) ([]Place, error) {\n\tvar cli http.Client\n\n\treq, err := http.NewRequest(http.MethodGet, apiSearch, nil)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"could not create HTTP request: %w\", err)\n\t}\n\n\treq.Header.Set(\"User-Agent\", c.UserAgent)\n\n\tform := make(url.Values)\n\tform.Add(\"q\", query)\n\tform.Add(\"format\", \"jsonv2\")\n\tswitch c.AddressDetails {\n\tcase true:\n\t\tform.Add(\"addressdetails\", \"1\")\n\tdefault:\n\t\tform.Add(\"addressdetails\", \"0\")\n\t}\n\tif c.AcceptLanguages != nil {\n\t\tform.Add(\"accept-language\", strings.Join(c.AcceptLanguages, \",\"))\n\t}\n\treq.URL.RawQuery = form.Encode()\n\n\tresp, err := cli.Do(req)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"could not send request to OpenStreetMap: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tout := new(bytes.Buffer)\n\t\tio.Copy(out, resp.Body)\n\t\treturn nil, xerrors.Errorf(\n\t\t\t\"invalid status code %s (%d):\\n%s\",\n\t\t\tresp.Status, resp.StatusCode, out.String(),\n\t\t)\n\t}\n\n\tvar places []Place\n\terr = json.NewDecoder(resp.Body).Decode(&places)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"could not decode JSON reply from %q: %w\", req, err)\n\t}\n\n\treturn places, nil\n}",
"func (s *server) Search(ctx context.Context, req *pb.Request) (*pb.Result, error) { // HL\n\td := randomDuration(100 * time.Millisecond)\n\tlogSleep(ctx, d) // HL\n\tselect {\n\tcase <-time.After(d):\n\t\treturn &pb.Result{ // HL\n\t\t\tTitle: fmt.Sprintf(\"result for [%s] from backend %d\", req.Query, *index), // HL\n\t\t}, nil // HL\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n}",
"func cmdSearch(app *App) error {\n\tif len(app.args) != 1 {\n\t\treturn fmt.Errorf(\"search command takes exactly one arg\")\n\t}\n\n\terr := loadServerPkgList(app)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpkgs := app.serverPkgList\n\tterm := app.args[0]\n\tlog.Printf(\"searching %d pkg names for term %q\", len(pkgs), term)\n\n\tmatches, _ := getPkgMatches(pkgs, term)\n\tif len(matches) == 0 {\n\t\tlog.Printf(\"No package found matching %s\\n\", term)\n\t\treturn nil\n\t}\n\n\tif app.flagSearchv {\n\t\tprintPkgsWithLink(app, matches)\n\t} else {\n\t\tfor _, pkg := range matches {\n\t\t\tfmt.Println(pkg)\n\t\t}\n\t}\n\n\treturn nil\n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ValidateNetwork validates a Network object.
|
func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...)
allErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath("spec"))...)
return allErrs
}
|
[
"func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateNetwork(req abstract.SubnetRequest) (bool, fail.Error) {\n\terr := validation.ValidateStruct(&req,\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Length(1, 64)),\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Match(regexp.MustCompile(`^[a-zA-Z0-9_-]+$`))),\n\t)\n\tif err != nil {\n\t\treturn false, fail.Wrap(err, \"validation issue\")\n\t}\n\n\treturn true, nil\n}",
"func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}",
"func (m *V1NetworkCreateRequest) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDestinationprefixes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNat(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateParentnetworkid(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePrefixes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePrimary(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUnderlay(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *WifiNetwork) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *Network2) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *IpamNetworkDataData) SetNetworkIsValid(v string) {\n\to.NetworkIsValid = &v\n}",
"func (r *Reconciler) validateTransferNetwork(plan *api.Plan) (err error) {\n\tif plan.Spec.TransferNetwork == nil {\n\t\treturn\n\t}\n\tnotFound := libcnd.Condition{\n\t\tType: TransferNetNotValid,\n\t\tStatus: True,\n\t\tCategory: Critical,\n\t\tReason: NotFound,\n\t\tMessage: \"Transfer network is not valid.\",\n\t}\n\tkey := client.ObjectKey{\n\t\tNamespace: plan.Spec.TransferNetwork.Namespace,\n\t\tName: plan.Spec.TransferNetwork.Name,\n\t}\n\tnetAttachDef := &net.NetworkAttachmentDefinition{}\n\terr = r.Get(context.TODO(), key, netAttachDef)\n\tif k8serr.IsNotFound(err) {\n\t\terr = nil\n\t\tplan.Status.SetCondition(notFound)\n\t\treturn\n\t}\n\tif err != nil {\n\t\terr = liberr.Wrap(err)\n\t}\n\n\treturn\n}",
"func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}",
"func ValidateNetworkMode(c *Config) error {\n\tif c.General.Mode == \"\" {\n\t\tc.General.Mode = DefaultNetMode\n\t}\n\tc.General.Mode = strings.ToLower(c.General.Mode)\n\tswitch c.General.Mode {\n\tcase DualStackNetMode:\n\t\tfallthrough\n\tcase IPv4NetMode:\n\t\tfallthrough\n\tcase IPv6NetMode:\n\t\tglog.Infof(\"Building cluster in mode %q\", c.General.Mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported network mode %q entered\", c.General.Mode)\n\t}\n\treturn nil\n}",
"func validateExternalNetwork(ctx context.Context, cli client.Client, externalNetwork string) error {\n\tinstance := &crdv1.ExternalNetwork{}\n\tkey := types.NamespacedName{Name: externalNetwork}\n\terr := cli.Get(ctx, key, instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (nt NetworkType) Validate() error {\n\tswitch nt {\n\tcase NetworkTypeDefault, NetworkTypeHost, NetworkTypeWeave:\n\t\treturn nil\n\tdefault:\n\t\treturn maskAny(errgo.WithCausef(nil, ValidationError, \"unknown network type '%s'\", string(nt)))\n\t}\n}",
"func ValidateNetworks(nets []string) error {\n\tfor _, n := range nets {\n\t\tif _, _, err := net.ParseCIDR(n); err != nil && net.ParseIP(n) == nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"invalid network: %s\", n))\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *NetworkV1Request) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAws(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAzure(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGcp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMock(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutboundInternetTraffic(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (n NetworkConfig) validate() error {\n\tif n.IsEmpty() {\n\t\treturn nil\n\t}\n\tif err := n.VPC.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"vpc\": %w`, err)\n\t}\n\tif err := n.Connect.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"connect\": %w`, err)\n\t}\n\treturn nil\n}",
"func (m *RecoveryPlanNetworkInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNetworkMapping(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStaticIPAssignment(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *NetworkElement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with EquipmentBase\n\tif err := m.EquipmentBase.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCards(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFanmodules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementContoller(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementEntity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePsus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRegisteredDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTopSystem(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUcsmRunningFirmware(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func checkCreateNetwork(t *testing.T, expError bool, tenant, network, encap, subnet, gw string, tag int) {\n\tnet := client.Network{\n\t\tTenantName: tenant,\n\t\tNetworkName: network,\n\t\tEncap: encap,\n\t\tSubnet: subnet,\n\t\tGateway: gw,\n\t\tPktTag: tag,\n\t}\n\terr := contivClient.NetworkPost(&net)\n\tif err != nil && !expError {\n\t\tt.Fatalf(\"Error creating network {%+v}. Err: %v\", net, err)\n\t} else if err == nil && expError {\n\t\tt.Fatalf(\"Create network {%+v} succeded while expecing error\", net)\n\t} else if err == nil {\n\t\t// verify network is created\n\t\t_, err := contivClient.NetworkGet(tenant, network)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error getting network %s/%s. Err: %v\", tenant, network, err)\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ValidateNetworkUpdate validates a Network object before an update.
|
func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
allErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath("spec"))...)
allErrs = append(allErrs, ValidateNetwork(new)...)
return allErrs
}
|
[
"func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}",
"func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child(\"type\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child(\"podCIDR\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child(\"serviceCIDR\"))...)\n\n\treturn allErrs\n}",
"func (m *AzureManagedControlPlane) validateVirtualNetworkUpdate(old *AzureManagedControlPlane) field.ErrorList {\n\tvar allErrs field.ErrorList\n\tif old.Spec.VirtualNetwork.Name != m.Spec.VirtualNetwork.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Name,\n\t\t\t\t\"Virtual Network Name is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.CIDRBlock != m.Spec.VirtualNetwork.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.CIDRBlock,\n\t\t\t\t\"Virtual Network CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.Subnet.Name != m.Spec.VirtualNetwork.Subnet.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.Name,\n\t\t\t\t\"Subnet Name is immutable\"))\n\t}\n\n\t// NOTE: This only works because we force the user to set the CIDRBlock for both the\n\t// managed and unmanaged Vnets. If we ever update the subnet cidr based on what's\n\t// actually set in the subnet, and it is different from what's in the Spec, for\n\t// unmanaged Vnets like we do with the AzureCluster this logic will break.\n\tif old.Spec.VirtualNetwork.Subnet.CIDRBlock != m.Spec.VirtualNetwork.Subnet.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.CIDRBlock,\n\t\t\t\t\"Subnet CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.ResourceGroup != m.Spec.VirtualNetwork.ResourceGroup {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.ResourceGroup\"),\n\t\t\t\tm.Spec.VirtualNetwork.ResourceGroup,\n\t\t\t\t\"Virtual Network Resource Group is immutable\"))\n\t}\n\treturn allErrs\n}",
"func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}",
"func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateNetwork(req abstract.SubnetRequest) (bool, fail.Error) {\n\terr := validation.ValidateStruct(&req,\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Length(1, 64)),\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Match(regexp.MustCompile(`^[a-zA-Z0-9_-]+$`))),\n\t)\n\tif err != nil {\n\t\treturn false, fail.Wrap(err, \"validation issue\")\n\t}\n\n\treturn true, nil\n}",
"func (g *Group) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}",
"func (m *Member) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}",
"func (proc *Proc) NodeNetworkUpdate(node *nm.Node, desc *model.MetricDesc, label *nm.MetricNetworkLables, Log logrus.FieldLogger) error {\n\n\tisExist := false\n\tfor idx, net := range node.Net.Devices {\n\t\tif net.Name == label.Device {\n\t\t\tLog.Debugf(\"Update Network Device[ %s ] Type[ %s ]\", label.Device, desc.FqName)\n\t\t\tisExist = true\n\t\t\tswitch desc.FqName {\n\t\t\tcase nm.MetricNameNetInfo:\n\t\t\tcase nm.MetricNameNetRecvBytes:\n\t\t\t\t// calc\n\t\t\t\tif net.PrevNetStatus.RxBytes != 0 {\n\t\t\t\t\tif net.PrevNetStatus.RxBytes > uint64(desc.Value) {\n\t\t\t\t\t\t// ( UINT64_MAX - prev ) + cur;\n\t\t\t\t\t\tnode.Net.Devices[idx].RxBytes = (math.MaxUint64 - net.PrevNetStatus.RxBytes) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// cur - prev;\n\t\t\t\t\t\tnode.Net.Devices[idx].RxBytes = uint64(desc.Value) - net.PrevNetStatus.RxBytes\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.RxBytes = uint64(desc.Value)\n\t\t\tcase nm.MetricNameNetRecvPackets:\n\t\t\t\tif net.PrevNetStatus.RxPackets != 0 {\n\t\t\t\t\tif net.PrevNetStatus.RxPackets > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].RxPackets = (math.MaxUint64 - net.PrevNetStatus.RxPackets) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].RxPackets = uint64(desc.Value) - net.PrevNetStatus.RxPackets\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.RxPackets = uint64(desc.Value)\n\t\t\tcase nm.MetricNameNetTransmitBytes:\n\t\t\t\tif net.PrevNetStatus.TxBytes != 0 {\n\t\t\t\t\tif net.PrevNetStatus.TxBytes > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxBytes = (math.MaxUint64 - net.PrevNetStatus.TxBytes) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxBytes = uint64(desc.Value) - net.PrevNetStatus.TxBytes\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.TxBytes = uint64(desc.Value)\n\t\t\tcase 
nm.MetricNameNetTransmitPackets:\n\t\t\t\tif net.PrevNetStatus.TxPackets != 0 {\n\t\t\t\t\tif net.PrevNetStatus.TxPackets > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxPackets = (math.MaxUint64 - net.PrevNetStatus.TxPackets) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxPackets = uint64(desc.Value) - net.PrevNetStatus.TxPackets\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.TxPackets = uint64(desc.Value)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tif !isExist {\n\t\tisExist := false\n\t\t// 수집 대상인지 확인\n\t\tfor _, name := range proc.config.CollectionConfig.NetDevices {\n\t\t\tif name == label.Device {\n\t\t\t\tisExist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif isExist {\n\t\t\tLog.Debugf(\"Add New Network Device[ %s ]\", label.Device)\n\t\t\tnd := nm.NetDevice{\n\t\t\t\tName: label.Device,\n\t\t\t\tStatus: \"up\",\n\t\t\t}\n\t\t\tswitch desc.FqName {\n\t\t\tcase nm.MetricNameNetInfo:\n\t\t\tcase nm.MetricNameNetRecvBytes:\n\t\t\t\tnd.PrevNetStatus.RxBytes = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetRecvPackets:\n\t\t\t\tnd.PrevNetStatus.RxPackets = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetTransmitBytes:\n\t\t\t\tnd.PrevNetStatus.TxBytes = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetTransmitPackets:\n\t\t\t\tnd.PrevNetStatus.TxPackets = uint64(desc.Value / 1000)\n\t\t\t}\n\t\t\tnode.Net.Devices = append(node.Net.Devices, nd)\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *Network2) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (net *NetworkUpdateInput) UpdateNetwork() (UpdateNetworkResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(net.Cloud.Name)); status != true {\n\t\treturn UpdateNetworkResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"UpdateNetwork\")\n\t}\n\n\tswitch strings.ToLower(net.Cloud.Name) {\n\tcase \"aws\":\n\n\t\tcreds, err := common.GetCredentials(\n\t\t\t&common.GetCredentialsInput{\n\t\t\t\tProfile: net.Cloud.Profile,\n\t\t\t\tCloud: net.Cloud.Name,\n\t\t\t},\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsession_input := awssess.CreateSessionInput{Region: net.Cloud.Region, KeyId: creds.KeyId, AcessKey: creds.SecretAccess}\n\t\tsess := session_input.CreateAwsSession()\n\n\t\t//authorizing to request further\n\t\tauthinpt := auth.EstablishConnectionInput{Region: net.Cloud.Region, Resource: \"ec2\", Session: sess}\n\n\t\t// I will call UpdateNetwork of interface and get the things done\n\t\tserverin := awsnetwork.UpdateNetworkInput{\n\t\t\tResource: net.Catageory.Resource,\n\t\t\tAction: net.Catageory.Action,\n\t\t\tGetRaw: net.Cloud.GetRaw,\n\t\t\tNetwork: awsnetwork.NetworkCreateInput{\n\t\t\t\tName: net.Catageory.Name,\n\t\t\t\tVpcCidr: net.Catageory.VpcCidr,\n\t\t\t\tVpcId: net.Catageory.VpcId,\n\t\t\t\tSubCidrs: net.Catageory.SubCidrs,\n\t\t\t\tType: net.Catageory.Type,\n\t\t\t\tPorts: net.Catageory.Ports,\n\t\t\t\tZone: net.Catageory.Zone,\n\t\t\t},\n\t\t}\n\t\tresponse, err := serverin.UpdateNetwork(authinpt)\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\treturn UpdateNetworkResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultAzResponse}, nil\n\tcase \"gcp\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultGcpResponse}, nil\n\tcase \"openstack\":\n\t\treturn 
UpdateNetworkResponse{DefaultResponse: common.DefaultOpResponse}, nil\n\tdefault:\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultCloudResponse + \"NetworkUpdate\"}, nil\n\t}\n}",
"func (r *Reconciler) validateNetworkMap(plan *api.Plan) (err error) {\n\tref := plan.Spec.Map.Network\n\tnewCnd := libcnd.Condition{\n\t\tType: NetRefNotValid,\n\t\tStatus: True,\n\t\tCategory: Critical,\n\t\tMessage: \"Map.Network is not valid.\",\n\t}\n\tif !libref.RefSet(&ref) {\n\t\tnewCnd.Reason = NotSet\n\t\tplan.Status.SetCondition(newCnd)\n\t\treturn\n\t}\n\tkey := client.ObjectKey{\n\t\tNamespace: ref.Namespace,\n\t\tName: ref.Name,\n\t}\n\tmp := &api.NetworkMap{}\n\terr = r.Get(context.TODO(), key, mp)\n\tif k8serr.IsNotFound(err) {\n\t\terr = nil\n\t\tnewCnd.Reason = NotFound\n\t\tplan.Status.SetCondition(newCnd)\n\t\treturn\n\t}\n\tif err != nil {\n\t\terr = liberr.Wrap(err)\n\t\treturn\n\t}\n\tif !mp.Status.HasCondition(libcnd.Ready) {\n\t\tplan.Status.SetCondition(libcnd.Condition{\n\t\t\tType: NetMapNotReady,\n\t\t\tStatus: True,\n\t\t\tCategory: Critical,\n\t\t\tMessage: \"Map.Network does not have Ready condition.\",\n\t\t})\n\t}\n\n\tplan.Referenced.Map.Network = mp\n\n\treturn\n}",
"func (r *AWSManagedCluster) ValidateUpdate(old runtime.Object) error {\n\treturn nil\n}",
"func (e *Event) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}",
"func (l *Libvirt) NetworkUpdate(Net Network, Command uint32, Section uint32, ParentIndex int32, XML string, Flags NetworkUpdateFlags) (err error) {\n\tvar buf []byte\n\n\targs := NetworkUpdateArgs {\n\t\tNet: Net,\n\t\tCommand: Command,\n\t\tSection: Section,\n\t\tParentIndex: ParentIndex,\n\t\tXML: XML,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(291, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func (r *Reconciler) validateTransferNetwork(plan *api.Plan) (err error) {\n\tif plan.Spec.TransferNetwork == nil {\n\t\treturn\n\t}\n\tnotFound := libcnd.Condition{\n\t\tType: TransferNetNotValid,\n\t\tStatus: True,\n\t\tCategory: Critical,\n\t\tReason: NotFound,\n\t\tMessage: \"Transfer network is not valid.\",\n\t}\n\tkey := client.ObjectKey{\n\t\tNamespace: plan.Spec.TransferNetwork.Namespace,\n\t\tName: plan.Spec.TransferNetwork.Name,\n\t}\n\tnetAttachDef := &net.NetworkAttachmentDefinition{}\n\terr = r.Get(context.TODO(), key, netAttachDef)\n\tif k8serr.IsNotFound(err) {\n\t\terr = nil\n\t\tplan.Status.SetCondition(notFound)\n\t\treturn\n\t}\n\tif err != nil {\n\t\terr = liberr.Wrap(err)\n\t}\n\n\treturn\n}",
"func (m *NodePoolUpdate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateInstanceTypes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (mr *MockContainerServerMockRecorder) UpdateNetwork(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateNetwork\", reflect.TypeOf((*MockContainerServer)(nil).UpdateNetwork), arg0, arg1, arg2)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ValidateNetworkSpec validates the specification of a Network object.
|
func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(spec.Type) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("type"), "field is required"))
}
var cidrs []cidrvalidation.CIDR
if len(spec.PodCIDR) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("podCIDR"), "field is required"))
} else {
cidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child("podCIDR")))
}
if len(spec.ServiceCIDR) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("serviceCIDR"), "field is required"))
} else {
cidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child("serviceCIDR")))
}
allErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...)
allErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...)
return allErrs
}
|
[
"func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateNetwork(req abstract.SubnetRequest) (bool, fail.Error) {\n\terr := validation.ValidateStruct(&req,\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Length(1, 64)),\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Match(regexp.MustCompile(`^[a-zA-Z0-9_-]+$`))),\n\t)\n\tif err != nil {\n\t\treturn false, fail.Wrap(err, \"validation issue\")\n\t}\n\n\treturn true, nil\n}",
"func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}",
"func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child(\"type\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child(\"podCIDR\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child(\"serviceCIDR\"))...)\n\n\treturn allErrs\n}",
"func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}",
"func (m *Network2) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (in *NetworkSpec) DeepCopy() *NetworkSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NetworkSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (m *DetectionHostNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *V1NetworkCreateRequest) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDestinationprefixes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNat(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateParentnetworkid(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePrefixes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePrimary(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUnderlay(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func IsCIDRNetwork(min, max int) schema.SchemaValidateFunc {\n\treturn func(i interface{}, k string) (warnings []string, errors []error) {\n\t\tv, ok := i.(string)\n\t\tif !ok {\n\t\t\terrors = append(errors, fmt.Errorf(\"expected type of %s to be string\", k))\n\t\t\treturn warnings, errors\n\t\t}\n\n\t\t_, ipnet, err := net.ParseCIDR(v)\n\t\tif err != nil {\n\t\t\terrors = append(errors, fmt.Errorf(\"expected %s to contain a valid Value, got: %s with err: %s\", k, v, err))\n\t\t\treturn warnings, errors\n\t\t}\n\n\t\tif ipnet == nil || v != ipnet.String() {\n\t\t\terrors = append(errors, fmt.Errorf(\"expected %s to contain a valid network Value, expected %s, got %s\",\n\t\t\t\tk, ipnet, v))\n\t\t}\n\n\t\tsigbits, _ := ipnet.Mask.Size()\n\t\tif sigbits < min || sigbits > max {\n\t\t\terrors = append(errors, fmt.Errorf(\"expected %q to contain a network Value with between %d and %d significant bits, got: %d\", k, min, max, sigbits))\n\t\t}\n\n\t\treturn warnings, errors\n\t}\n}",
"func ValidateNetMode(c *container.Config, hc *container.HostConfig) error {\n\t// We may not be passed a host config, such as in the case of docker commit\n\tif hc == nil {\n\t\treturn nil\n\t}\n\tparts := strings.Split(string(hc.NetworkMode), \":\")\n\tswitch mode := parts[0]; mode {\n\tcase \"default\", \"none\":\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid --net: %s\", hc.NetworkMode)\n\t}\n\treturn nil\n}",
"func (m *NetworkV1Request) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAws(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAzure(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGcp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMock(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutboundInternetTraffic(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateNetworkStatus(spec *extensionsv1alpha1.NetworkStatus, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}",
"func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}",
"func (m *NetworkElement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with EquipmentBase\n\tif err := m.EquipmentBase.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCards(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFanmodules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementContoller(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementEntity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePsus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRegisteredDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTopSystem(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUcsmRunningFirmware(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (r *Reconciler) validateTransferNetwork(plan *api.Plan) (err error) {\n\tif plan.Spec.TransferNetwork == nil {\n\t\treturn\n\t}\n\tnotFound := libcnd.Condition{\n\t\tType: TransferNetNotValid,\n\t\tStatus: True,\n\t\tCategory: Critical,\n\t\tReason: NotFound,\n\t\tMessage: \"Transfer network is not valid.\",\n\t}\n\tkey := client.ObjectKey{\n\t\tNamespace: plan.Spec.TransferNetwork.Namespace,\n\t\tName: plan.Spec.TransferNetwork.Name,\n\t}\n\tnetAttachDef := &net.NetworkAttachmentDefinition{}\n\terr = r.Get(context.TODO(), key, netAttachDef)\n\tif k8serr.IsNotFound(err) {\n\t\terr = nil\n\t\tplan.Status.SetCondition(notFound)\n\t\treturn\n\t}\n\tif err != nil {\n\t\terr = liberr.Wrap(err)\n\t}\n\n\treturn\n}",
"func ValidateNetworkMode(c *Config) error {\n\tif c.General.Mode == \"\" {\n\t\tc.General.Mode = DefaultNetMode\n\t}\n\tc.General.Mode = strings.ToLower(c.General.Mode)\n\tswitch c.General.Mode {\n\tcase DualStackNetMode:\n\t\tfallthrough\n\tcase IPv4NetMode:\n\t\tfallthrough\n\tcase IPv6NetMode:\n\t\tglog.Infof(\"Building cluster in mode %q\", c.General.Mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported network mode %q entered\", c.General.Mode)\n\t}\n\treturn nil\n}",
"func (m *RecoveryPlanNetworkInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNetworkMapping(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStaticIPAssignment(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ValidateNetworkSpecUpdate validates the spec of a Network object before an update.
|
func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {
allErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)
return allErrs
}
allErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child("type"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child("podCIDR"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child("serviceCIDR"))...)
return allErrs
}
|
[
"func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}",
"func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(spec.Type) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"type\"), \"field is required\"))\n\t}\n\n\tvar cidrs []cidrvalidation.CIDR\n\n\tif len(spec.PodCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"podCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child(\"podCIDR\")))\n\t}\n\n\tif len(spec.ServiceCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"serviceCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child(\"serviceCIDR\")))\n\t}\n\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...)\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...)\n\n\treturn allErrs\n}",
"func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}",
"func (m *AzureManagedControlPlane) validateVirtualNetworkUpdate(old *AzureManagedControlPlane) field.ErrorList {\n\tvar allErrs field.ErrorList\n\tif old.Spec.VirtualNetwork.Name != m.Spec.VirtualNetwork.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Name,\n\t\t\t\t\"Virtual Network Name is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.CIDRBlock != m.Spec.VirtualNetwork.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.CIDRBlock,\n\t\t\t\t\"Virtual Network CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.Subnet.Name != m.Spec.VirtualNetwork.Subnet.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.Name,\n\t\t\t\t\"Subnet Name is immutable\"))\n\t}\n\n\t// NOTE: This only works because we force the user to set the CIDRBlock for both the\n\t// managed and unmanaged Vnets. If we ever update the subnet cidr based on what's\n\t// actually set in the subnet, and it is different from what's in the Spec, for\n\t// unmanaged Vnets like we do with the AzureCluster this logic will break.\n\tif old.Spec.VirtualNetwork.Subnet.CIDRBlock != m.Spec.VirtualNetwork.Subnet.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.CIDRBlock,\n\t\t\t\t\"Subnet CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.ResourceGroup != m.Spec.VirtualNetwork.ResourceGroup {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.ResourceGroup\"),\n\t\t\t\tm.Spec.VirtualNetwork.ResourceGroup,\n\t\t\t\t\"Virtual Network Resource Group is immutable\"))\n\t}\n\treturn allErrs\n}",
"func (h *hetzner) ValidateCloudSpecUpdate(_ context.Context, _ kubermaticv1.CloudSpec, _ kubermaticv1.CloudSpec) error {\n\treturn nil\n}",
"func (a *AmazonEC2) ValidateCloudSpecUpdate(oldSpec kubermaticv1.CloudSpec, newSpec kubermaticv1.CloudSpec) error {\n\treturn nil\n}",
"func ValidateSeedSpecUpdate(newSeedSpec, oldSeedSpec *core.SeedSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSeedSpec.Networks.Pods, oldSeedSpec.Networks.Pods, fldPath.Child(\"networks\", \"pods\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSeedSpec.Networks.Services, oldSeedSpec.Networks.Services, fldPath.Child(\"networks\", \"services\"))...)\n\tif oldSeedSpec.Networks.Nodes != nil {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSeedSpec.Networks.Nodes, oldSeedSpec.Networks.Nodes, fldPath.Child(\"networks\", \"nodes\"))...)\n\t}\n\n\tif oldSeedSpec.DNS.IngressDomain != nil && newSeedSpec.DNS.IngressDomain != nil {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(*newSeedSpec.DNS.IngressDomain, *oldSeedSpec.DNS.IngressDomain, fldPath.Child(\"dns\", \"ingressDomain\"))...)\n\t}\n\tif oldSeedSpec.Ingress != nil && newSeedSpec.Ingress != nil {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSeedSpec.Ingress.Domain, oldSeedSpec.Ingress.Domain, fldPath.Child(\"ingress\", \"domain\"))...)\n\t}\n\tif oldSeedSpec.Ingress != nil && newSeedSpec.DNS.IngressDomain != nil {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(*newSeedSpec.DNS.IngressDomain, oldSeedSpec.Ingress.Domain, fldPath.Child(\"dns\", \"ingressDomain\"))...)\n\t}\n\tif oldSeedSpec.DNS.IngressDomain != nil && newSeedSpec.Ingress != nil {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSeedSpec.Ingress.Domain, *oldSeedSpec.DNS.IngressDomain, fldPath.Child(\"ingress\", \"domain\"))...)\n\t}\n\n\tif oldSeedSpec.Backup != nil {\n\t\tif newSeedSpec.Backup != nil {\n\t\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSeedSpec.Backup.Provider, oldSeedSpec.Backup.Provider, fldPath.Child(\"backup\", \"provider\"))...)\n\t\t\tallErrs = append(allErrs, 
apivalidation.ValidateImmutableField(newSeedSpec.Backup.Region, oldSeedSpec.Backup.Region, fldPath.Child(\"backup\", \"region\"))...)\n\t\t} else {\n\t\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSeedSpec.Backup, oldSeedSpec.Backup, fldPath.Child(\"backup\"))...)\n\t\t}\n\t}\n\t// If oldSeedSpec doesn't have backup configured, we allow to add it; but not the vice versa.\n\n\treturn allErrs\n}",
"func validateNetwork(req abstract.SubnetRequest) (bool, fail.Error) {\n\terr := validation.ValidateStruct(&req,\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Length(1, 64)),\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Match(regexp.MustCompile(`^[a-zA-Z0-9_-]+$`))),\n\t)\n\tif err != nil {\n\t\treturn false, fail.Wrap(err, \"validation issue\")\n\t}\n\n\treturn true, nil\n}",
"func (s *SubnetSpec) shouldUpdate(existingSubnet network.Subnet) bool {\n\t// No modifications for non-managed subnets\n\tif !s.IsVNetManaged {\n\t\treturn false\n\t}\n\n\t// Update the subnet a NAT Gateway was added for backwards compatibility.\n\tif s.NatGatewayName != \"\" && existingSubnet.SubnetPropertiesFormat.NatGateway == nil {\n\t\treturn true\n\t}\n\n\t// Update the subnet if the service endpoints changed.\n\tif existingSubnet.ServiceEndpoints != nil || len(s.ServiceEndpoints) > 0 {\n\t\tvar existingServiceEndpoints []network.ServiceEndpointPropertiesFormat\n\t\tif existingSubnet.ServiceEndpoints != nil {\n\t\t\tfor _, se := range *existingSubnet.ServiceEndpoints {\n\t\t\t\texistingServiceEndpoints = append(existingServiceEndpoints, network.ServiceEndpointPropertiesFormat{Service: se.Service, Locations: se.Locations})\n\t\t\t}\n\t\t}\n\t\tnewServiceEndpoints := make([]network.ServiceEndpointPropertiesFormat, len(s.ServiceEndpoints))\n\t\tfor _, se := range s.ServiceEndpoints {\n\t\t\tse := se\n\t\t\tnewServiceEndpoints = append(newServiceEndpoints, network.ServiceEndpointPropertiesFormat{Service: ptr.To(se.Service), Locations: &se.Locations})\n\t\t}\n\n\t\tdiff := cmp.Diff(newServiceEndpoints, existingServiceEndpoints)\n\t\treturn diff != \"\"\n\t}\n\treturn false\n}",
"func ValidateCloudProfileSpecUpdate(new, old *core.CloudProfileSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, validateCloudProfileVersionsUpdate(new.Kubernetes.Versions, old.Kubernetes.Versions, fldPath.Child(\"kubernetes\", \"versions\"))...)\n\n\tfor _, oldImage := range old.MachineImages {\n\t\tfor index, newImage := range new.MachineImages {\n\t\t\tif oldImage.Name == newImage.Name {\n\t\t\t\tallErrs = append(\n\t\t\t\t\tallErrs,\n\t\t\t\t\tvalidateCloudProfileVersionsUpdate(\n\t\t\t\t\t\thelper.ToExpirableVersions(newImage.Versions),\n\t\t\t\t\t\thelper.ToExpirableVersions(oldImage.Versions),\n\t\t\t\t\t\tfldPath.Child(\"machineImages\").Index(index).Child(\"versions\"),\n\t\t\t\t\t)...,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn allErrs\n}",
"func ValidateControllerInstallationSpecUpdate(new, old *core.ControllerInstallationSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.RegistrationRef.Name, old.RegistrationRef.Name, fldPath.Child(\"registrationRef\", \"name\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.SeedRef.Name, old.SeedRef.Name, fldPath.Child(\"seedRef\", \"name\"))...)\n\n\treturn allErrs\n}",
"func ValidateSpecChanges(commonPoolSpecs *poolspecs, bdr *BlockDeviceReplacement) (bool, string) {\n\tfor i, oldPoolSpec := range commonPoolSpecs.oldSpec {\n\t\toldPoolSpec := oldPoolSpec\n\t\t// process only when there is change in pool specs\n\t\tif reflect.DeepEqual(&oldPoolSpec, &commonPoolSpecs.newSpec[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tif ok, msg := bdr.IsPoolSpecChangeValid(&oldPoolSpec, &commonPoolSpecs.newSpec[i]); !ok {\n\t\t\treturn false, msg\n\t\t}\n\t}\n\treturn true, \"\"\n}",
"func (proc *Proc) NodeNetworkUpdate(node *nm.Node, desc *model.MetricDesc, label *nm.MetricNetworkLables, Log logrus.FieldLogger) error {\n\n\tisExist := false\n\tfor idx, net := range node.Net.Devices {\n\t\tif net.Name == label.Device {\n\t\t\tLog.Debugf(\"Update Network Device[ %s ] Type[ %s ]\", label.Device, desc.FqName)\n\t\t\tisExist = true\n\t\t\tswitch desc.FqName {\n\t\t\tcase nm.MetricNameNetInfo:\n\t\t\tcase nm.MetricNameNetRecvBytes:\n\t\t\t\t// calc\n\t\t\t\tif net.PrevNetStatus.RxBytes != 0 {\n\t\t\t\t\tif net.PrevNetStatus.RxBytes > uint64(desc.Value) {\n\t\t\t\t\t\t// ( UINT64_MAX - prev ) + cur;\n\t\t\t\t\t\tnode.Net.Devices[idx].RxBytes = (math.MaxUint64 - net.PrevNetStatus.RxBytes) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// cur - prev;\n\t\t\t\t\t\tnode.Net.Devices[idx].RxBytes = uint64(desc.Value) - net.PrevNetStatus.RxBytes\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.RxBytes = uint64(desc.Value)\n\t\t\tcase nm.MetricNameNetRecvPackets:\n\t\t\t\tif net.PrevNetStatus.RxPackets != 0 {\n\t\t\t\t\tif net.PrevNetStatus.RxPackets > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].RxPackets = (math.MaxUint64 - net.PrevNetStatus.RxPackets) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].RxPackets = uint64(desc.Value) - net.PrevNetStatus.RxPackets\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.RxPackets = uint64(desc.Value)\n\t\t\tcase nm.MetricNameNetTransmitBytes:\n\t\t\t\tif net.PrevNetStatus.TxBytes != 0 {\n\t\t\t\t\tif net.PrevNetStatus.TxBytes > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxBytes = (math.MaxUint64 - net.PrevNetStatus.TxBytes) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxBytes = uint64(desc.Value) - net.PrevNetStatus.TxBytes\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.TxBytes = uint64(desc.Value)\n\t\t\tcase 
nm.MetricNameNetTransmitPackets:\n\t\t\t\tif net.PrevNetStatus.TxPackets != 0 {\n\t\t\t\t\tif net.PrevNetStatus.TxPackets > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxPackets = (math.MaxUint64 - net.PrevNetStatus.TxPackets) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxPackets = uint64(desc.Value) - net.PrevNetStatus.TxPackets\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.TxPackets = uint64(desc.Value)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tif !isExist {\n\t\tisExist := false\n\t\t// 수집 대상인지 확인\n\t\tfor _, name := range proc.config.CollectionConfig.NetDevices {\n\t\t\tif name == label.Device {\n\t\t\t\tisExist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif isExist {\n\t\t\tLog.Debugf(\"Add New Network Device[ %s ]\", label.Device)\n\t\t\tnd := nm.NetDevice{\n\t\t\t\tName: label.Device,\n\t\t\t\tStatus: \"up\",\n\t\t\t}\n\t\t\tswitch desc.FqName {\n\t\t\tcase nm.MetricNameNetInfo:\n\t\t\tcase nm.MetricNameNetRecvBytes:\n\t\t\t\tnd.PrevNetStatus.RxBytes = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetRecvPackets:\n\t\t\t\tnd.PrevNetStatus.RxPackets = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetTransmitBytes:\n\t\t\t\tnd.PrevNetStatus.TxBytes = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetTransmitPackets:\n\t\t\t\tnd.PrevNetStatus.TxPackets = uint64(desc.Value / 1000)\n\t\t\t}\n\t\t\tnode.Net.Devices = append(node.Net.Devices, nd)\n\t\t}\n\t}\n\treturn nil\n}",
"func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}",
"func (m *ImageUpdateRequest) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAPIVersion(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSpec(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (l *Libvirt) NetworkUpdate(Net Network, Command uint32, Section uint32, ParentIndex int32, XML string, Flags NetworkUpdateFlags) (err error) {\n\tvar buf []byte\n\n\targs := NetworkUpdateArgs {\n\t\tNet: Net,\n\t\tCommand: Command,\n\t\tSection: Section,\n\t\tParentIndex: ParentIndex,\n\t\tXML: XML,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(291, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func ValidateBackupEntrySpecUpdate(newSpec, oldSpec *core.BackupEntrySpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSpec.BucketName, oldSpec.BucketName, fldPath.Child(\"bucketName\"))...)\n\n\treturn allErrs\n}",
"func (net *NetworkUpdateInput) UpdateNetwork() (UpdateNetworkResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(net.Cloud.Name)); status != true {\n\t\treturn UpdateNetworkResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"UpdateNetwork\")\n\t}\n\n\tswitch strings.ToLower(net.Cloud.Name) {\n\tcase \"aws\":\n\n\t\tcreds, err := common.GetCredentials(\n\t\t\t&common.GetCredentialsInput{\n\t\t\t\tProfile: net.Cloud.Profile,\n\t\t\t\tCloud: net.Cloud.Name,\n\t\t\t},\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsession_input := awssess.CreateSessionInput{Region: net.Cloud.Region, KeyId: creds.KeyId, AcessKey: creds.SecretAccess}\n\t\tsess := session_input.CreateAwsSession()\n\n\t\t//authorizing to request further\n\t\tauthinpt := auth.EstablishConnectionInput{Region: net.Cloud.Region, Resource: \"ec2\", Session: sess}\n\n\t\t// I will call UpdateNetwork of interface and get the things done\n\t\tserverin := awsnetwork.UpdateNetworkInput{\n\t\t\tResource: net.Catageory.Resource,\n\t\t\tAction: net.Catageory.Action,\n\t\t\tGetRaw: net.Cloud.GetRaw,\n\t\t\tNetwork: awsnetwork.NetworkCreateInput{\n\t\t\t\tName: net.Catageory.Name,\n\t\t\t\tVpcCidr: net.Catageory.VpcCidr,\n\t\t\t\tVpcId: net.Catageory.VpcId,\n\t\t\t\tSubCidrs: net.Catageory.SubCidrs,\n\t\t\t\tType: net.Catageory.Type,\n\t\t\t\tPorts: net.Catageory.Ports,\n\t\t\t\tZone: net.Catageory.Zone,\n\t\t\t},\n\t\t}\n\t\tresponse, err := serverin.UpdateNetwork(authinpt)\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\treturn UpdateNetworkResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultAzResponse}, nil\n\tcase \"gcp\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultGcpResponse}, nil\n\tcase \"openstack\":\n\t\treturn 
UpdateNetworkResponse{DefaultResponse: common.DefaultOpResponse}, nil\n\tdefault:\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultCloudResponse + \"NetworkUpdate\"}, nil\n\t}\n}",
"func (r *AWSManagedCluster) ValidateUpdate(old runtime.Object) error {\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ValidateNetworkStatus validates the status of a Network object.
|
func ValidateNetworkStatus(spec *extensionsv1alpha1.NetworkStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
return allErrs
}
|
[
"func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}",
"func CheckNetworkStatus() error {\n\n\tvar networkDownFlag bool\n\tvar retryCount int\n\tnetworkIP, _ := os.LookupEnv(\"NETWORK_TEST_FQDN\")\n\n\tif len(networkIP) == 0 {\n\t\t//setting up IP to google.com as NETWORK_TEST_FQDN is empty.\n\t\tnetworkIP = \"www.google.com\"\n\t}\n\n\trwolog.Debug(\"Checking network Status ...\")\n\tnetworkDownFlag = false\n\tfor {\n\n\t\t_, err := net.LookupIP(networkIP)\n\t\tif err != nil {\n\t\t\trwolog.Error(\"Network is not up \", err.Error())\n\t\t\tnetworkDownFlag = true\n\t\t\ttime.Sleep(3 * time.Second) //sleep for 3 seconds to get network up.\n\t\t\tretryCount++\n\t\t\tif retryCount > 100 {\n\t\t\t\treturn fmt.Errorf(\"Network is down for more then 3000 seconds. Exiting member failed \")\n\t\t\t}\n\t\t} else {\n\t\t\trwolog.Debug(\"Network is up.\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif networkDownFlag == true {\n\t\trwolog.Debug(\" Network was offline, delayed start to allow other members to settle.\")\n\t\ttime.Sleep(30 * time.Second)\n\t}\n\n\treturn nil\n}",
"func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}",
"func ValidateNetworkMode(c *Config) error {\n\tif c.General.Mode == \"\" {\n\t\tc.General.Mode = DefaultNetMode\n\t}\n\tc.General.Mode = strings.ToLower(c.General.Mode)\n\tswitch c.General.Mode {\n\tcase DualStackNetMode:\n\t\tfallthrough\n\tcase IPv4NetMode:\n\t\tfallthrough\n\tcase IPv6NetMode:\n\t\tglog.Infof(\"Building cluster in mode %q\", c.General.Mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported network mode %q entered\", c.General.Mode)\n\t}\n\treturn nil\n}",
"func validateNetwork(req abstract.SubnetRequest) (bool, fail.Error) {\n\terr := validation.ValidateStruct(&req,\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Length(1, 64)),\n\t\tvalidation.Field(&req.Name, validation.Required, validation.Match(regexp.MustCompile(`^[a-zA-Z0-9_-]+$`))),\n\t)\n\tif err != nil {\n\t\treturn false, fail.Wrap(err, \"validation issue\")\n\t}\n\n\treturn true, nil\n}",
"func (m *V1NetworkCreateRequest) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDestinationprefixes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNat(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateParentnetworkid(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePrefixes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePrimary(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUnderlay(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (nt NetworkType) Validate() error {\n\tswitch nt {\n\tcase NetworkTypeDefault, NetworkTypeHost, NetworkTypeWeave:\n\t\treturn nil\n\tdefault:\n\t\treturn maskAny(errgo.WithCausef(nil, ValidationError, \"unknown network type '%s'\", string(nt)))\n\t}\n}",
"func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}",
"func (m *NetworkV1Request) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAws(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAzure(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGcp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMock(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutboundInternetTraffic(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *IpamNetworkDataData) SetNetworkIsValid(v string) {\n\to.NetworkIsValid = &v\n}",
"func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}",
"func (m *WifiNetwork) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func CheckNetworkStatus() error {\n\tif SunnyDay {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"No Internet\")\n}",
"func (in *Network_STATUS) DeepCopy() *Network_STATUS {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Network_STATUS)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (o *IpamNetworkDataData) GetNetworkIsValidOk() (*string, bool) {\n\tif o == nil || o.NetworkIsValid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NetworkIsValid, true\n}",
"func (m VMStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateVMStatusEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(spec.Type) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"type\"), \"field is required\"))\n\t}\n\n\tvar cidrs []cidrvalidation.CIDR\n\n\tif len(spec.PodCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"podCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child(\"podCIDR\")))\n\t}\n\n\tif len(spec.ServiceCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"serviceCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child(\"serviceCIDR\")))\n\t}\n\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...)\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...)\n\n\treturn allErrs\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ValidateNetworkStatusUpdate validates the status field of a Network object.
|
func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {
allErrs := field.ErrorList{}
return allErrs
}
|
[
"func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}",
"func ValidateNetworkStatus(spec *extensionsv1alpha1.NetworkStatus, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}",
"func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child(\"type\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child(\"podCIDR\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child(\"serviceCIDR\"))...)\n\n\treturn allErrs\n}",
"func (r *NetworkReconciler) updateStatus(network *ethereumv1alpha1.Network) error {\n\tnetwork.Status.NodesCount = len(network.Spec.Nodes)\n\n\tif err := r.Status().Update(context.Background(), network); err != nil {\n\t\tr.Log.Error(err, \"unable to update network status\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (m *AzureManagedControlPlane) validateVirtualNetworkUpdate(old *AzureManagedControlPlane) field.ErrorList {\n\tvar allErrs field.ErrorList\n\tif old.Spec.VirtualNetwork.Name != m.Spec.VirtualNetwork.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Name,\n\t\t\t\t\"Virtual Network Name is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.CIDRBlock != m.Spec.VirtualNetwork.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.CIDRBlock,\n\t\t\t\t\"Virtual Network CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.Subnet.Name != m.Spec.VirtualNetwork.Subnet.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.Name,\n\t\t\t\t\"Subnet Name is immutable\"))\n\t}\n\n\t// NOTE: This only works because we force the user to set the CIDRBlock for both the\n\t// managed and unmanaged Vnets. If we ever update the subnet cidr based on what's\n\t// actually set in the subnet, and it is different from what's in the Spec, for\n\t// unmanaged Vnets like we do with the AzureCluster this logic will break.\n\tif old.Spec.VirtualNetwork.Subnet.CIDRBlock != m.Spec.VirtualNetwork.Subnet.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.CIDRBlock,\n\t\t\t\t\"Subnet CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.ResourceGroup != m.Spec.VirtualNetwork.ResourceGroup {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.ResourceGroup\"),\n\t\t\t\tm.Spec.VirtualNetwork.ResourceGroup,\n\t\t\t\t\"Virtual Network Resource Group is immutable\"))\n\t}\n\treturn allErrs\n}",
"func CheckNetworkStatus() error {\n\n\tvar networkDownFlag bool\n\tvar retryCount int\n\tnetworkIP, _ := os.LookupEnv(\"NETWORK_TEST_FQDN\")\n\n\tif len(networkIP) == 0 {\n\t\t//setting up IP to google.com as NETWORK_TEST_FQDN is empty.\n\t\tnetworkIP = \"www.google.com\"\n\t}\n\n\trwolog.Debug(\"Checking network Status ...\")\n\tnetworkDownFlag = false\n\tfor {\n\n\t\t_, err := net.LookupIP(networkIP)\n\t\tif err != nil {\n\t\t\trwolog.Error(\"Network is not up \", err.Error())\n\t\t\tnetworkDownFlag = true\n\t\t\ttime.Sleep(3 * time.Second) //sleep for 3 seconds to get network up.\n\t\t\tretryCount++\n\t\t\tif retryCount > 100 {\n\t\t\t\treturn fmt.Errorf(\"Network is down for more then 3000 seconds. Exiting member failed \")\n\t\t\t}\n\t\t} else {\n\t\t\trwolog.Debug(\"Network is up.\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif networkDownFlag == true {\n\t\trwolog.Debug(\" Network was offline, delayed start to allow other members to settle.\")\n\t\ttime.Sleep(30 * time.Second)\n\t}\n\n\treturn nil\n}",
"func (c *networkStatuses) Update(networkStatus *batch.NetworkStatus) (result *batch.NetworkStatus, err error) {\n\tresult = &batch.NetworkStatus{}\n\terr = c.client.Put().\n\t\tNamespace(c.ns).\n\t\tResource(\"networkstatuses\").\n\t\tName(networkStatus.Name).\n\t\tBody(networkStatus).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func TestNetworkStatus(t *testing.T) {\n\tedgeNode := tc.GetEdgeNode(tc.WithTest(t))\n\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\tt.Fatalf(\"Usage: %s [options] state vol_name...\\n\", os.Args[0])\n\t} else {\n\t\tsecs := int(timewait.Seconds())\n\t\tstate := args[0]\n\t\tt.Log(utils.AddTimestamp(fmt.Sprintf(\"networks: '%s' expected state: '%s' secs: %d\\n\",\n\t\t\targs[1:], state, secs)))\n\n\t\tnws := args[1:]\n\t\tif nws[len(nws)-1] == \"&\" {\n\t\t\tnws = nws[:len(nws)-1]\n\t\t}\n\t\tstates = make(map[string][]nwState)\n\t\tfor _, el := range nws {\n\t\t\tstates[el] = []nwState{{state: \"no info from controller\", timestamp: time.Now()}}\n\t\t}\n\n\t\tif !*newitems {\n\t\t\t// observe existing info object and feed them into eveState object\n\t\t\tif err := tc.GetController().InfoLastCallback(edgeNode.GetID(), nil, eveState.InfoCallback()); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t// we are done if our eveState object is in required state\n\t\tif ready := checkState(eveState, state, nws); ready == nil {\n\n\t\t\ttc.AddProcInfo(edgeNode, checkNet(state, nws))\n\n\t\t\tcallback := func() {\n\t\t\t\tt.Errorf(\"ASSERTION FAILED (%s): expected networks %s in %s state\", time.Now().Format(time.RFC3339Nano), nws, state)\n\t\t\t\tfor k, v := range states {\n\t\t\t\t\tt.Errorf(\"\\tactual %s: %s\", k, v[len(v)-1].state)\n\t\t\t\t\tif checkNewLastState(k, state) {\n\t\t\t\t\t\tt.Errorf(\"\\thistory of states for %s:\", k)\n\t\t\t\t\t\tfor _, st := range v {\n\t\t\t\t\t\t\tt.Errorf(\"\\t\\tstate: %s received in: %s\", st.state, st.timestamp.Format(time.RFC3339Nano))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttc.WaitForProcWithErrorCallback(secs, callback)\n\n\t\t} else {\n\t\t\tt.Log(utils.AddTimestamp(ready.Error()))\n\t\t}\n\n\t\t// sleep to reduce concurrency effects\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}",
"func (_PermInterface *PermInterfaceTransactor) UpdateNetworkBootStatus(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _PermInterface.contract.Transact(opts, \"updateNetworkBootStatus\")\n}",
"func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath(\"status\", \"loadBalancer\"))...)\n\treturn allErrs\n}",
"func ValidateRouteStatusUpdate(route *routeapi.Route, older *routeapi.Route) field.ErrorList {\n\tallErrs := validation.ValidateObjectMetaUpdate(&route.ObjectMeta, &older.ObjectMeta, field.NewPath(\"metadata\"))\n\n\t// TODO: validate route status\n\treturn allErrs\n}",
"func (v Validator) UpdateStatus(newStatus sdk.BondStatus) Validator {\n\tv.Status = newStatus\n\treturn v\n}",
"func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (proc *Proc) NodeNetworkUpdate(node *nm.Node, desc *model.MetricDesc, label *nm.MetricNetworkLables, Log logrus.FieldLogger) error {\n\n\tisExist := false\n\tfor idx, net := range node.Net.Devices {\n\t\tif net.Name == label.Device {\n\t\t\tLog.Debugf(\"Update Network Device[ %s ] Type[ %s ]\", label.Device, desc.FqName)\n\t\t\tisExist = true\n\t\t\tswitch desc.FqName {\n\t\t\tcase nm.MetricNameNetInfo:\n\t\t\tcase nm.MetricNameNetRecvBytes:\n\t\t\t\t// calc\n\t\t\t\tif net.PrevNetStatus.RxBytes != 0 {\n\t\t\t\t\tif net.PrevNetStatus.RxBytes > uint64(desc.Value) {\n\t\t\t\t\t\t// ( UINT64_MAX - prev ) + cur;\n\t\t\t\t\t\tnode.Net.Devices[idx].RxBytes = (math.MaxUint64 - net.PrevNetStatus.RxBytes) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// cur - prev;\n\t\t\t\t\t\tnode.Net.Devices[idx].RxBytes = uint64(desc.Value) - net.PrevNetStatus.RxBytes\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.RxBytes = uint64(desc.Value)\n\t\t\tcase nm.MetricNameNetRecvPackets:\n\t\t\t\tif net.PrevNetStatus.RxPackets != 0 {\n\t\t\t\t\tif net.PrevNetStatus.RxPackets > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].RxPackets = (math.MaxUint64 - net.PrevNetStatus.RxPackets) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].RxPackets = uint64(desc.Value) - net.PrevNetStatus.RxPackets\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.RxPackets = uint64(desc.Value)\n\t\t\tcase nm.MetricNameNetTransmitBytes:\n\t\t\t\tif net.PrevNetStatus.TxBytes != 0 {\n\t\t\t\t\tif net.PrevNetStatus.TxBytes > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxBytes = (math.MaxUint64 - net.PrevNetStatus.TxBytes) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxBytes = uint64(desc.Value) - net.PrevNetStatus.TxBytes\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.TxBytes = uint64(desc.Value)\n\t\t\tcase 
nm.MetricNameNetTransmitPackets:\n\t\t\t\tif net.PrevNetStatus.TxPackets != 0 {\n\t\t\t\t\tif net.PrevNetStatus.TxPackets > uint64(desc.Value) {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxPackets = (math.MaxUint64 - net.PrevNetStatus.TxPackets) + uint64(desc.Value)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.Net.Devices[idx].TxPackets = uint64(desc.Value) - net.PrevNetStatus.TxPackets\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Save\n\t\t\t\tnode.Net.Devices[idx].PrevNetStatus.TxPackets = uint64(desc.Value)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tif !isExist {\n\t\tisExist := false\n\t\t// 수집 대상인지 확인\n\t\tfor _, name := range proc.config.CollectionConfig.NetDevices {\n\t\t\tif name == label.Device {\n\t\t\t\tisExist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif isExist {\n\t\t\tLog.Debugf(\"Add New Network Device[ %s ]\", label.Device)\n\t\t\tnd := nm.NetDevice{\n\t\t\t\tName: label.Device,\n\t\t\t\tStatus: \"up\",\n\t\t\t}\n\t\t\tswitch desc.FqName {\n\t\t\tcase nm.MetricNameNetInfo:\n\t\t\tcase nm.MetricNameNetRecvBytes:\n\t\t\t\tnd.PrevNetStatus.RxBytes = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetRecvPackets:\n\t\t\t\tnd.PrevNetStatus.RxPackets = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetTransmitBytes:\n\t\t\t\tnd.PrevNetStatus.TxBytes = uint64(desc.Value / 1000)\n\t\t\tcase nm.MetricNameNetTransmitPackets:\n\t\t\t\tnd.PrevNetStatus.TxPackets = uint64(desc.Value / 1000)\n\t\t\t}\n\t\t\tnode.Net.Devices = append(node.Net.Devices, nd)\n\t\t}\n\t}\n\treturn nil\n}",
"func (in *Network_STATUS) DeepCopy() *Network_STATUS {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Network_STATUS)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *InterfaceConnectionStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLabel(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValue(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}",
"func ValidateBackupEntryStatusUpdate(newBackupEntry, oldBackupEntry *core.BackupEntry) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestRejectStaleTermMessage tests that if a server receives a request with a stale term number, it rejects the request. Our implementation ignores the request instead. Reference: section 5.1
|
func TestRejectStaleTermMessage(t *testing.T) {
called := false
fakeStep := func(r *raft, m pb.Message) bool {
called = true
return false
}
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
defer closeAndFreeRaft(r)
r.step = fakeStep
r.loadState(pb.HardState{Term: 2})
r.Step(pb.Message{Type: pb.MsgApp, Term: r.Term - 1})
if called {
t.Errorf("stepFunc called = %v, want %v", called, false)
}
}
|
[
"func (r *Raft) handleStaleTerm(replication *followerReplication) {\n\tklog.Errorf(fmt.Sprintf(\"peer:%s/%s has newer term, stopping replication\", replication.peer.ID, replication.peer.Address))\n\treplication.notifyAll(false) // No longer leader\n\tselect {\n\tcase replication.stepDown <- struct{}{}:\n\tdefault:\n\t}\n}",
"func TestREST2eTermNotRespondingToSubReq(t *testing.T) {\n\n\t// Init counter check\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubReReqToE2, 1},\n\t\tCounter{cSubReqTimerExpiry, 2},\n\t\tCounter{cSubDelReReqToE2, 1},\n\t\tCounter{cRestSubFailNotifToXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelReqTimerExpiry, 2},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t})\n\n\tparams := xappConn1.GetRESTSubsReqReportParams(subReqCount)\n\trestSubId := xappConn1.SendRESTSubsReq(t, params)\n\txapp.Logger.Debug(\"Send REST subscriber request for subscriber : %v\", restSubId)\n\n\te2termConn1.RecvSubsReq(t)\n\txapp.Logger.Debug(\"Ignore 1st REST subscriber request for subscriber : %v\", restSubId)\n\n\te2termConn1.RecvSubsReq(t)\n\txapp.Logger.Debug(\"Ignore 2nd REST subscriber request for subscriber : %v\", restSubId)\n\n\te2termConn1.RecvSubsDelReq(t)\n\txapp.Logger.Debug(\"Ignore 1st INTERNAL delete request for subscriber : %v\", restSubId)\n\n\txappConn1.ExpectRESTNotificationNok(t, restSubId, \"allFail\")\n\te2termConn1.RecvSubsDelReq(t)\n\txapp.Logger.Debug(\"Ignore 2nd INTERNAL delete request for subscriber : %v\", restSubId)\n\n\te2SubsId := xappConn1.WaitRESTNotification(t, restSubId)\n\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\twaitSubsCleanup(t, e2SubsId, 10)\n\tmainCtrl.VerifyCounterValues(t)\n\tmainCtrl.VerifyAllClean(t)\n}",
"func TestTaskManifestStaleMessage(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\ttestResponseSender := func(response interface{}) error {\n\t\treturn nil\n\t}\n\n\tmockComparer := mock_session.NewMockTaskComparer(ctrl)\n\tmockIDA := mock_session.NewMockManifestMessageIDAccessor(ctrl)\n\tmockSNA := mock_session.NewMockSequenceNumberAccessor(ctrl)\n\n\ttestTaskManifestResponder := NewTaskManifestResponder(mockComparer, mockSNA, mockIDA, metrics.NewNopEntryFactory(), testResponseSender)\n\n\t// Create test task manifest.\n\ttestManifest := setupTestManifestMessage()\n\n\t// Set up a new manifest with a stale number and distinct message ID.\n\tnewManifest := &ecsacs.TaskManifestMessage{\n\t\tClusterArn: aws.String(testconst.ClusterName),\n\t\tContainerInstanceArn: aws.String(testconst.ContainerInstanceARN),\n\t\tMessageId: aws.String(\"456\"),\n\t\tTasks: []*ecsacs.TaskIdentifier{},\n\t\tTimeline: aws.Int64(StaleTaskManifestSequenceNumber),\n\t}\n\n\t// Expect GetLatestSequenceNumber once in each call to handleManifestMessage.\n\t// The first time, LatestSequenceNumber has not been set, so allow it to pass by comparing\n\t// against an older one.\n\tmockSNA.EXPECT().GetLatestSequenceNumber().Return(int64(StaleTaskManifestSequenceNumber))\n\tmockSNA.EXPECT().GetLatestSequenceNumber().Return(int64(StartingTaskManifestSequenceNumber))\n\n\t// The test manifest should be valid, updating sequence number and message ID only once.\n\tmockSNA.EXPECT().SetLatestSequenceNumber(*testManifest.Timeline)\n\tmockIDA.EXPECT().SetMessageID(testconst.MessageID)\n\tmockComparer.EXPECT().CompareRunningTasksOnInstanceWithManifest(testManifest).Return([]*ecsacs.TaskIdentifier{}, nil)\n\n\thandleManifestMessage := testTaskManifestResponder.HandlerFunc().(func(*ecsacs.TaskManifestMessage))\n\n\t// Handle the task manifest message update, this should correctly set the message ID and sequence number.\n\thandleManifestMessage(testManifest)\n\n\t// Now try to 
update manifest with a stale message. The responder should discard and ignore this message.\n\thandleManifestMessage(newManifest)\n}",
"func TestRESTSubDelReqRetransmission(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cRestSubNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 2},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelRespFromE2, 1},\n\t\tCounter{cRestSubDelRespToXapp, 2},\n\t})\n\n\tvar params *teststube2ap.RESTSubsReqParams = nil\n\n\t//Subs Create\n\trestSubId, e2SubsId := createSubscription(t, xappConn1, e2termConn1, params)\n\n\tqueryXappSubscription(t, int64(e2SubsId), \"RAN_NAME_1\", []string{\"localhost:13560\"})\n\n\t//Subs Delete\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\n\t//Resend delete req\n\tseqBef := mainCtrl.get_msgcounter(t)\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\tmainCtrl.wait_msgcounter_change(t, seqBef, 10)\n\n\t// Del resp\n\te2termConn1.SendSubsDelResp(t, delreq, delmsg)\n\n\twaitSubsCleanup(t, e2SubsId, 10)\n\tmainCtrl.VerifyCounterValues(t)\n\tmainCtrl.VerifyAllClean(t)\n}",
"func TestRESTUnpackSubscriptionDeleteFailureDecodeFail(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cRestSubNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelReqTimerExpiry, 1},\n\t\tCounter{cSubDelReReqToE2, 1},\n\t\tCounter{cSubDelFailFromE2, 2},\n\t})\n\n\t// Req\n\tvar params *teststube2ap.RESTSubsReqParams = nil\n\trestSubId, e2SubsId := createSubscription(t, xappConn1, e2termConn1, params)\n\n\t// Del\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// E2t: Receive 1st SubsDelReq\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\n\t// Decode of this response fails which will result resending original request\n\te2termConn1.SendInvalidE2Asn1Resp(t, delmsg, xapp.RIC_SUB_DEL_FAILURE)\n\n\t// E2t: Receive 2nd SubsDelReq and send SubsDelResp\n\tdelreq, delmsg = e2termConn1.RecvSubsDelReq(t)\n\n\t// Subscription does not exist in in E2 Node.\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, e2SubsId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\tmainCtrl.VerifyAllClean(t)\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func (r *raft) onMessageTermNotMatched(m pb.Message) bool {\n\tif m.Term == 0 || m.Term == r.term {\n\t\treturn false\n\t}\n\tif r.dropRequestVoteFromHighTermNode(m) {\n\t\tplog.Warningf(\"%s dropped RequestVote at term %d from %s, leader available\",\n\t\t\tr.describe(), m.Term, ReplicaID(m.From))\n\t\treturn true\n\t}\n\tif m.Term > r.term {\n\t\tif !isPreVoteMessageWithExpectedHigherTerm(m) {\n\t\t\tplog.Warningf(\"%s received %s with higher term (%d) from %s\",\n\t\t\t\tr.describe(), m.Type, m.Term, ReplicaID(m.From))\n\t\t\tleaderID := NoLeader\n\t\t\tif isLeaderMessage(m.Type) {\n\t\t\t\tleaderID = m.From\n\t\t\t}\n\t\t\tif r.isNonVoting() {\n\t\t\t\tr.becomeNonVoting(m.Term, leaderID)\n\t\t\t} else if r.isWitness() {\n\t\t\t\tr.becomeWitness(m.Term, leaderID)\n\t\t\t} else {\n\t\t\t\tif m.Type == pb.RequestVote {\n\t\t\t\t\tplog.Warningf(\"%s become followerKE after receiving higher term from %s\",\n\t\t\t\t\t\tr.describe(), ReplicaID(m.From))\n\t\t\t\t\t// not to reset the electionTick value to avoid the risk of having the\n\t\t\t\t\t// local node not being to campaign at all. if the local node generates\n\t\t\t\t\t// the tick much slower than other nodes (e.g. 
bad config, hardware\n\t\t\t\t\t// clock issue, bad scheduling, overloaded etc.), it may lose the chance\n\t\t\t\t\t// to ever start a campaign unless we keep its electionTick value here.\n\t\t\t\t\tr.becomeFollowerKE(m.Term, leaderID)\n\t\t\t\t} else {\n\t\t\t\t\tplog.Warningf(\"%s become follower after receiving higher term from %s\",\n\t\t\t\t\t\tr.describe(), ReplicaID(m.From))\n\t\t\t\t\tr.becomeFollower(m.Term, leaderID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if m.Term < r.term {\n\t\tif m.Type == pb.RequestPreVote ||\n\t\t\t(isLeaderMessage(m.Type) && (r.checkQuorum || r.preVote)) {\n\t\t\t// see test TestFreeStuckCandidateWithCheckQuorum for details\n\t\t\tr.send(pb.Message{To: m.From, Type: pb.NoOP})\n\t\t} else {\n\t\t\tplog.Infof(\"%s ignored %s with lower term (%d) from %s\",\n\t\t\t\tr.describe(), m.Type, m.Term, ReplicaID(m.From))\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}",
"func TestRESTUnpackSubscriptionFailureDecodeFail(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubReqTimerExpiry, 1},\n\t\tCounter{cSubReReqToE2, 1},\n\t\tCounter{cSubFailFromE2, 2},\n\t\tCounter{cRestSubFailNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t})\n\n\tconst subReqCount int = 1\n\n\t// Req\n\tparams := xappConn1.GetRESTSubsReqReportParams(subReqCount)\n\trestSubId := xappConn1.SendRESTSubsReq(t, params)\n\n\tcrereq, cremsg := e2termConn1.RecvSubsReq(t)\n\n\t// Decode of this response fails which will result resending original request\n\te2termConn1.SendInvalidE2Asn1Resp(t, cremsg, xapp.RIC_SUB_FAILURE)\n\n\t_, cremsg = e2termConn1.RecvSubsReq(t)\n\n\txappConn1.ExpectRESTNotificationNok(t, restSubId, \"allFail\")\n\n\t// Subscription already created in E2 Node.\n\tfparams := &teststube2ap.E2StubSubsFailParams{}\n\tfparams.Set(crereq)\n\tfparams.SetCauseVal(0, 1, 3) // CauseRIC / duplicate-action\n\te2termConn1.SendSubsFail(t, fparams, cremsg)\n\n\tinstanceId := xappConn1.WaitRESTNotification(t, restSubId)\n\txapp.Logger.Debug(\"TEST: REST notification received e2SubsId=%v\", instanceId)\n\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, crereq.RequestId.InstanceId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\tmainCtrl.VerifyAllClean(t)\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func TestRESTUnpackSubscriptionDeleteResponseDecodeFail(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cRestSubNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelReqTimerExpiry, 1},\n\t\tCounter{cSubDelReReqToE2, 1},\n\t\tCounter{cSubDelFailFromE2, 1},\n\t\tCounter{cSubDelRespFromE2, 1},\n\t})\n\n\t// Req\n\tvar params *teststube2ap.RESTSubsReqParams = nil\n\trestSubId, e2SubsId := createSubscription(t, xappConn1, e2termConn1, params)\n\n\t// Del\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// E2t: Receive 1st SubsDelReq\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\n\t// Decode of this response fails which will result resending original request\n\te2termConn1.SendInvalidE2Asn1Resp(t, delmsg, xapp.RIC_SUB_DEL_RESP)\n\n\t// E2t: Receive 2nd SubsDelReq and send SubsDelResp\n\tdelreq, delmsg = e2termConn1.RecvSubsDelReq(t)\n\n\t// Subscription does not exist in in E2 Node.\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, e2SubsId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\tmainCtrl.VerifyAllClean(t)\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func TestRESTUnpackSubscriptionResponseDecodeFail(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubReqTimerExpiry, 1},\n\t\tCounter{cSubReReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cSubFailFromE2, 1},\n\t\tCounter{cRestSubFailNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t})\n\n\tconst subReqCount int = 1\n\n\t// Req\n\tparams := xappConn1.GetRESTSubsReqReportParams(subReqCount)\n\trestSubId := xappConn1.SendRESTSubsReq(t, params)\n\n\tcrereq, cremsg := e2termConn1.RecvSubsReq(t)\n\t// Decode of this response fails which will result resending original request\n\te2termConn1.SendInvalidE2Asn1Resp(t, cremsg, xapp.RIC_SUB_RESP)\n\n\t_, cremsg = e2termConn1.RecvSubsReq(t)\n\n\txappConn1.ExpectRESTNotificationNok(t, restSubId, \"allFail\")\n\n\t// Subscription already created in E2 Node.\n\tfparams := &teststube2ap.E2StubSubsFailParams{}\n\tfparams.Set(crereq)\n\tfparams.SetCauseVal(0, 1, 3) // CauseRIC / duplicate-action\n\te2termConn1.SendSubsFail(t, fparams, cremsg)\n\n\tinstanceId := xappConn1.WaitRESTNotification(t, restSubId)\n\txapp.Logger.Debug(\"TEST: REST notification received e2SubsId=%v\", instanceId)\n\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, crereq.RequestId.InstanceId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\tmainCtrl.VerifyAllClean(t)\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func TestRESTSubDelReqSubDelFailRespInSubmgr(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cRestSubNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelFailFromE2, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t})\n\n\t// Req\n\tvar params *teststube2ap.RESTSubsReqParams = nil\n\trestSubId, e2SubsId := createSubscription(t, xappConn1, e2termConn1, params)\n\n\t// Del\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// E2t: Send receive SubsDelReq and send SubsDelFail\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t//Wait that subs is cleaned\n\twaitSubsCleanup(t, e2SubsId, 10)\n\n\tmainCtrl.VerifyCounterValues(t)\n\tmainCtrl.VerifyAllClean(t)\n}",
"func TestRESTUnpackSubscriptionDeleteFailureNoTransaction(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cRestSubNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelReqTimerExpiry, 2},\n\t\tCounter{cSubDelReReqToE2, 1},\n\t\tCounter{cSubDelFailFromE2, 2},\n\t})\n\n\t// Req\n\tvar params *teststube2ap.RESTSubsReqParams = nil\n\trestSubId, e2SubsId := createSubscription(t, xappConn1, e2termConn1, params)\n\n\t// Del\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// E2t: Receive 1st SubsDelReq\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\n\tmainCtrl.MakeTransactionNil(t, e2SubsId)\n\n\t// No transaction exist for this response which will result resending original request\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t// E2t: Receive 2nd SubsDelReq\n\tdelreq, delmsg = e2termConn1.RecvSubsDelReq(t)\n\n\t// Subscription does not exist in in E2 Node.\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, e2SubsId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\tmainCtrl.VerifyAllClean(t)\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func TestRESTUnpackSubscriptionDeleteResponseUnknownInstanceId(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cRestSubNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelReqTimerExpiry, 1},\n\t\tCounter{cSubDelReReqToE2, 1},\n\t\tCounter{cSubDelRespFromE2, 1},\n\t\tCounter{cSubDelFailFromE2, 1},\n\t})\n\n\t// Req\n\tvar params *teststube2ap.RESTSubsReqParams = nil\n\trestSubId, e2SubsId := createSubscription(t, xappConn1, e2termConn1, params)\n\n\t// Del\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// E2t: Receive 1st SubsDelReq\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\n\t// Unknown instanceId in this response which will result resending original request\n\tdelreq.RequestId.InstanceId = 0\n\te2termConn1.SendSubsDelResp(t, delreq, delmsg)\n\n\t// E2t: Receive 2nd SubsDelReq\n\tdelreq, delmsg = e2termConn1.RecvSubsDelReq(t)\n\n\t// Subscription does not exist in in E2 Node.\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, e2SubsId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\tmainCtrl.VerifyAllClean(t)\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func TestRESTUnpackSubscriptionFailureNoTransaction(t *testing.T) {\n\n\tconst subReqCount int = 1\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubReqTimerExpiry, 2},\n\t\tCounter{cSubReReqToE2, 1},\n\t\tCounter{cSubFailFromE2, 2},\n\t\tCounter{cRestSubFailNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelReqTimerExpiry, 2},\n\t\tCounter{cSubDelReReqToE2, 1},\n\t\tCounter{cSubDelRespFromE2, 2},\n\t})\n\n\t// Req\n\tparams := xappConn1.GetRESTSubsReqReportParams(subReqCount)\n\trestSubId := xappConn1.SendRESTSubsReq(t, params)\n\n\tcrereq, cremsg := e2termConn1.RecvSubsReq(t)\n\n\tmainCtrl.MakeTransactionNil(t, crereq.RequestId.InstanceId)\n\n\t// No transaction exist for this response which will result resending original request\n\tfparams := &teststube2ap.E2StubSubsFailParams{}\n\tfparams.Set(crereq)\n\te2termConn1.SendSubsFail(t, fparams, cremsg)\n\n\t_, cremsg = e2termConn1.RecvSubsReq(t)\n\n\txappConn1.ExpectRESTNotificationNok(t, restSubId, \"allFail\")\n\n\t// Subscription already created in E2 Node.\n\tfparams.SetCauseVal(0, 1, 3) // CauseRIC / duplicate-action\n\te2termConn1.SendSubsFail(t, fparams, cremsg)\n\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\te2termConn1.SendSubsDelResp(t, delreq, delmsg)\n\n\t// Resending happens because there no transaction\n\tdelreq, delmsg = e2termConn1.RecvSubsDelReq(t)\n\te2termConn1.SendSubsDelResp(t, delreq, delmsg)\n\n\tinstanceId := xappConn1.WaitRESTNotification(t, restSubId)\n\txapp.Logger.Debug(\"TEST: REST notification received e2SubsId=%v\", instanceId)\n\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, crereq.RequestId.InstanceId, 
10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\tmainCtrl.VerifyAllClean(t)\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func testDelayedRevokeWithLeaseRequest2() {\n\tst.SetDelay(2) // Lease expires before revoking finishes\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokekey:15\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\tts := time.Now()\n\t\t// get key1 and want a lease\n\t\treplyG, err := st.Get(key1, true)\n\t\tif checkErrorStatus(err, replyG.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif isTimeOK(time.Since(ts)) {\n\t\t\t// in this case, server should reply old value and refuse lease\n\t\t\tif replyG.Lease.Granted || replyG.Value != \"old-value\" {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should return old value and not grant lease\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif st.comp_revoke[key1] || (!replyG.Lease.Granted || replyG.Value != \"new-value\") {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should return new value and grant lease\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevoke(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}",
"func TestSubDelReqRetransmission(t *testing.T) {\n\tCaseBegin(\"TestSubDelReqRetransmission\")\n\n\t//Subs Create\n\tcretrans := xappConn1.SendSubsReq(t, nil, nil)\n\tcrereq, cremsg := e2termConn1.RecvSubsReq(t)\n\te2termConn1.SendSubsResp(t, crereq, cremsg)\n\te2SubsId := xappConn1.RecvSubsResp(t, cretrans)\n\n\t//Subs Delete\n\tdeltrans := xappConn1.SendSubsDelReq(t, nil, e2SubsId)\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\n\tseqBef := mainCtrl.get_msgcounter(t)\n\txappConn1.SendSubsDelReq(t, deltrans, e2SubsId) //Retransmitted SubDelReq\n\tmainCtrl.wait_msgcounter_change(t, seqBef, 10)\n\n\t// hack as there is no real way to see has message be handled.\n\t// Previuos counter check just tells that is has been received by submgr\n\t// --> artificial delay\n\t<-time.After(1 * time.Second)\n\n\te2termConn1.SendSubsDelResp(t, delreq, delmsg)\n\txappConn1.RecvSubsDelResp(t, deltrans)\n\n\t//Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, e2SubsId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\txappConn2.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n}",
"func TestSubDelReqSubDelFailRespInSubmgr(t *testing.T) {\n\tCaseBegin(\"TestSubReqSubDelFailRespInSubmgr start\")\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cSubReqFromXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cSubRespToXapp, 1},\n\t\tCounter{cSubDelReqFromXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelFailFromE2, 1},\n\t\tCounter{cSubDelRespToXapp, 1},\n\t})\n\n\t// Subs Create\n\tcretrans := xappConn1.SendSubsReq(t, nil, nil)\n\tcrereq, cremsg := e2termConn1.RecvSubsReq(t)\n\te2termConn1.SendSubsResp(t, crereq, cremsg)\n\te2SubsId := xappConn1.RecvSubsResp(t, cretrans)\n\n\t// Xapp: Send SubsDelReq\n\tdeltrans := xappConn1.SendSubsDelReq(t, nil, e2SubsId)\n\n\t// E2t: Send receive SubsDelReq and send SubsDelFail\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t// Xapp: Receive SubsDelResp\n\txappConn1.RecvSubsDelResp(t, deltrans)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, e2SubsId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\txappConn2.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func TestRESTSubReqSubFailRespInSubmgr(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubFailFromE2, 1},\n\t\tCounter{cRestSubFailNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t})\n\n\tconst subReqCount int = 1\n\tconst e2Timeout int64 = 2\n\tconst e2RetryCount int64 = 1\n\tconst routingNeeded bool = true\n\n\tparams := xappConn1.GetRESTSubsReqReportParams(subReqCount)\n\tparams.SetSubscriptionDirectives(e2Timeout, e2RetryCount, routingNeeded)\n\trestSubId := xappConn1.SendRESTSubsReq(t, params)\n\n\tcrereq1, cremsg1 := e2termConn1.RecvSubsReq(t)\n\tfparams1 := &teststube2ap.E2StubSubsFailParams{}\n\tfparams1.Set(crereq1)\n\txappConn1.ExpectRESTNotificationNok(t, restSubId, \"allFail\")\n\te2termConn1.SendSubsFail(t, fparams1, cremsg1)\n\n\te2SubsId := xappConn1.WaitRESTNotification(t, restSubId)\n\txapp.Logger.Debug(\"TEST: REST notification received e2SubsId=%v\", e2SubsId)\n\n\t// REST subscription sill there to be deleted\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// Wait that subs is cleaned\n\twaitSubsCleanup(t, e2SubsId, 10)\n\n\tmainCtrl.VerifyCounterValues(t)\n\tmainCtrl.VerifyAllClean(t)\n}",
"func TestRESTUnpackSubscriptionDeleteailureUnknownInstanceId(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 1},\n\t\tCounter{cSubRespFromE2, 1},\n\t\tCounter{cRestSubNotifToXapp, 1},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t\tCounter{cSubDelReqToE2, 1},\n\t\tCounter{cSubDelReqTimerExpiry, 1},\n\t\tCounter{cSubDelReReqToE2, 1},\n\t\tCounter{cSubDelFailFromE2, 2},\n\t})\n\n\t// Req\n\tvar params *teststube2ap.RESTSubsReqParams = nil\n\trestSubId, e2SubsId := createSubscription(t, xappConn1, e2termConn1, params)\n\n\t// Del\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\n\t// E2t: Receive 1st SubsDelReq\n\tdelreq, delmsg := e2termConn1.RecvSubsDelReq(t)\n\n\t// Unknown instanceId 0 in this response which will result resending original request\n\tdelreq.RequestId.InstanceId = 0\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t// E2t: Receive 2nd SubsDelReq\n\tdelreq, delmsg = e2termConn1.RecvSubsDelReq(t)\n\n\t// Subscription does not exist in in E2 Node. E2 Node responds with failure but there is also same unknown instanceId 0\n\te2termConn1.SendSubsDelFail(t, delreq, delmsg)\n\n\t// Wait that subs is cleaned\n\tmainCtrl.wait_subs_clean(t, e2SubsId, 10)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\tmainCtrl.VerifyAllClean(t)\n\tmainCtrl.VerifyCounterValues(t)\n}",
"func TestRESTPolicySubReqAndSubDelOk19E2Subs(t *testing.T) {\n\n\tmainCtrl.CounterValuesToBeVeriefied(t, CountersToBeAdded{\n\t\tCounter{cRestSubReqFromXapp, 1},\n\t\tCounter{cRestSubRespToXapp, 1},\n\t\tCounter{cSubReqToE2, 19},\n\t\tCounter{cSubRespFromE2, 19},\n\t\tCounter{cRestSubNotifToXapp, 19},\n\t\tCounter{cRestSubDelReqFromXapp, 1},\n\t\tCounter{cSubDelReqToE2, 19},\n\t\tCounter{cSubDelRespFromE2, 19},\n\t\tCounter{cRestSubDelRespToXapp, 1},\n\t})\n\n\tconst subReqCount int = 19\n\t// Req\n\tparams := xappConn1.GetRESTSubsReqPolicyParams(subReqCount)\n\trestSubId := xappConn1.SendRESTSubsReq(t, params)\n\te2SubsIds := sendAndReceiveMultipleE2SubReqs(t, subReqCount, xappConn1, e2termConn1, restSubId)\n\n\tassert.Equal(t, len(e2SubsIds), 19)\n\n\txappConn1.SendRESTSubsDelReq(t, &restSubId)\n\tsendAndReceiveMultipleE2DelReqs(t, e2SubsIds, e2termConn1)\n\n\txappConn1.TestMsgChanEmpty(t)\n\te2termConn1.TestMsgChanEmpty(t)\n\tmainCtrl.wait_registry_empty(t, 10)\n\n\tmainCtrl.VerifyCounterValues(t)\n\tmainCtrl.VerifyAllClean(t)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestStartAsFollower tests that when servers start up, they begin as followers. Reference: section 5.2
|
func TestStartAsFollower(t *testing.T) {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
defer closeAndFreeRaft(r)
if r.state != StateFollower {
t.Errorf("state = %s, want %s", r.state, StateFollower)
}
}
|
[
"func BeFollower(ctx context.Context) {\n\tlg.Log.Info(\"Started waiting for heartbeats\")\n\n\trunning := true\n\tfor running && ctx.Err() == nil {\n\t\t// Randomize timeout every loop to prevent from forcing a node into special role\n\t\tconfig.Default.RandomizeHeartbeatTimeout()\n\t\tselect {\n\t\tcase <-Heartbeat:\n\t\t\tlg.Log.Debug(\"Got heartbeat\")\n\t\tcase <-time.After(config.Default.HeartbeatTimeout):\n\t\t\tlg.Log.Info(\"Heartbeat timed out\")\n\t\t\tif BeCandidate() {\n\t\t\t\tstate.DefaultVolatileState.CurrentLeader = \"\"\n\t\t\t\trunning = false\n\t\t\t\tlg.Log.Infof(\"I'm master now for term %d\", state.DefaultPersistentState.GetCurrentTerm())\n\t\t\t}\n\t\t}\n\t}\n\n\tlg.Log.Info(\"Stopped waiting for heartbeats\")\n}",
"func TestFollower(t *testing.T) {\n\tf := newFixture(t)\n\n\tprevUpdateTime := time.Now().Add(-10 * time.Second)\n\tprevUpdateTimeKube := metav1.NewTime(prevUpdateTime)\n\tmetric0, metric0Typed := newFakeDatadogMetric(\"default\", \"dd-metric-0\", \"metric query0\", datadoghq.DatadogMetricStatus{\n\t\tValue: \"10\",\n\t\tConditions: []datadoghq.DatadogMetricCondition{\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeActive,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeValid,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeUpdated,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeError,\n\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t},\n\t})\n\tmetric1, metric1Typed := newFakeDatadogMetric(\"default\", \"autogen-1\", \"metric query1\", datadoghq.DatadogMetricStatus{\n\t\tValue: \"10\",\n\t\tConditions: []datadoghq.DatadogMetricCondition{\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeActive,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeValid,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeUpdated,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: 
prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeError,\n\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t},\n\t})\n\tunstructured.SetNestedField(metric1.Object, \"dd-metric-1\", \"spec\", \"externalMetricName\")\n\tmetric1Typed.Spec.ExternalMetricName = \"dd-metric-1\"\n\n\tupdateTime := time.Now()\n\tf.datadogMetricLister = append(f.datadogMetricLister, metric0, metric1)\n\tf.objects = append(f.objects, metric0Typed, metric1Typed)\n\t// We have new updates locally (maybe leader changed or something. Followers should still overwrite local cache)\n\tddm := model.DatadogMetricInternal{\n\t\tID: \"default/dd-metric-0\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tValue: 20.0,\n\t\tUpdateTime: kubernetes.TimeWithoutWall(updateTime),\n\t\tDataTime: kubernetes.TimeWithoutWall(updateTime),\n\t\tError: fmt.Errorf(\"Error from backend while fetching metric\"),\n\t}\n\tddm.SetQueries(\"metric query0\")\n\tf.store.Set(\"default/dd-metric-0\", ddm, \"utest\")\n\n\tf.runControllerSync(false, \"default/dd-metric-0\", nil)\n\n\t// Check internal store content\n\tassert.Equal(t, 1, f.store.Count())\n\tddm = model.DatadogMetricInternal{\n\t\tID: \"default/dd-metric-0\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tValue: 10.0,\n\t\tUpdateTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tDataTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tError: nil,\n\t}\n\tddm.SetQueries(\"metric query0\")\n\tassert.Equal(t, &ddm, f.store.Get(\"default/dd-metric-0\"))\n\n\tf.runControllerSync(false, \"default/autogen-1\", nil)\n\tassert.Equal(t, 2, f.store.Count())\n\n\tddm = model.DatadogMetricInternal{\n\t\tID: \"default/autogen-1\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tAutogen: true,\n\t\tExternalMetricName: \"dd-metric-1\",\n\t\tValue: 10.0,\n\t\tUpdateTime: 
kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tDataTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tError: nil,\n\t}\n\tddm.SetQueries(\"metric query1\")\n\tassert.Equal(t, &ddm, f.store.Get(\"default/autogen-1\"))\n}",
"func (s *ConnectionSuite) TestGetFollowedByUsersList(c *C) {\n\tc.Skip(\"not impletented\")\n}",
"func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestPresenceStart(t *testing.T){\n PrintTestMessage(\"==========Presence tests start==========\")\n}",
"func TestVote_Follower(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\n\tt.Run(\"Handle RequestVote with Stale Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tleader.setCurrentTerm(3)\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// make sure the client get the correct response while registering itself with a candidate\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle RequestVote with Higher Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader.leaderMutex.Lock()\n\t\tlogEntry := &rpc.LogEntry{\n\t\t\tIndex: leader.LastLogIndex() + 1,\n\t\t\tTermId: leader.GetCurrentTerm(),\n\t\t\tType: rpc.CommandType_NOOP,\n\t\t\tData: []byte{1, 2, 3, 4},\n\t\t}\n\t\tleader.StoreLog(logEntry)\n\t\tleader.leaderMutex.Unlock()\n\t\ttime.Sleep(1 * time.Second)\n\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\n\t\treply, _ = followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func TestStart(t *testing.T) {\n\ts := SetUpSuite(t)\n\n\t// Fetch the services.App that the service heartbeat.\n\tservers, err := s.authServer.AuthServer.GetApplicationServers(s.closeContext, defaults.Namespace)\n\trequire.NoError(t, err)\n\n\t// Check that the services.Server sent via heartbeat is correct. For example,\n\t// check that the dynamic labels have been evaluated.\n\tappFoo, err := types.NewAppV3(types.Metadata{\n\t\tName: \"foo\",\n\t\tLabels: staticLabels,\n\t}, types.AppSpecV3{\n\t\tURI: s.testhttp.URL,\n\t\tPublicAddr: \"foo.example.com\",\n\t\tDynamicLabels: map[string]types.CommandLabelV2{\n\t\t\tdynamicLabelName: {\n\t\t\t\tPeriod: dynamicLabelPeriod,\n\t\t\t\tCommand: dynamicLabelCommand,\n\t\t\t\tResult: \"4\",\n\t\t\t},\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\tserverFoo, err := types.NewAppServerV3FromApp(appFoo, \"test\", s.hostUUID)\n\trequire.NoError(t, err)\n\tappAWS, err := types.NewAppV3(types.Metadata{\n\t\tName: \"awsconsole\",\n\t\tLabels: staticLabels,\n\t}, types.AppSpecV3{\n\t\tURI: constants.AWSConsoleURL,\n\t\tPublicAddr: \"aws.example.com\",\n\t})\n\trequire.NoError(t, err)\n\tserverAWS, err := types.NewAppServerV3FromApp(appAWS, \"test\", s.hostUUID)\n\trequire.NoError(t, err)\n\n\tsort.Sort(types.AppServers(servers))\n\trequire.Empty(t, cmp.Diff([]types.AppServer{serverAWS, serverFoo}, servers,\n\t\tcmpopts.IgnoreFields(types.Metadata{}, \"ID\", \"Expires\")))\n\n\t// Check the expiry time is correct.\n\tfor _, server := range servers {\n\t\trequire.True(t, s.clock.Now().Before(server.Expiry()))\n\t\trequire.True(t, s.clock.Now().Add(2*defaults.ServerAnnounceTTL).After(server.Expiry()))\n\t}\n}",
"func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}",
"func (r *Raft) runFollower() {\n\tfor {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\t// Handle the command\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\tr.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\tr.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"In follower state, got unexpected command: %#v\", rpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\t\tcase <-randomTimeout(r.conf.HeartbeatTimeout, r.conf.ElectionTimeout):\n\t\t\t// Heartbeat failed! Go to the candidate state\n\t\t\tr.logW.Printf(\"Heartbeat timeout, start election process\")\n\t\t\tr.setState(Candidate)\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (m *TestFixClient) Start() error {\n\treturn m.initiator.Start()\n}",
"func TestPartition(t *testing.T) {\n\traftConf := &RaftConfig{MemberRegSocket: \"127.0.0.1:8124\", PeerSocket: \"127.0.0.1:9987\", TimeoutInMillis: 1500, HbTimeoutInMillis: 150, LogDirectoryPath: \"logs1\", StableStoreDirectoryPath: \"./stable1\", RaftLogDirectoryPath: \"../LocalLog1\"}\n\n\t// delete stored state to avoid unnecessary effect on following test cases\n\tinitState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)\n\n\t// launch cluster proxy servers\n\tcluster.NewProxyWithConfig(RaftToClusterConf(raftConf))\n\n\tfmt.Println(\"Started Proxy\")\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tserverCount := 5\n\traftServers := make([]raft.Raft, serverCount+1)\n\tpseudoClusters := make([]*test.PseudoCluster, serverCount+1)\n\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\t// create cluster.Server\n\t\tclusterServer, err := cluster.NewWithConfig(i, \"127.0.0.1\", 8500+i, RaftToClusterConf(raftConf))\n\t\tpseudoCluster := test.NewPseudoCluster(clusterServer)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating cluster server. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tlogStore, err := llog.Create(raftConf.RaftLogDirectoryPath + \"/\" + strconv.Itoa(i))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating log. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\ts, err := NewWithConfig(pseudoCluster, logStore, raftConf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating Raft servers. 
\" + err.Error())\n\t\t\treturn\n\t\t}\n\t\traftServers[i] = s\n\t\tpseudoClusters[i] = pseudoCluster\n\t}\n\n\t// wait for leader to be elected\n\ttime.Sleep(20 * time.Second)\n\tcount := 0\n\toldLeader := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\toldLeader = i\n\t\t\tcount++\n\t\t}\n\t}\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in 1 minute\")\n\t\treturn\n\t}\n\n\t// isolate Leader and any one follower\n\tfollower := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif i != oldLeader {\n\t\t\tfollower = i\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"Server \" + strconv.Itoa(follower) + \" was chosen as follower in minority partition\")\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tpseudoClusters[oldLeader].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[oldLeader].AddToOutboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToOutboxFilter(raftServers[i].Pid())\n\t}\n\n\tpseudoClusters[oldLeader].AddToOutboxFilter(cluster.BROADCAST)\n\tpseudoClusters[follower].AddToOutboxFilter(cluster.BROADCAST)\n\n\t// wait for other servers to discover that leader\n\t// has crashed and to elect a new leader\n\ttime.Sleep(20 * time.Second)\n\n\tcount = 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\n\t\tif i != oldLeader && i != follower && raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\tfmt.Println(\"Server \" + strconv.Itoa(i) + \" was chosen as new leader in majority partition.\")\n\t\t\tcount++\n\t\t}\n\t}\n\t// new leader must be chosen\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in majority partition\")\n\t}\n}",
"func TestRaftNewLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftNewLeader\"\n\n\t// Create n1 node.\n\tstorage := NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm1 := newTestFSM(ID1)\n\t// NOTE we use different cluster ID for nodes within same cluster to avoid\n\t// registering same metric path twice. This should never happen in real world\n\t// because we'll never run nodes of a same cluster within one process.\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t// Create n2 node.\n\tstorage = NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Wait until a leader is elected.\n\tselect {\n\tcase <-fsm1.leaderCh:\n\tcase <-fsm2.leaderCh:\n\t}\n}",
"func (m *MsgPing) Follower(interfaces.IState) bool {\n\treturn true\n}",
"func (handler *RuleHandler) FollowerOnAddServer(msg iface.MsgAddServer, log iface.RaftLog, status iface.Status) []interface{} {\n\t// leader should be responsible for this\n\treturn []interface{}{iface.ReplyNotLeader{}}\n}",
"func ExampleSync(runenv *runtime.RunEnv) error {\n\tvar (\n\t\treadyState = sync.State(\"ready\")\n\t\tstartState = sync.State(\"start\")\n\t)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)\n\tdefer cancel()\n\n\tclient := sync.MustBoundClient(ctx, runenv)\n\tdefer client.Close()\n\n\tnetclient := network.NewClient(client, runenv)\n\trunenv.RecordMessage(\"Waiting for network initialization\")\n\n\tnetclient.MustWaitNetworkInitialized(ctx)\n\trunenv.RecordMessage(\"Network initilization complete\")\n\n\ttopic := sync.NewTopic(\"messages\", \"\")\n\n\tseq, err := client.Publish(ctx, topic, runenv.TestRun)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trunenv.RecordMessage(\"My sequence ID: %d\", seq)\n\n\tif seq == 1 {\n\t\trunenv.RecordMessage(\"I'm the boss.\")\n\t\tnumFollowers := runenv.TestInstanceCount - 1\n\n\t\trunenv.RecordMessage(\"Waiting for %d instances to become ready\", numFollowers)\n\t\terr := <-client.MustBarrier(ctx, readyState, numFollowers).C\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trunenv.RecordMessage(\"The followers are all ready\")\n\t\trunenv.RecordMessage(\"Ready...\")\n\t\ttime.Sleep(1 * time.Second)\n\t\trunenv.RecordMessage(\"Set...\")\n\t\ttime.Sleep(5 * time.Second)\n\t\trunenv.RecordMessage(\"Go!\")\n\n\t\tclient.MustSignalEntry(ctx, startState)\n\t\treturn nil\n\t}\n\n\trand.Seed(time.Now().UnixNano())\n\tsleepTime := rand.Intn(10)\n\trunenv.RecordMessage(\"I'm a follower. Signaling ready after %d seconds\", sleepTime)\n\ttime.Sleep(time.Duration(sleepTime) * time.Second)\n\n\tclient.MustSignalEntry(ctx, readyState)\n\n\terr = <-client.MustBarrier(ctx, startState, 1).C\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trunenv.RecordMessage(\"Received Start\")\n\treturn nil\n}",
"func (t *Task) AddFollower(u *user.User, reply *user.User) error {\n\t// err := user.AddFollower(u, follower)\n\n\treturn nil\n}",
"func (r *Raft) startReplication(state *leaderState, peer net.Addr) {\n\ts := &followerReplication{\n\t\tpeer: peer,\n\t\tinflight: state.inflight,\n\t\tstopCh: make(chan struct{}),\n\t\ttriggerCh: make(chan struct{}, 1),\n\t\tmatchIndex: r.getLastLogIndex(),\n\t\tnextIndex: r.getLastLogIndex() + 1,\n\t}\n\tstate.replicationState[peer.String()] = s\n\tgo r.replicate(s)\n}",
"func (f *Base) TestRunStarted() {}",
"func (s *ConnectionSuite) TestGetFollowedByUsersListOfConnection(c *C) {\n\tc.Skip(\"not impletented\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestLeaderBcastBeat tests that if the leader receives a heartbeat tick, it will send a msgApp with m.Index = 0, m.LogTerm=0 and empty entries as heartbeat to all followers. Reference: section 5.2
|
func TestLeaderBcastBeat(t *testing.T) {
// heartbeat interval
hi := 1
r := newTestRaft(1, []uint64{1, 2, 3}, 10, hi, NewMemoryStorage())
defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
for i := 0; i < 10; i++ {
r.appendEntry(pb.Entry{Index: uint64(i) + 1})
}
for i := 0; i < hi; i++ {
r.tick()
}
msgs := r.readMessages()
sort.Sort(messageSlice(msgs))
wmsgs := []pb.Message{
{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
To: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},
Term: 1, Type: pb.MsgHeartbeat},
{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
To: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},
Term: 1, Type: pb.MsgHeartbeat},
}
if !reflect.DeepEqual(msgs, wmsgs) {
t.Errorf("msgs = %v, want %v", msgs, wmsgs)
}
}
|
[
"func TestRecvMsgBeat(t *testing.T) {\n\ttests := []struct {\n\t\tstate StateType\n\t\twMsg int\n\t}{\n\t\t{StateLeader, 2},\n\t\t// candidate and follower should ignore MsgBeat\n\t\t{StateCandidate, 0},\n\t\t{StateFollower, 0},\n\t}\n\n\tfor i, tt := range tests {\n\t\tsm := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tsm.raftLog = &raftLog{storage: newInitedMemoryStorage([]pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}})}\n\t\tdefer closeAndFreeRaft(sm)\n\t\tsm.Term = 1\n\t\tsm.state = tt.state\n\t\tswitch tt.state {\n\t\tcase StateFollower:\n\t\t\tsm.step = stepFollower\n\t\tcase StateCandidate:\n\t\t\tsm.step = stepCandidate\n\t\tcase StateLeader:\n\t\t\tsm.step = stepLeader\n\t\t}\n\t\tsm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t\tmsgs := sm.readMessages()\n\t\tif len(msgs) != tt.wMsg {\n\t\t\tt.Errorf(\"%d: len(msgs) = %d, want %d\", i, len(msgs), tt.wMsg)\n\t\t}\n\t\tfor _, m := range msgs {\n\t\t\tif m.Type != pb.MsgHeartbeat {\n\t\t\t\tt.Errorf(\"%d: msg.type = %v, want %v\", i, m.Type, pb.MsgHeartbeat)\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestHB11SendReceiveApollo(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceiveApollo norun\")\n\t\treturn\n\t}\n\tif os.Getenv(\"STOMP_HB11LONG\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceiveApollo norun LONG\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"10000,10\")\n\tc, e := Connect(n, conn_headers)\n\t// Error checks\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat send-receive connect error, unexpected: %q\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat send-receive error expected hbd value.\")\n\t}\n\t//\n\tl := log.New(os.Stdout, \"\", log.Ldate|log.Lmicroseconds)\n\tc.SetLogger(l)\n\t//\n\tfmt.Println(\"TestHB11SendReceiveApollo start sleep\")\n\ttime.Sleep(1e9 * 120) // 120 secs\n\tfmt.Println(\"TestHB11SendReceiveApollo end sleep\")\n\tc.SetLogger(nil)\n\tif c.Hbrf {\n\t\tt.Errorf(\"Error, dirty heart beat read detected\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}",
"func TestHeartbeatResponseFanout(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tdefer stopper.Stop()\n\n\tcluster := newTestCluster(nil, 3, stopper, t)\n\tgroupID1 := proto.RangeID(1)\n\tcluster.createGroup(groupID1, 0, 3 /* replicas */)\n\n\tgroupID2 := proto.RangeID(2)\n\tcluster.createGroup(groupID2, 0, 3 /* replicas */)\n\n\tleaderIndex := 0\n\n\tcluster.elect(leaderIndex, groupID1)\n\t// GroupID2 will have 3 round of election, so it will have different\n\t// term with groupID1, but both leader on the same node.\n\tfor i := 2; i >= 0; i-- {\n\t\tleaderIndex = i\n\t\tcluster.elect(leaderIndex, groupID2)\n\t}\n\t// Send a coalesced heartbeat.\n\t// Heartbeat response from groupID2 will have a big term than which from groupID1.\n\tcluster.nodes[0].coalescedHeartbeat()\n\t// Start submit a command to see if groupID1's leader changed?\n\tcluster.nodes[0].SubmitCommand(groupID1, makeCommandID(), []byte(\"command\"))\n\n\tselect {\n\tcase _ = <-cluster.events[0].CommandCommitted:\n\t\tlog.Infof(\"SubmitCommand succeed after Heartbeat Response fanout\")\n\tcase <-time.After(500 * time.Millisecond):\n\t\tt.Fatalf(\"No leader after Heartbeat Response fanout\")\n\t}\n}",
"func TestHandleHeartbeatResp(t *testing.T) {\n\tstorage := NewMemoryStorage()\n\tdefer storage.Close()\n\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\tsm.raftLog.commitTo(sm.raftLog.lastIndex())\n\n\t// A heartbeat response from a node that is behind; re-send MsgApp\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgApp {\n\t\tt.Errorf(\"type = %v, want MsgApp\", msgs[0].Type)\n\t}\n\n\t// A second heartbeat response generates another MsgApp re-send\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgApp {\n\t\tt.Errorf(\"type = %v, want MsgApp\", msgs[0].Type)\n\t}\n\n\t// Once we have an MsgAppResp, heartbeats no longer send MsgApp.\n\tsm.Step(pb.Message{\n\t\tFrom: 2,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: msgs[0].Index + uint64(len(msgs[0].Entries)),\n\t})\n\t// Consume the message sent in response to MsgAppResp\n\tsm.readMessages()\n\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 0 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 0: %+v\", len(msgs), msgs)\n\t}\n}",
"func TestHB11SendReceive(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceive norun\")\n\t\treturn\n\t}\n\tif os.Getenv(\"STOMP_HB11LONG\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceive norun LONG\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"10000,6000\")\n\tc, e := Connect(n, conn_headers)\n\t// Error checks\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat send-receive connect error, unexpected: %q\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat send-receive error expected hbd value.\")\n\t}\n\t//\n\tl := log.New(os.Stdout, \"\", log.Ldate|log.Lmicroseconds)\n\tc.SetLogger(l)\n\t//\n\tfmt.Println(\"TestHB11SendReceive start sleep\")\n\ttime.Sleep(1e9 * 120) // 120 secs\n\tfmt.Println(\"TestHB11SendReceive end sleep\")\n\tc.SetLogger(nil)\n\tif c.Hbrf {\n\t\tt.Errorf(\"Error, dirty heart beat read detected\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}",
"func TestHandleHeartbeat(t *testing.T) {\n\tcommit := uint64(2)\n\ttests := []struct {\n\t\tm pb.Message\n\t\twCommit uint64\n\t}{\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1},\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit\n\t}\n\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\t\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\t\tsm.becomeFollower(2, 2)\n\t\tsm.raftLog.commitTo(commit)\n\t\tsm.handleHeartbeat(tt.m)\n\t\tif sm.raftLog.committed != tt.wCommit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, sm.raftLog.committed, tt.wCommit)\n\t\t}\n\t\tm := sm.readMessages()\n\t\tif len(m) != 1 {\n\t\t\tt.Fatalf(\"#%d: msg = nil, want 1\", i)\n\t\t}\n\t\tif m[0].Type != pb.MsgHeartbeatResp {\n\t\t\tt.Errorf(\"#%d: type = %v, want MsgHeartbeatResp\", i, m[0].Type)\n\t\t}\n\t}\n}",
"func TestHB11SendReceiveApolloRev(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceiveApolloRev norun\")\n\t\treturn\n\t}\n\tif os.Getenv(\"STOMP_HB11LONG\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceiveApolloRev norun LONG\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"10,10000\")\n\tc, e := Connect(n, conn_headers)\n\t// Error checks\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat send-receive connect error, unexpected: %q\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat send-receive error expected hbd value.\")\n\t}\n\t//\n\tl := log.New(os.Stdout, \"\", log.Ldate|log.Lmicroseconds)\n\tc.SetLogger(l)\n\t//\n\tfmt.Println(\"TestHB11SendReceiveApolloRev start sleep\")\n\ttime.Sleep(1e9 * 120) // 120 secs\n\tfmt.Println(\"TestHB11SendReceiveApolloRev end sleep\")\n\tc.SetLogger(nil)\n\tif c.Hbrf {\n\t\tt.Errorf(\"Error, dirty heart beat read detected\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}",
"func SendBeat(){\n\tif r.IsLeader==1{\n\t\t\t\t//\tlogMutex.Lock()\n\t\t\t\t//\tlog_len := len(r.Log)-1\n\t\t\t\t//\tlogMutex.Unlock()\n\t\t\t\t\tmajorityCount:=1\n\t\t\t\t\tfor _,server:= range r.ClusterConfigV.Servers {\n\t\t\t\t\t\t//if i.Id !=raft.ServerId && i!=value && raft.matchIndex[i.Id] >majorityCheck {\n\t\t\t\t\tif server.Id !=r.Id && r.MatchIndex[server.Id] >= r.CommitIndex && r.MatchIndex[server.Id]!=0{\n\t\t\t\t\t\t\tmajorityCount++\n\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif majorityCount>len(r.ClusterConfigV.Servers)/2 && majorityCount!=len(r.ClusterConfigV.Servers) && r.CommitIndex != -1 {\n\t\t\t\t\t\t//fmt.Println(\"Sync will be called \",r.CommitIndex)\n\t\t\t\t\t\tSyncAllLog(Log_Conn{r.Log[r.CommitIndex],nil})\n\t\t\t\t\t}else{\n\t\t\t\t\t\targs:=prepareHeartBeat()\n\t\t\t\t\t\tvar AppendAck_ch = make(chan int,len(r.ClusterConfigV.Servers)-1)\n\t\t\t\t\t\tfor _,server := range r.ClusterConfigV.Servers {\t\t\t \n\t\t\t\t\t\t\tif server.Id == r.Id { continue }\t\t\t\t\n\t \t\t\t\t\t\tgo r.sendAppendRpc(server,args,AppendAck_ch,false)\n\t\t\t\t\t\t} \n\t\t\t\t\t\theartBeatAck:=0\n\t\t\t\t\t\tfor j:=0;j<len(r.ClusterConfigV.Servers)-1;j++{\n\t\t\t\t\t\t\t<- AppendAck_ch \n\t\t\t\t\t\t\theartBeatAck = heartBeatAck+ 1\n\t\t\t\t\t\t\tif heartBeatAck > len(r.ClusterConfigV.Servers)/2 { \n\t\t\t\t\t\t\t\tbreak\t\t\t\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}//end of if\n\t\t\t\t\t\n}",
"func TestHeartbeatAnnounce(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tmode HeartbeatMode\n\t\tkind string\n\t}{\n\t\t{mode: HeartbeatModeProxy, kind: types.KindProxy},\n\t\t{mode: HeartbeatModeAuth, kind: types.KindAuthServer},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.mode.String(), func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tdefer cancel()\n\t\t\tclock := clockwork.NewFakeClock()\n\n\t\t\tannouncer := newFakeAnnouncer(ctx)\n\t\t\thb, err := NewHeartbeat(HeartbeatConfig{\n\t\t\t\tContext: ctx,\n\t\t\t\tMode: tt.mode,\n\t\t\t\tComponent: \"test\",\n\t\t\t\tAnnouncer: announcer,\n\t\t\t\tCheckPeriod: time.Second,\n\t\t\t\tAnnouncePeriod: 60 * time.Second,\n\t\t\t\tKeepAlivePeriod: 10 * time.Second,\n\t\t\t\tServerTTL: 600 * time.Second,\n\t\t\t\tClock: clock,\n\t\t\t\tGetServerInfo: func() (types.Resource, error) {\n\t\t\t\t\tsrv := &types.ServerV2{\n\t\t\t\t\t\tKind: tt.kind,\n\t\t\t\t\t\tVersion: types.V2,\n\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: types.ServerSpecV2{\n\t\t\t\t\t\t\tAddr: \"127.0.0.1:1234\",\n\t\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tsrv.SetExpiry(clock.Now().UTC().Add(apidefaults.ServerAnnounceTTL))\n\t\t\t\t\treturn srv, nil\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateInit)\n\n\t\t\t// on the first run, heartbeat will move to announce state,\n\t\t\t// will call announce right away\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounce)\n\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 1)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\n\t\t\t// next call will not 
move to announce, because time is not up yet\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\n\t\t\t// advance time, and heartbeat will move to announce\n\t\t\tclock.Advance(hb.AnnouncePeriod + time.Second)\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounce)\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 2)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\n\t\t\t// in case of error, system will move to announce wait state,\n\t\t\t// with next attempt scheduled on the next keep alive period\n\t\t\tannouncer.err = trace.ConnectionProblem(nil, \"boom\")\n\t\t\tclock.Advance(hb.AnnouncePeriod + time.Second)\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.Error(t, err)\n\t\t\trequire.True(t, trace.IsConnectionProblem(err))\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 3)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.KeepAlivePeriod))\n\n\t\t\t// once announce is successful, next announce is set on schedule\n\t\t\tannouncer.err = nil\n\t\t\tclock.Advance(hb.KeepAlivePeriod + time.Second)\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 4)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\t\t})\n\t}\n}",
"func TestHB11Connect(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11Connect norun\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"100,10000\")\n\tc, e := Connect(n, conn_headers)\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat expected connection, got error: %q\\n\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat expected data, got nil\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}",
"func sendHeartBeat(targetNodeAddress string, nodeSeed int64, c chan bool) {\n\taddr, err := net.ResolveUDPAddr(\"udp\", fmt.Sprintf(\"%s:%d\", targetNodeAddress, configuration.UDPPort))\n\tcheckError(err)\n\n\tconn, err := net.DialUDP(\"udp\", nil, addr)\n\n\tdefer conn.Close()\n\n\tcount := 0\n\tcountTo50 := 0\n\tdroppedMessagesList := []int{}\n\n\tfor {\n\n\t\t//if message dropped simulation mode is enabled, it will try to drop a random message by input rate\n\t\tif enableSimulationMode && countTo50 == 0 {\n\t\t\tdroppedMessagesList = generateRandomDroppedMessagesList(nodeSeed)\n\t\t}\n\n\t\t//if simulation mode is enabled and message number is indicated as a dropped one, just simply ignore this turn\n\t\tmessageIsDropped := shouldDropMessage(droppedMessagesList, countTo50)\n\t\tif !enableSimulationMode || !messageIsDropped {\n\t\t\t//loop sending heartbeat message forever, till active member changes?\n\t\t\thbPacket := &fs533pb.HeartbeatPacket{HbCount: int32(count), SentTime: time.Now().Unix()}\n\t\t\tdata, err := proto.Marshal(hbPacket)\n\t\t\tif err != nil {\n\t\t\t\thblog(fmt.Sprintln(\"marshalling error: \", err))\n\t\t\t}\n\n\t\t\tbuf := []byte(data)\n\n\t\t\tif hbPacket.GetHbCount() < int32(configuration.NumberOfHeartbeatsShown) {\n\t\t\t\thblog(fmt.Sprintf(\"Send heartbeat to node '%s' at '%s', count = %d\", addr.String(), ConvertCurrentTimeToString(), hbPacket.GetHbCount()))\n\t\t\t}\n\n\t\t\tif count == 0 {\n\t\t\t\thblog(fmt.Sprintln(\"************** HEARTBEAT PACKET: size = \", len(data)))\n\t\t\t}\n\t\t\t_, err = conn.Write(buf)\n\t\t\tif err != nil {\n\t\t\t\thblog(fmt.Sprintln(\"Error :\", err))\n\t\t\t\t//hblog(fmt.Sprintf(\"Testing ... 3. sendHeartBeat() membershipservice.needRestartSendingHeartBeats = %v. 
membershipservice.membershipStatus =%v\", membershipservice.needRestartSendingHeartBeats, membershipservice.membershipStatus))\n\n\t\t\t\tif membershipservice.needRestartSendingHeartBeats || !membershipservice.membershipStatus {\n\t\t\t\t\thblog(fmt.Sprintf(\"Stop sending heartbeat to node '%s' because membership has changed. membershipservice.needRestartSendingHeartBeats = %v. membershipservice.membershipStatus =%v\", targetNodeAddress, membershipservice.needRestartSendingHeartBeats, membershipservice.membershipStatus))\n\t\t\t\t\tc <- true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif enableSimulationMode && messageIsDropped {\n\t\t\thblog(fmt.Sprintf(\"************** DROP HEARTBEAT PACKET '%d' to node %s (%d/50) \\n\", count, addr.String(), countTo50))\n\t\t}\n\n\t\tcount = count + 1\n\t\tcountTo50 = (countTo50 + 1) % 50\n\n\t\ttime.Sleep(time.Duration(configuration.HeartbeatIntervalTimeInMilliseconds) * time.Millisecond)\n\t\tif membershipservice.needRestartSendingHeartBeats {\n\t\t\t//hblog(fmt.Sprintf(\"Testing ... 5. Stop sending heartbeat to node '%s' because membership has changed. membershipservice.needRestartSendingHeartBeats = %v. membershipservice.membershipStatus =%v\", targetNodeAddress, membershipservice.needRestartSendingHeartBeats, membershipservice.membershipStatus))\n\t\t\tc <- true\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func StartHBeat(epochNonce uint64, peer *Peer, requestChan chan Request, ackChan chan HBeatAck, notifyChan chan<- FailureDetected, stopHBeat chan string) {\n\toutstandingMsgs := uint8(0)\n\toutstandingRequests := make(map[uint64]HBeatRequest)\n\tdone := make(chan struct{})\n\tfor outstandingMsgs < peer.GetThreshold() {\n\t\t//logger.Println(fmt.Sprintf(\"StartHBeat - outstanding messages: [%d]\", outstandingMsgs))\n\t\tselect {\n\t\tcase id := <- stopHBeat:\n\t\t\tif id == peer.Address.String() {\n\t\t\t\tdone <- struct{}{}\n\t\t\t}\n\t\tdefault:\n\t\t\tdelay := peer.getDelay()\n\t\t\tseqNum := sequenceNumber.getSeqNum()\n\t\t\thbeatMsg := HBeatMessage{epochNonce, seqNum}\n\t\t\tbufOut, err := encodeHBeat(hbeatMsg)\n\t\t\tif checkError(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trequest := Request{bufOut, peer.Address}\n\t\t\thbeatRequest := HBeatRequest{time.Now(), hbeatMsg}\n\t\t\toutstandingRequests[seqNum] = hbeatRequest\n\t\t\trequestChan <- request // Send the request\n\t\t\tticker := time.NewTicker(delay)\n\t\t\tdefer ticker.Stop()\n\n\t\tInner:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <- done:\n\t\t\t\t\t//logger.Println(fmt.Sprintf(\"StartHBeat - Stopping heartbeat for [%s]\", peer.Address.String()))\n\t\t\t\t\treturn\n\t\t\t\tcase ack :=<- ackChan:\n\t\t\t\t\t// Respond to ack\n\t\t\t\t\tbufDecoder := bytes.NewBuffer(ack.Buffer)\n\t\t\t\t\tdecoder := gob.NewDecoder(bufDecoder)\n\t\t\t\t\tackMsg, err := decodeAck(decoder)\n\t\t\t\t\t//err := json.Unmarshal(ack.Buffer[:ack.N], &ackMsg)\n\t\t\t\t\tif checkError(err) {\n\t\t\t\t\t\tcontinue Inner\n\t\t\t\t\t}\n\t\t\t\t\toutstandingRequest, ok := outstandingRequests[ackMsg.HBEatSeqNum]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tcontinue Inner\n\t\t\t\t\t}\n\t\t\t\t\tif outstandingRequest.EpochNonce == ackMsg.HBEatEpochNonce && outstandingRequest.SeqNum == ackMsg.HBEatSeqNum {\n\t\t\t\t\t\t//logger.Println(fmt.Sprintf(\"StartHBeat - Successfully received ack [%d] from [%s]\", outstandingRequest.SeqNum, 
peer.Address))\n\t\t\t\t\t\tpeer.UpdateDelay(outstandingRequest.RequestStartTime, time.Now())\n\t\t\t\t\t\tdelete(outstandingRequests, ackMsg.HBEatSeqNum) // Remove from outstanding requests\n\t\t\t\t\t\toutstandingMsgs = 0\n\t\t\t\t\t\tbreak Inner\n\t\t\t\t\t}\n\t\t\t\tcase <- ticker.C:\n\t\t\t\t\t//logger.Println(fmt.Sprintf(\"StartHbeat - Timeout monitoring [%s]\", peer.Address.String()))\n\t\t\t\t\toutstandingMsgs += 1\n\t\t\t\t\tbreak Inner\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t//TODO notify failure and shutdown\n\tcurrentTime := time.Now()\n\tfailureDetectedMsg := FailureDetected{peer.Address.String(), currentTime}\n\tnotifyChan <- failureDetectedMsg\n\treturn\n\n}",
"func (s *raftServer) sendHeartBeat() {\n\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid()}\n\tae.LeaderCommit = s.commitIndex.Get()\n\te := &cluster.Envelope{Pid: cluster.BROADCAST, Msg: ae}\n\ts.server.Outbox() <- e\n}",
"func (s *Server) HeartBeat(ctx context.Context,req *pb.HBReq) (*pb.HBRsp, error){\n //log.Infof(\"node %v receive heartbeat\",s.id)\n s.lastack = true\n s.changeTerm(int(req.Term))\n if s.role == Candidate{\n s.role = Follower\n }\n return &pb.HBRsp{\n Term: int64(s.term),\n }, nil\n}",
"func (r *Raft) handleHeartbeat(m pb.Message) {\n\t// Your Code Here (2A).\n\tr.electionElapsed = 0\n\tr.Lead = m.From\n\t//r.RaftLog.committed = m.Commit\n\tif m.Term < r.Term {\n\t\tr.Lead = None\n\t}\n\tr.msgs = append(r.msgs, pb.Message{MsgType: pb.MessageType_MsgHeartbeatResponse, From: r.id, To: m.From, Term: r.Term, Commit: r.RaftLog.committed})\n}",
"func startSendingHeartbeatMessages() {\n\thblog(fmt.Sprintln(\"Start sending heart beats...\"))\n\tfor {\n\t\t//if it is no longer a member of the group then stop sending heartbeat messages\n\t\tif !membershipservice.membershipStatus {\n\t\t\thblog(fmt.Sprintln(\"Stop sending heartbeats since membership status is inactive now. 111111\"))\n\t\t\tbreak\n\t\t}\n\n\t\tif membershipservice.Len() <= 1 {\n\t\t\t//if there is no successors, it should wait for a while before next checkup\n\t\t\ttime.Sleep(time.Duration(configuration.HeartbeatIntervalTimeInMilliseconds) * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\n\t\tmembershipservice.needRestartSendingHeartBeats = false\n\t\t//hblog(fmt.Sprintf(\"Testing ... 2. membershipservice.needRestartSendingHeartBeats = %v. membershipservice.membershipStatus =%v\", membershipservice.needRestartSendingHeartBeats, membershipservice.membershipStatus))\n\n\t\t//if activeMembersByAddress is greater than 2 nodes, send heartbeat to its 1st successor\n\t\tmemberIndex := getMemberIndex(configuration.IPAddress)\n\t\tc := make(chan bool)\n\t\tactiveMembersByAddressCount := membershipservice.Len()\n\t\tnumberOfSuccessors := 0\n\n\t\tfor i := 0; i < configuration.NumberOfPrecessorsAndSuccessors; i++ {\n\t\t\tif activeMembersByAddressCount < i+2 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnextMemberIndex := getMemberOffsetIndex(memberIndex, 1+i)\n\t\t\thblog(fmt.Sprintf(\"Start sending heartbeats to node %d at '%s', every %d milliseconds.\\n\", i+1, membershipservice.activeMembers.getIds()[nextMemberIndex], configuration.HeartbeatIntervalTimeInMilliseconds))\n\t\t\tgo sendHeartBeat(membershipservice.activeMembers.getIds()[nextMemberIndex], int64(i), c)\n\n\t\t\tnumberOfSuccessors++\n\t\t}\n\n\t\tfor i := 0; i < numberOfSuccessors; i++ {\n\t\t\t<-c\n\t\t}\n\n\t\tif membershipservice.membershipStatus {\n\t\t\thblog(fmt.Sprintln(\"Restart sending heartbeats since active members list has changed.\"))\n\t\t}\n\t}\n}",
"func TestHeartbeatHealth(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\tstopper := stop.NewStopper()\n\tdefer stopper.Stop()\n\n\t// Can't be zero because that'd be an empty offset.\n\tclock := hlc.NewClock(time.Unix(0, 1).UnixNano)\n\n\tserverCtx := newNodeTestContext(clock, stopper)\n\ts, ln := newTestServer(t, serverCtx, true)\n\tremoteAddr := ln.Addr().String()\n\n\theartbeat := &ManualHeartbeatService{\n\t\tready: make(chan struct{}),\n\t\tstopper: stopper,\n\t\tclock: clock,\n\t\tremoteClockMonitor: serverCtx.RemoteClocks,\n\t}\n\tRegisterHeartbeatServer(s, heartbeat)\n\n\tclientCtx := newNodeTestContext(clock, stopper)\n\t// Make the intervals and timeouts shorter to speed up the tests.\n\tclientCtx.HeartbeatInterval = 1 * time.Millisecond\n\tclientCtx.HeartbeatTimeout = 1 * time.Millisecond\n\tif _, err := clientCtx.GRPCDial(remoteAddr); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// This code is inherently racy so when we need to verify heartbeats we want\n\t// them to always succeed.\n\tsendHeartbeats := func() func() {\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\t\treturn\n\t\t\t\tcase heartbeat.ready <- struct{}{}:\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\treturn func() {\n\t\t\tdone <- struct{}{}\n\t\t}\n\t}\n\n\t// Should be healthy after the first successful heartbeat.\n\tstopHeartbeats := sendHeartbeats()\n\tutil.SucceedsSoon(t, func() error {\n\t\tif !clientCtx.IsConnHealthy(remoteAddr) {\n\t\t\treturn errors.Errorf(\"expected %s to be healthy\", remoteAddr)\n\t\t}\n\t\treturn nil\n\t})\n\tstopHeartbeats()\n\n\t// Should no longer be healthy after heartbeating stops.\n\tutil.SucceedsSoon(t, func() error {\n\t\tif clientCtx.IsConnHealthy(remoteAddr) {\n\t\t\treturn errors.Errorf(\"expected %s to be unhealthy\", remoteAddr)\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Should return to healthy after another successful heartbeat.\n\tstopHeartbeats = sendHeartbeats()\n\tutil.SucceedsSoon(t, func() 
error {\n\t\tif !clientCtx.IsConnHealthy(remoteAddr) {\n\t\t\treturn errors.Errorf(\"expected %s to be healthy\", remoteAddr)\n\t\t}\n\t\treturn nil\n\t})\n\tstopHeartbeats()\n\n\tif clientCtx.IsConnHealthy(\"non-existent connection\") {\n\t\tt.Errorf(\"non-existent connection is reported as healthy\")\n\t}\n}",
"func TestMockOnHeartbeat(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockHeartbeat}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\tcount := 0\n\n\tclient.OnHeartbeat(func(conn *websocket.Conn, payload *Payload) error {\n\t\tcount++\n\t\tif count >= 4 {\n\t\t\tcalled <- struct{}{}\n\t\t}\n\t\treturn nil\n\t})\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}",
"func TestHB11ZeroHeader(t *testing.T) {\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tc, _ := Connect(n, conn_headers.Add(\"heart-beat\", \"0,0\"))\n\tif c.protocol == SPL_10 {\n\t\t_ = closeConn(t, n)\n\t\treturn\n\t}\n\tif c.hbd != nil {\n\t\tt.Errorf(\"Expected no heartbeats for 1.1, zero header\")\n\t}\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
testNonleaderStartElection tests that if a follower receives no communication over election timeout, it begins an election to choose a new leader. It increments its current term and transitions to candidate state. It then votes for itself and issues RequestVote RPCs in parallel to each of the other servers in the cluster. Reference: section 5.2 Also if a candidate fails to obtain a majority, it will time out and start a new election by incrementing its term and initiating another round of RequestVote RPCs. Reference: section 5.2
|
// testNonleaderStartElection verifies that a follower or candidate that
// receives no communication for longer than the election timeout starts a
// new election: it increments its current term, transitions to candidate
// state, votes for itself, and issues MsgVote to every other peer in the
// cluster. Reference: section 5.2.
func testNonleaderStartElection(t *testing.T, state StateType) {
	// Election timeout in ticks.
	const electionTimeout = 10
	r := newTestRaft(1, []uint64{1, 2, 3}, electionTimeout, 1, NewMemoryStorage())
	defer closeAndFreeRaft(r)

	switch state {
	case StateFollower:
		r.becomeFollower(1, 2)
	case StateCandidate:
		r.becomeCandidate()
	}

	// Tick well past the election timeout so an election is triggered.
	for tick := 1; tick < 2*electionTimeout; tick++ {
		r.tick()
	}

	if got := r.Term; got != 2 {
		t.Errorf("term = %d, want 2", got)
	}
	if got := r.state; got != StateCandidate {
		t.Errorf("state = %s, want %s", got, StateCandidate)
	}
	if !r.votes[r.id] {
		t.Errorf("vote for self = false, want true")
	}

	// The candidate must have broadcast a vote request to both peers.
	msgs := r.readMessages()
	sort.Sort(messageSlice(msgs))
	want := []pb.Message{
		{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
			To: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},
			Term: 2, Type: pb.MsgVote},
		{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
			To: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},
			Term: 2, Type: pb.MsgVote},
	}
	if !reflect.DeepEqual(msgs, want) {
		t.Errorf("msgs = %v, want %v", msgs, want)
	}
}
|
[
"func TestVote_Candidate(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\tconfig.ClusterSize = 5\n\n\tcluster, err := CreateLocalCluster(config)\n\tdefer cleanupCluster(cluster)\n\n\ttime.Sleep(2 * time.Second)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tleader, err := findLeader(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tleader.setCurrentTerm(3)\n\tleader.leaderMutex.Lock()\n\tlogEntry := &rpc.LogEntry{\n\t\tIndex: leader.LastLogIndex() + 1,\n\t\tTermId: leader.GetCurrentTerm(),\n\t\tType: rpc.CommandType_NOOP,\n\t\tData: []byte{1, 2, 3, 4},\n\t}\n\tleader.StoreLog(logEntry)\n\tleader.leaderMutex.Unlock()\n\ttime.Sleep(1 * time.Second)\n\n\tfollowers, err := findAllFollowers(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif followers[0].GetCurrentTerm() != 3 {\n\t\tt.Fatalf(\"Term should've changed to %d but is %d\", 3, followers[0].GetCurrentTerm())\n\t}\n\n\tfollowers[1].setCurrentTerm(3)\n\tfollowers[1].config.ElectionTimeout = 1 * time.Second\n\tfollowers[3].NetworkPolicy.PauseWorld(true)\n\tfollowers[2].NetworkPolicy.PauseWorld(true)\n\tleader.NetworkPolicy.PauseWorld(true)\n\n\tt.Run(\"Handle competing RequestVote with Stale Term\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Out-of-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(100),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Up-to-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(200),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(3),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}",
"func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", 
sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}",
"func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want 
%s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}",
"func StartElection(s *Server) {\n\ts.Voted = s.ID\n\ts.TotalVotes[s.ID] = true\n fmt.Printf(\"%v alive servers\\n\", s.NumAliveServers)\n\tfor _, val := range s.Servers {\n\t\t// Let's assume he votes for himself\n\t\tif val.ID != s.ID && s.AliveServers[val.ID] {\n\t\t\tgo RequestVote(s, val)\n\t\t}\n\t}\n\n\tfor i := 0; i < s.NumAliveServers; i++ {\n\t\t<-s.VoteReceived\n\t\tif x := CheckVotes(s); x > s.NumAliveServers/2 {\n\t\t\ts.State = 2\n\t\t\treturn\n\t\t}\n\t}\n\n}",
"func (s *raftServer) startElection() {\n\ts.setState(CANDIDATE)\n\tpeers := s.server.Peers()\n\ts.writeToLog(\"Number of peers: \" + strconv.Itoa(len(peers)))\n\tvotes := make(map[int]bool) // map to store received votes\n\tvotes[s.server.Pid()] = true\n\ts.voteFor(s.server.Pid(), s.Term())\n\tfor s.State() == CANDIDATE {\n\t\ts.incrTerm() // increment term for current\n\t\tcandidateTimeout := time.Duration(s.duration + s.rng.Int63n(RandomTimeoutRange)) // random timeout used by Raft authors\n\t\ts.sendRequestVote()\n\t\ts.writeToLog(\"Sent RequestVote message \" + strconv.Itoa(int(candidateTimeout)))\n\t\ts.eTimeout.Reset(candidateTimeout * time.Millisecond) // start re-election timer\n\t\tfor {\n\t\t\tacc := false\n\t\t\tselect {\n\t\t\tcase e, _ := <-s.server.Inbox():\n\t\t\t\t// received a message on server's inbox\n\t\t\t\tmsg := e.Msg\n\t\t\t\tif ae, ok := msg.(AppendEntry); ok { // AppendEntry\n\t\t\t\t\tacc = s.handleAppendEntry(e.Pid, &ae)\n\t\t\t\t} else if rv, ok := msg.(RequestVote); ok { // RequestVote\n\t\t\t\t\tacc = s.handleRequestVote(e.Pid, &rv)\n\n\t\t\t\t} else if grantV, ok := msg.(GrantVote); ok && grantV.VoteGranted {\n\t\t\t\t\tvotes[e.Pid] = true\n\t\t\t\t\ts.writeToLog(\"Received grantVote message from \" + strconv.Itoa(e.Pid) + \" with term #\" + strconv.Itoa(grantV.Term))\n\t\t\t\t\ts.writeToLog(\"Votes received so far \" + strconv.Itoa(len(votes)))\n\t\t\t\t\tif len(votes) == len(peers)/2+1 { // received majority votes\n\t\t\t\t\t\ts.setState(LEADER)\n\t\t\t\t\t\ts.sendHeartBeat()\n\t\t\t\t\t\tacc = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-s.eTimeout.C:\n\t\t\t\t// received timeout on election timer\n\t\t\t\ts.writeToLog(\"Received re-election timeout\")\n\t\t\t\tacc = true\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(1 * time.Millisecond) // sleep to avoid busy looping\n\t\t\t}\n\n\t\t\tif acc {\n\t\t\t\ts.eTimeout.Reset(candidateTimeout * time.Millisecond) // start re-election timer\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}",
"func Test_MultiNodeClusterWithNonVoter(t *testing.T) {\n\tnode1 := mustNewLeaderNode()\n\tdefer node1.Deprovision()\n\n\tnode2 := mustNewNode(false)\n\tdefer node2.Deprovision()\n\tif err := node2.Join(node1); err != nil {\n\t\tt.Fatalf(\"node failed to join leader: %s\", err.Error())\n\t}\n\t_, err := node2.WaitForLeader()\n\tif err != nil {\n\t\tt.Fatalf(\"failed waiting for leader: %s\", err.Error())\n\t}\n\n\t// Get the new leader, in case it changed.\n\tc := Cluster{node1, node2}\n\tleader, err := c.Leader()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to find cluster leader: %s\", err.Error())\n\t}\n\n\tnode3 := mustNewNode(false)\n\tdefer node3.Deprovision()\n\tif err := node3.Join(leader); err != nil {\n\t\tt.Fatalf(\"node failed to join leader: %s\", err.Error())\n\t}\n\t_, err = node3.WaitForLeader()\n\tif err != nil {\n\t\tt.Fatalf(\"failed waiting for leader: %s\", err.Error())\n\t}\n\n\t// Get the new leader, in case it changed.\n\tc = Cluster{node1, node2, node3}\n\tleader, err = c.Leader()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to find cluster leader: %s\", err.Error())\n\t}\n\n\tnonVoter := mustNewNode(false)\n\tdefer nonVoter.Deprovision()\n\tif err := nonVoter.JoinAsNonVoter(leader); err != nil {\n\t\tt.Fatalf(\"non-voting node failed to join leader: %s\", err.Error())\n\t}\n\t_, err = nonVoter.WaitForLeader()\n\tif err != nil {\n\t\tt.Fatalf(\"failed waiting for leader: %s\", err.Error())\n\t}\n\tc = Cluster{node1, node2, node3, nonVoter}\n\n\t// Run queries against cluster.\n\ttests := []struct {\n\t\tstmt string\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: `CREATE TABLE foo (id integer not null primary key, name text)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO foo(name) VALUES(\"fiona\")`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM foo`,\n\t\t\texpected: 
`{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"],\"values\":[[1,\"fiona\"]]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = leader.Execute(tt.stmt)\n\t\t} else {\n\t\t\tr, err = leader.Query(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n\n\t// Kill the leader and wait for the new leader.\n\tleader.Deprovision()\n\tc.RemoveNode(leader)\n\tleader, err = c.WaitForNewLeader(leader)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to find new cluster leader after killing leader: %s\", err.Error())\n\t}\n\n\t// Run queries against the now 3-node cluster.\n\ttests = []struct {\n\t\tstmt string\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: `CREATE TABLE foo (id integer not null primary key, name text)`,\n\t\t\texpected: `{\"results\":[{\"error\":\"table foo already exists\"}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO foo(name) VALUES(\"sinead\")`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":2,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM foo`,\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"],\"values\":[[1,\"fiona\"],[2,\"sinead\"]]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = leader.Execute(tt.stmt)\n\t\t} else {\n\t\t\tr, err = leader.Query(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}",
"func TestElect(t *testing.T) {\n\traftConf := &RaftConfig{MemberRegSocket: \"127.0.0.1:9999\", PeerSocket: \"127.0.0.1:9009\", TimeoutInMillis: 1500, HbTimeoutInMillis: 50, LogDirectoryPath: \"logs\", StableStoreDirectoryPath: \"./stable\"}\n\n\t// delete stored state to avoid unnecessary effect on following test cases\n\tdeleteState(raftConf.StableStoreDirectoryPath)\n\n\t// launch cluster proxy servers\n\tcluster.NewProxyWithConfig(RaftToClusterConf(raftConf))\n\n\tfmt.Println(\"Started Proxy\")\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tserverCount := 5\n\traftServers := make([]Raft, serverCount+1)\n\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\t// create cluster.Server\n\t\tclusterServer, err := cluster.NewWithConfig(i, \"127.0.0.1\", 5000+i, RaftToClusterConf(raftConf))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating cluster server. \" + err.Error())\n\t\t\treturn\n\t\t}\n\t\ts, err := NewWithConfig(clusterServer, raftConf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating Raft servers. \" + err.Error())\n\t\t\treturn\n\t\t}\n\t\traftServers[i] = s\n\t}\n\n\t// there should be a leader after sufficiently long duration\n\tcount := 0\n\ttime.Sleep(10 * time.Second)\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif raftServers[i].isLeader() {\n\t\t\tfmt.Println(\"Server \" + strconv.Itoa(i) + \" was chosen as leader.\")\n\t\t\tcount++\n\t\t}\n\t}\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen\")\n\t}\n}",
"func TestVote_Follower(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\n\tt.Run(\"Handle RequestVote with Stale Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tleader.setCurrentTerm(3)\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// make sure the client get the correct response while registering itself with a candidate\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle RequestVote with Higher Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader.leaderMutex.Lock()\n\t\tlogEntry := &rpc.LogEntry{\n\t\t\tIndex: leader.LastLogIndex() + 1,\n\t\t\tTermId: leader.GetCurrentTerm(),\n\t\t\tType: rpc.CommandType_NOOP,\n\t\t\tData: []byte{1, 2, 3, 4},\n\t\t}\n\t\tleader.StoreLog(logEntry)\n\t\tleader.leaderMutex.Unlock()\n\t\ttime.Sleep(1 * time.Second)\n\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\n\t\treply, _ = followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\ttimeouts := make(map[int]bool)\n\tfor round := 0; round < 50*et; round++ {\n\t\tswitch state {\n\t\tcase StateFollower:\n\t\t\tr.becomeFollower(r.Term+1, 2)\n\t\tcase StateCandidate:\n\t\t\tr.becomeCandidate()\n\t\t}\n\n\t\ttime := 0\n\t\tfor len(r.readMessages()) == 0 {\n\t\t\tr.tick()\n\t\t\ttime++\n\t\t}\n\t\ttimeouts[time] = true\n\t}\n\n\tfor d := et + 1; d < 2*et; d++ {\n\t\tif !timeouts[d] {\n\t\t\tt.Errorf(\"timeout in %d ticks should happen\", d)\n\t\t}\n\t}\n}",
"func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tnt := newNetwork(a, b, c)\n\tdefer nt.closeAll()\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// isolate 3 and increase term in rest\n\tnt.isolate(3)\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\t// trigger campaign in isolated c\n\tc.resetRandomizedElectionTimeout()\n\tfor i := 0; i < c.randomizedElectionTimeout; i++ {\n\t\tc.tick()\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tnt.recover()\n\n\t// leader sends to isolated candidate\n\t// and expects candidate to revert to follower\n\tnt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})\n\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// follower c term is reset with leader's\n\tif a.Term != c.Term {\n\t\tt.Errorf(\"follower term expected same term as leader's %d, got %d\", a.Term, c.Term)\n\t}\n}",
"func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. 
Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}",
"func TestPreVoteWithCheckQuorum(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tn1.checkQuorum = true\n\tn2.checkQuorum = true\n\tn3.checkQuorum = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// isolate node 1. node 2 and node 3 have leader info\n\tnt.isolate(1)\n\n\t// check state\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\t// node 2 will ignore node 3's PreVote\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// Do we have a leader?\n\tif n2.state != StateLeader && n3.state != StateFollower {\n\t\tt.Errorf(\"no leader\")\n\t}\n}",
"func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestOnlineElection(t *testing.T) {\n\tvar cases = []struct {\n\t\tpersons []int\n\t\ttimes []int\n\t\tq []int\n\t\ta []int\n\t}{\n\t\t//{\n\t\t//\tpersons: []int{0, 1, 1, 0, 0, 1, 0},\n\t\t//\ttimes: []int{0, 5, 10, 15, 20, 25, 30},\n\t\t//\tq: []int{3, 12, 25, 15, 24, 8},\n\t\t//\ta: []int{0, 1, 1, 0, 0, 1},\n\t\t//},\n\t\t{\n\t\t\tpersons: []int{0, 1, 0, 1, 1},\n\t\t\ttimes: []int{24, 29, 31, 76, 81},\n\t\t\tq: []int{28, 24, 29, 77, 30, 25, 76, 75, 81, 80},\n\t\t\ta: []int{0, 0, 1, 1, 1, 0, 1, 0, 1, 1},\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tvote := Constructor(c.persons, c.times)\n\t\tfor i := 0; i < len(c.q); i++ {\n\t\t\tif vote.Q(c.q[i]) != c.a[i] {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}\n}",
"func (r *Raft) CallElection(){\n\t\n\tr.CurrentTerm+=1 // increase the current term by 1 to avoid conflict\n\tVoteAckcount:=1 // Number of vote received, initialised to 1 as own vote fo candiate is positive\n\tr.IsLeader = 0 // Set the state of server as candiate\n\tvar VoteCount =make (chan int,(len(r.ClusterConfigV.Servers)-1))\n\t//fmt.Println(\"Sending vote requests for:\",r.Id)\n\t\n\tfor _,server := range r.ClusterConfigV.Servers {\t\t\t\n\t\t\t\tif server.Id != r.Id{\n\t\t\t\t\tgo r.sendVoteRequestRpc(server,VoteCount) \t\t\t\t\t\n\t\t\t\t}}\n\n\tfor i:=0;i< len(r.ClusterConfigV.Servers)-1;i++ {\n\t\t\t\t\tVoteAckcount = VoteAckcount+ <- VoteCount \n\t\t\t\t\t// if Candiate gets majoirty, declare candiate as Leader and send immediae heartbeat to followers declaring\n\t\t\t\t\t// election of new leader\n\t\t\t\tif VoteAckcount > (len(r.ClusterConfigV.Servers)/2) && r.IsLeader == 0 { \n\t\t\t\t\tlog.Println(\"New leader is:\",r.Id)\n\t\t\t\t\tr.IsLeader=1\n\t\t\t\t\tr.LeaderId=r.Id\n\t\t\t\t\traft.SendImmediateHeartBit <- 1\n\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\t\t\n\t\tif r.IsLeader==1{\n\t\t\t// initlised next index to lastlog index, and match index to 0 fro all servers\n\t\tfor _,server := range r.ClusterConfigV.Servers {\n\t\t\t\tr.NextIndex[server.Id]=len(r.Log)\n\t\t\t\tr.MatchIndex[server.Id]=0\n\t\t\t\tr.ResetTimer()\n\t\t\t}\n\t\t}else{ \n\t\t\t// Is candidate fails to get elected, fall back to follower state and reset timer for reelection \n\t\t\tr.IsLeader=2\n\t\t\tr.ResetTimer()\n\t\t}\n}",
"func newPreVoteMigrationCluster(t *testing.T) *network {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\t// We intentionally do not enable PreVote for n3, this is done so in order\n\t// to simulate a rolling restart process where it's possible to have a mixed\n\t// version cluster with replicas with PreVote enabled, and replicas without.\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// Cause a network partition to isolate n3.\n\tnt.isolate(3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\t// check state\n\t// n1.state == StateLeader\n\t// n2.state == StateFollower\n\t// n3.state == StateCandidate\n\tif n1.state != StateLeader {\n\t\tt.Fatalf(\"node 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif n2.state != StateFollower {\n\t\tt.Fatalf(\"node 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n\tif n3.state != StateCandidate {\n\t\tt.Fatalf(\"node 3 state: %s, want %s\", n3.state, StateCandidate)\n\t}\n\n\t// check term\n\t// n1.Term == 2\n\t// n2.Term == 2\n\t// n3.Term == 4\n\tif n1.Term != 2 {\n\t\tt.Fatalf(\"node 1 term: %d, want %d\", n1.Term, 2)\n\t}\n\tif n2.Term != 2 {\n\t\tt.Fatalf(\"node 2 term: %d, want %d\", n2.Term, 2)\n\t}\n\tif n3.Term != 4 {\n\t\tt.Fatalf(\"node 3 term: %d, want %d\", n3.Term, 4)\n\t}\n\n\t// Enable prevote on n3, then recover the network\n\tn3.preVote = true\n\tnt.recover()\n\n\treturn nt\n}",
"func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {\n\tet := 10\n\tsize := 5\n\trs := make([]*raft, size)\n\tids := idsBySize(size)\n\tfor k := range rs {\n\t\trs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())\n\t}\n\tdefer func() {\n\t\tfor k := range rs {\n\t\t\tcloseAndFreeRaft(rs[k])\n\t\t}\n\t}()\n\tconflicts := 0\n\tfor round := 0; round < 1000; round++ {\n\t\tfor _, r := range rs {\n\t\t\tswitch state {\n\t\t\tcase StateFollower:\n\t\t\t\tr.becomeFollower(r.Term+1, None)\n\t\t\tcase StateCandidate:\n\t\t\t\tr.becomeCandidate()\n\t\t\t}\n\t\t}\n\n\t\ttimeoutNum := 0\n\t\tfor timeoutNum == 0 {\n\t\t\tfor _, r := range rs {\n\t\t\t\tr.tick()\n\t\t\t\tif len(r.readMessages()) > 0 {\n\t\t\t\t\ttimeoutNum++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// several rafts time out at the same tick\n\t\tif timeoutNum > 1 {\n\t\t\tconflicts++\n\t\t}\n\t}\n\n\tif g := float64(conflicts) / 1000; g > 0.3 {\n\t\tt.Errorf(\"probability of conflicts = %v, want <= 0.3\", g)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestLeaderElectionInOneRoundRPC tests all cases that may happen in leader election during one round of RequestVote RPC: a) it wins the election b) it loses the election c) it is unclear about the result Reference: section 5.2
|
// TestLeaderElectionInOneRoundRPC tests all cases that may happen in
// leader election during one round of RequestVote RPC:
// a) it wins the election
// b) it loses the election
// c) it is unclear about the result
// Reference: section 5.2
func TestLeaderElectionInOneRoundRPC(t *testing.T) {
	tests := []struct {
		size  int             // cluster size
		votes map[uint64]bool // simulated vote responses from peers (true = granted)
		state StateType       // expected candidate state after the responses
	}{
		// win the election when receiving votes from a majority of the servers
		{1, map[uint64]bool{}, StateLeader},
		{3, map[uint64]bool{2: true, 3: true}, StateLeader},
		{3, map[uint64]bool{2: true}, StateLeader},
		{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},
		{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},
		{5, map[uint64]bool{2: true, 3: true}, StateLeader},
		// return to follower state if it receives vote denial from a majority
		{3, map[uint64]bool{2: false, 3: false}, StateFollower},
		{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},
		{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},
		// stay in candidate if it does not obtain the majority
		{3, map[uint64]bool{}, StateCandidate},
		{5, map[uint64]bool{2: true}, StateCandidate},
		{5, map[uint64]bool{2: false, 3: false}, StateCandidate},
		{5, map[uint64]bool{}, StateCandidate},
	}
	for i, tt := range tests {
		// Run each case inside a function literal so the deferred
		// closeAndFreeRaft fires at the end of the iteration rather than
		// accumulating until the whole test returns (defer-in-loop would
		// otherwise keep all raft instances alive simultaneously).
		func() {
			r := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())
			defer closeAndFreeRaft(r)

			// Start the campaign: node 1 becomes candidate at term 1 and
			// votes for itself.
			r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
			// Feed in the simulated vote responses from the peers.
			for id, vote := range tt.votes {
				r.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})
			}

			if r.state != tt.state {
				t.Errorf("#%d: state = %s, want %s", i, r.state, tt.state)
			}
			if g := r.Term; g != 1 {
				t.Errorf("#%d: term = %d, want %d", i, g, 1)
			}
		}()
	}
}
|
[
"func TestVote_Candidate(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\tconfig.ClusterSize = 5\n\n\tcluster, err := CreateLocalCluster(config)\n\tdefer cleanupCluster(cluster)\n\n\ttime.Sleep(2 * time.Second)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tleader, err := findLeader(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tleader.setCurrentTerm(3)\n\tleader.leaderMutex.Lock()\n\tlogEntry := &rpc.LogEntry{\n\t\tIndex: leader.LastLogIndex() + 1,\n\t\tTermId: leader.GetCurrentTerm(),\n\t\tType: rpc.CommandType_NOOP,\n\t\tData: []byte{1, 2, 3, 4},\n\t}\n\tleader.StoreLog(logEntry)\n\tleader.leaderMutex.Unlock()\n\ttime.Sleep(1 * time.Second)\n\n\tfollowers, err := findAllFollowers(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif followers[0].GetCurrentTerm() != 3 {\n\t\tt.Fatalf(\"Term should've changed to %d but is %d\", 3, followers[0].GetCurrentTerm())\n\t}\n\n\tfollowers[1].setCurrentTerm(3)\n\tfollowers[1].config.ElectionTimeout = 1 * time.Second\n\tfollowers[3].NetworkPolicy.PauseWorld(true)\n\tfollowers[2].NetworkPolicy.PauseWorld(true)\n\tleader.NetworkPolicy.PauseWorld(true)\n\n\tt.Run(\"Handle competing RequestVote with Stale Term\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Out-of-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(100),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Up-to-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(200),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(3),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}",
"func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want 
%s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}",
"func TestVote_Follower(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\n\tt.Run(\"Handle RequestVote with Stale Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tleader.setCurrentTerm(3)\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// make sure the client get the correct response while registering itself with a candidate\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle RequestVote with Higher Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader.leaderMutex.Lock()\n\t\tlogEntry := &rpc.LogEntry{\n\t\t\tIndex: leader.LastLogIndex() + 1,\n\t\t\tTermId: leader.GetCurrentTerm(),\n\t\t\tType: rpc.CommandType_NOOP,\n\t\t\tData: []byte{1, 2, 3, 4},\n\t\t}\n\t\tleader.StoreLog(logEntry)\n\t\tleader.leaderMutex.Unlock()\n\t\ttime.Sleep(1 * time.Second)\n\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\n\t\treply, _ = followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. 
Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}",
"func (a *RPC) VoteForLeader(args *RequestVoteRPCArgs,reply *bool) error{\n\t//r.ResetTimer()\n \t//fmt.Println(\"received Vote request parameter \",(*args).CandidateId,\" \",(*args).Term,\" \",(*args).LastLogTerm,\" \",(*args).LastLogIndex)\n \t//if len(r.Log)>1{\n \t//\tfmt.Println(\"Vote Request folloer parameter \",r.Id,\" \", r.CurrentTerm,\" \",r.Log[len(r.Log)-1].Term ,\" \",len(r.Log)-1)\n \t//}\n\tif r.IsLeader==2 { // if this server is follower\n\t\t//r.ResetTimer() //TODO\n\t\tif r.CurrentTerm > args.Term || r.VotedFor >-1 { // if follower has updated Term or already voted for other candidate in same term , reply nagative\n\t\t\t*reply = false\n\t\t} else if r.VotedFor== -1{ // if follower has not voted for anyone in current Term \n\t\t\tlastIndex:= len(r.Log) \n\t\t\tif lastIndex > 0 && args.LastLogIndex >0{ // if Candiate log and this server log is not empty. \n\t\t\t\tif r.Log[lastIndex-1].Term > args.LastLogTerm { // and Term of last log in follower is updated than Candidate, reject vote\n *reply=false\n }else if r.Log[lastIndex-1].Term == args.LastLogTerm{ // else if Terms of Follower and candidate is same\n \tif (lastIndex-1) >args.LastLogIndex { // but follower log is more updated, reject vote\n \t\t*reply = false\n \t} else {\n \t\t\t*reply = true // If last log terms is match and followe log is sync with candiate, vote for candidate\n \t\t}\n }else{ // if last log term is not updated and Term does not match, \n \t \t\t*reply=true//means follower is lagging behind candiate in log entries, vote for candidate\n \t\t}\n \t\n\t\t\t} else if lastIndex >args.LastLogIndex { // either of them is Zero\n\t\t\t\t*reply = false // if Follower has entries in Log, its more updated, reject vote\n\t\t\t}else{\n\t\t\t\t\t*reply = true // else Vote for candiate\n\t\t\t\t}\n\t\t}else{\n\t\t\t*reply=false\n\t\t}\n\t}else{\n\t\t*reply = false // This server is already a leader or candiate, reject vote\n\t}\n\n\tif(*reply) {\n r.VotedFor=args.CandidateId // Set Voted 
for to candiate Id if this server has voted positive\n }\n\t/*if(*reply) {\n\t\tfmt.Println(\"Follower \",r.Id,\" Voted for \",r.VotedFor)\n\t}else{\n\t\tfmt.Println(\"Follower \",r.Id,\" rejected vote for \",args.CandidateId)\n\t}*/\n\treturn nil\n}",
"func TestPreVoteWithCheckQuorum(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tn1.checkQuorum = true\n\tn2.checkQuorum = true\n\tn3.checkQuorum = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// isolate node 1. node 2 and node 3 have leader info\n\tnt.isolate(1)\n\n\t// check state\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\t// node 2 will ignore node 3's PreVote\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// Do we have a leader?\n\tif n2.state != StateLeader && n3.state != StateFollower {\n\t\tt.Errorf(\"no leader\")\n\t}\n}",
"func (r *Raft) CallElection(){\n\t\n\tr.CurrentTerm+=1 // increase the current term by 1 to avoid conflict\n\tVoteAckcount:=1 // Number of vote received, initialised to 1 as own vote fo candiate is positive\n\tr.IsLeader = 0 // Set the state of server as candiate\n\tvar VoteCount =make (chan int,(len(r.ClusterConfigV.Servers)-1))\n\t//fmt.Println(\"Sending vote requests for:\",r.Id)\n\t\n\tfor _,server := range r.ClusterConfigV.Servers {\t\t\t\n\t\t\t\tif server.Id != r.Id{\n\t\t\t\t\tgo r.sendVoteRequestRpc(server,VoteCount) \t\t\t\t\t\n\t\t\t\t}}\n\n\tfor i:=0;i< len(r.ClusterConfigV.Servers)-1;i++ {\n\t\t\t\t\tVoteAckcount = VoteAckcount+ <- VoteCount \n\t\t\t\t\t// if Candiate gets majoirty, declare candiate as Leader and send immediae heartbeat to followers declaring\n\t\t\t\t\t// election of new leader\n\t\t\t\tif VoteAckcount > (len(r.ClusterConfigV.Servers)/2) && r.IsLeader == 0 { \n\t\t\t\t\tlog.Println(\"New leader is:\",r.Id)\n\t\t\t\t\tr.IsLeader=1\n\t\t\t\t\tr.LeaderId=r.Id\n\t\t\t\t\traft.SendImmediateHeartBit <- 1\n\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\t\t\n\t\tif r.IsLeader==1{\n\t\t\t// initlised next index to lastlog index, and match index to 0 fro all servers\n\t\tfor _,server := range r.ClusterConfigV.Servers {\n\t\t\t\tr.NextIndex[server.Id]=len(r.Log)\n\t\t\t\tr.MatchIndex[server.Id]=0\n\t\t\t\tr.ResetTimer()\n\t\t\t}\n\t\t}else{ \n\t\t\t// Is candidate fails to get elected, fall back to follower state and reset timer for reelection \n\t\t\tr.IsLeader=2\n\t\t\tr.ResetTimer()\n\t\t}\n}",
"func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\t//DPrintf(\"server %d RequestVote lock\", rf.me)\n\tdefer rf.mu.Unlock()\n\tDPrintf(\"#%d server receive RequestVote rpc from #%d at term %d\\n\", rf.me, args.CandidateId, args.Term)\n\t// Your code here (2A, 2B).\n\treply.Term = rf.currentTerm\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.state = 0\n\t\trf.persist()\n\t\tif rf.leaderTimer != nil {\n\t\t\trf.leaderTimer.Stop()\n\t\t}\n\t}\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\tif args.LastLogTerm > rf.log[rf.lastIndex - rf.offset].Term {\n\t\t\treply.VoteGranted = true\n\t\t} else if args.LastLogTerm == rf.log[rf.lastIndex - rf.offset].Term && args.LastLogIndex >= rf.lastIndex {\n\t\t\treply.VoteGranted = true\n\t\t} else {\n\t\t\tDPrintf(\"#%d server is more update than requestvoter\\n\", rf.me)\n\t\t\treply.VoteGranted = false\n\t\t}\n\t} else {\n\t\tDPrintf(\"#%d server has voted other\\n\", rf.me)\n\t\treply.VoteGranted = false\n\t}\n\tif (reply.VoteGranted) {\n\t\trf.votedFor = args.CandidateId\n\t\trf.state = 1\n\t\tif rf.leaderTimer != nil {\n\t\t\trf.leaderTimer.Stop()\n\t\t}\n\t\trf.persist()\n\t\tvar d time.Duration\n\t\td = time.Duration(333 * rand.Float64() + 533)\n\t\tif rf.timer != nil {\n\t\t\trf.timer.Reset(d * time.Millisecond)\n\t\t}\n\t}\n\tDPrintf(\"#%d server receive RequestVote rpc from #%d at term %d, result: %v\\n\", rf.me, args.CandidateId, rf.currentTerm, reply.VoteGranted)\n\t//DPrintf(\"server %d RequestVote unlock\", rf.me)\n\treturn\n}",
"func TestLeaderElectionJumpToGreaterView(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for larger view should replace current attempt.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 2)\n\n\t// ViewChange message from node 1 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 3)\n\tif exp, a := uint64(6), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n}",
"func TestElection_HasVoted(t *testing.T) {\n\ttestDatabase := database.StormDB{\n\t\tFile: \"election_testdb.db\",\n\t}\n\terr := testDatabase.Connect()\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't connect to database. Error: %s\", err.Error())\n\t}\n\ttestVoter := models.Voter{\n\t\tStudentID: 1,\n\t\tCohort: 1,\n\t\tName: \"Prof Sturman\",\n\t}\n\ttestVoterWontVote := models.Voter{\n\t\tStudentID: 2,\n\t\tCohort: 1,\n\t\tName: \"Prof Goldschmidt\",\n\t}\n\ttestCandidate := models.Candidate{\n\t\tID: 1,\n\t\tCohort: 1,\n\t\tName: \"Joey Lyon\",\n\t}\n\n\terr = testDatabase.StoreVoter(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreVoter(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreCandidate(testCandidate)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test candidate to database\")\n\t}\n\n\te := New(&testDatabase, false, []string{})\n\t// Begin testing HasVoted function\n\tret, err := e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter hasn't voted\")\n\t}\n\n\tvote := &models.Vote{\n\t\tCandidate: 1,\n\t\tStudentID: 1,\n\t}\n\tvote.HashVote(&testVoter)\n\te.CastVotes(&testVoter, []models.Vote{*vote})\n\tret, err = e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret == false {\n\t\tt.Errorf(\"HasVoted returned false when a voter has voted\")\n\t}\n\n\tret, err = e.HasVoted(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter has not voted\")\n\t}\n\terr = os.Remove(\"election_testdb.db\")\n\tif err != nil {\n\t\tt.Log(\"Cleanup failed\")\n\t}\n}",
"func (s *VotingChaincode) vote(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) != 3 {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_01\", []string{\"vote\", \"3\"}))\n\t}\n\n\ttodayDate := string(time.Now().UTC().Format(\"2006/01/02\"))\n\n\tvoterSSN := args[0]\n\telectionType := args[1]\n\tcandidatePubKey := args[2]\n\n\telection, err := u.FindCompositeKey(stub, c.ELECTION, []string{electionType})\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tif election == \"\" {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_15\", []string{electionType}))\n\t}\n\n\t_, keyParts, err := stub.SplitCompositeKey(election)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_07\", []string{election}))\n\t}\n\n\tisElectionPeriod := u.IsWithinRange(todayDate, keyParts[1], keyParts[2], \"2006/01/02\")\n\tif isElectionPeriod != true {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_13\", []string{todayDate, electionType, fmt.Sprint(keyParts[1] + \"-\" + keyParts[2])}))\n\t}\n\n\tfound, voterPubKey := u.FindUserBySSN(stub, voterSSN)\n\tif !found {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_14\", []string{voterSSN}))\n\t}\n\n\tvoterAsBytes, err := stub.GetState(voterPubKey)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_10\", []string{voterSSN, err.Error()}))\n\t}\n\n\tvoter := User{}\n\terr = json.Unmarshal(voterAsBytes, &voter)\n\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to unmarshal voter\")\n\t}\n\n\thasVoted := strings.Contains(voter.Election, c.VOTED)\n\tif hasVoted == true {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_14\", []string{voterSSN}))\n\t}\n\n\tisRegistered := strings.Contains(voter.Election, c.REGISTERED)\n\tif isRegistered != true {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_11\", []string{fmt.Sprint(\"Voter\" + voterSSN + \" Not Registered\")}))\n\t}\n\n\tisEligibleToVote := strings.Split(voter.Election, c.SEPARATOR)\n\tif isEligibleToVote[6] != \"true\" {\n\t\treturn 
shim.Error(msg.GetErrMsg(\"VOT_ERR_11\", []string{fmt.Sprint(isEligibleToVote[5] + \" Voter Min Age \" + strconv.Itoa(c.VOTER_MIN_AGE))}))\n\t}\n\n\tvoterAge := isEligibleToVote[5]\n\n\tcandidateAsBytes, err := stub.GetState(candidatePubKey)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_10\", []string{voterSSN, err.Error()}))\n\t}\n\n\tcandidate := User{}\n\tjson.Unmarshal(candidateAsBytes, &candidate)\n\n\tisCandidate := strings.Split(candidate.Election, c.SEPARATOR)\n\tif isCandidate[4] != \"true\" {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_12\", []string{candidatePubKey, \"Not Registered\"}))\n\t}\n\n\tif candidate.SSN == voter.SSN {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_12\", []string{candidatePubKey, fmt.Sprint(\"Same Voter \" + voterSSN + \" and Candidate \" + candidate.SSN)}))\n\t}\n\n\t_, err = s.callOtherCC(stub, c.CCNAME, c.CHANNELID, []string{\"giveVote\", voter.SSN, candidatePubKey, electionType, todayDate})\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_17\", []string{c.CCNAME, err.Error()}))\n\t}\n\n\tvoter.Election = strings.Replace(voter.Election, c.REGISTERED, c.VOTED, -1)\n\n\tvoterAsBytes, _ = json.Marshal(voter)\n\n\terr = stub.PutState(voterPubKey, voterAsBytes)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_09\", []string{voterPubKey, err.Error()}))\n\t}\n\n\tvote := Vote{\n\t\tvoterSSN,\n\t\tvoter.FirstName,\n\t\tvoter.LastName,\n\t\tvoterAge,\n\t\tcandidatePubKey,\n\t\ttodayDate,\n\t\telectionType,\n\t\tstub.GetTxID()}\n\n\tvoteJSON, _ := json.Marshal(vote)\n\n\treturn shim.Success(voteJSON)\n}",
"func (rf *Raft) runElection() {\n\t// get election start time\n\tlastElectionCheck := time.Now()\n\n\trf.mu.Lock()\n\trf.currentTerm++\n\t// persist - updated current term\n\tdata := rf.GetStateBytes(false)\n\trf.persister.SaveRaftState(data)\n\trf.Log(LogInfo, \"running as candidate\")\n\n\t// set as candidate state and vote for ourselves,\n\t// also reset the timer\n\trf.votedFor = rf.me\n\trf.state = Candidate\n\trf.electionTimeout = GetRandomElectionTimeout()\n\n\t// for holding replies - we send out the requests concurrently\n\treplies := make([]*RequestVoteReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &RequestVoteReply{}\n\t}\n\n\t// send out requests concurrently\n\tfor servIdx := range rf.peers {\n\t\tif servIdx != rf.me {\n\t\t\targs := &RequestVoteArgs{\n\t\t\t\tCandidateTerm: rf.currentTerm,\n\t\t\t}\n\n\t\t\t// grab last log index and term - default to snapshot if log is []\n\t\t\tif len(rf.log) > 0 {\n\t\t\t\targs.LastLogIndex = rf.log[len(rf.log)-1].Index\n\t\t\t\targs.LastLogTerm = rf.log[len(rf.log)-1].Term\n\t\t\t} else {\n\t\t\t\targs.LastLogIndex = rf.lastIncludedIndex\n\t\t\t\targs.LastLogTerm = rf.lastIncludedTerm\n\t\t\t}\n\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\tgo func(servIdx int) {\n\t\t\t\treply := &RequestVoteReply{}\n\t\t\t\trf.Log(LogDebug, \"Sending RequestVote to servIdx\", servIdx)\n\t\t\t\tok := rf.sendRequestVote(servIdx, args, reply)\n\t\t\t\tif ok {\n\t\t\t\t\trf.Log(LogDebug, \"Received RequestVote reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t}\n\t\t\t}(servIdx)\n\t\t}\n\t}\n\trf.mu.Unlock()\n\n\t// while we still have time on the clock, poll\n\t// for election result\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.state == Follower {\n\t\t\trf.Log(LogInfo, \"now a follower\")\n\t\t\t// we must have received a 
heartbeat message from a new leader\n\t\t\t// stop the election\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if rf.electionTimeout > 0 {\n\t\t\t// election still running\n\t\t\t// do a vote count and update time remaining\n\t\t\tcurrentTime := time.Now()\n\t\t\trf.electionTimeout -= (currentTime.Sub(lastElectionCheck))\n\t\t\tlastElectionCheck = currentTime\n\t\t\tvotes := 1 // we vote for ourselves automatically\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t// need a successful vote AND need that our term hasn't increased (e.g. if\n\t\t\t\t// since the last loop, we voted for a server with a higher term)\n\t\t\t\tif servIdx != rf.me && replies[servIdx].VoteGranted && replies[servIdx].CurrentTerm == rf.currentTerm {\n\t\t\t\t\tvotes++\n\t\t\t\t}\n\t\t\t}\n\t\t\t// majority vote achieved - set state as leader and\n\t\t\t// start sending heartbeats\n\t\t\tif votes >= int(math.Ceil(float64(len(rf.peers))/2.0)) {\n\t\t\t\trf.Log(LogInfo, \"elected leader\", \"\\n - rf.log:\", rf.log, \"\\n - rf.commitIndex\", rf.commitIndex)\n\t\t\t\trf.state = Leader\n\n\t\t\t\t// get next index of the log for rf.nextIndex\n\t\t\t\tnextIdx := rf.lastIncludedIndex + 1\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tnextIdx = rf.log[len(rf.log)-1].Index + 1\n\t\t\t\t}\n\n\t\t\t\t// this volatile state is reinitialized on election\n\t\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t\tif servIdx != rf.me {\n\t\t\t\t\t\trf.nextIndex[servIdx] = nextIdx\n\t\t\t\t\t\trf.matchIndex[servIdx] = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tgo rf.heartbeatAppendEntries()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// no result - need to rerun election\n\t\t\trf.Log(LogInfo, \"timed out as candidate\")\n\t\t\trf.mu.Unlock()\n\t\t\tgo rf.runElection()\n\t\t\treturn\n\t\t}\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(defaultPollInterval)\n\t}\n}",
"func TestOnlineElection(t *testing.T) {\n\tvar cases = []struct {\n\t\tpersons []int\n\t\ttimes []int\n\t\tq []int\n\t\ta []int\n\t}{\n\t\t//{\n\t\t//\tpersons: []int{0, 1, 1, 0, 0, 1, 0},\n\t\t//\ttimes: []int{0, 5, 10, 15, 20, 25, 30},\n\t\t//\tq: []int{3, 12, 25, 15, 24, 8},\n\t\t//\ta: []int{0, 1, 1, 0, 0, 1},\n\t\t//},\n\t\t{\n\t\t\tpersons: []int{0, 1, 0, 1, 1},\n\t\t\ttimes: []int{24, 29, 31, 76, 81},\n\t\t\tq: []int{28, 24, 29, 77, 30, 25, 76, 75, 81, 80},\n\t\t\ta: []int{0, 0, 1, 1, 1, 0, 1, 0, 1, 1},\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tvote := Constructor(c.persons, c.times)\n\t\tfor i := 0; i < len(c.q); i++ {\n\t\t\tif vote.Q(c.q[i]) != c.a[i] {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}\n}",
"func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (s *raftServer) startElection() {\n\ts.setState(CANDIDATE)\n\tpeers := s.server.Peers()\n\ts.writeToLog(\"Number of peers: \" + strconv.Itoa(len(peers)))\n\tvotes := make(map[int]bool) // map to store received votes\n\tvotes[s.server.Pid()] = true\n\ts.voteFor(s.server.Pid(), s.Term())\n\tfor s.State() == CANDIDATE {\n\t\ts.incrTerm() // increment term for current\n\t\tcandidateTimeout := time.Duration(s.duration + s.rng.Int63n(RandomTimeoutRange)) // random timeout used by Raft authors\n\t\ts.sendRequestVote()\n\t\ts.writeToLog(\"Sent RequestVote message \" + strconv.Itoa(int(candidateTimeout)))\n\t\ts.eTimeout.Reset(candidateTimeout * time.Millisecond) // start re-election timer\n\t\tfor {\n\t\t\tacc := false\n\t\t\tselect {\n\t\t\tcase e, _ := <-s.server.Inbox():\n\t\t\t\t// received a message on server's inbox\n\t\t\t\tmsg := e.Msg\n\t\t\t\tif ae, ok := msg.(AppendEntry); ok { // AppendEntry\n\t\t\t\t\tacc = s.handleAppendEntry(e.Pid, &ae)\n\t\t\t\t} else if rv, ok := msg.(RequestVote); ok { // RequestVote\n\t\t\t\t\tacc = s.handleRequestVote(e.Pid, &rv)\n\n\t\t\t\t} else if grantV, ok := msg.(GrantVote); ok && grantV.VoteGranted {\n\t\t\t\t\tvotes[e.Pid] = true\n\t\t\t\t\ts.writeToLog(\"Received grantVote message from \" + strconv.Itoa(e.Pid) + \" with term #\" + strconv.Itoa(grantV.Term))\n\t\t\t\t\ts.writeToLog(\"Votes received so far \" + strconv.Itoa(len(votes)))\n\t\t\t\t\tif len(votes) == len(peers)/2+1 { // received majority votes\n\t\t\t\t\t\ts.setState(LEADER)\n\t\t\t\t\t\ts.sendHeartBeat()\n\t\t\t\t\t\tacc = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-s.eTimeout.C:\n\t\t\t\t// received timeout on election timer\n\t\t\t\ts.writeToLog(\"Received re-election timeout\")\n\t\t\t\tacc = true\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(1 * time.Millisecond) // sleep to avoid busy looping\n\t\t\t}\n\n\t\t\tif acc {\n\t\t\t\ts.eTimeout.Reset(candidateTimeout * time.Millisecond) // start re-election timer\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", 
sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}",
"func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}",
"func (r *Raft) runCandidate() {\n\t// Start vote for us, and set a timeout\n\tvoteCh := r.electSelf()\n\telectionTimeout := randomTimeout(r.conf.ElectionTimeout, 2*r.conf.ElectionTimeout)\n\n\t// Tally the votes, need a simple majority\n\tgrantedVotes := 0\n\tquorum := r.quorumSize()\n\tr.logD.Printf(\"Cluster size: %d, votes needed: %d\", len(r.peers)+1, quorum)\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Candidate state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\n\t\t// Got response from peers on voting request\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() {\n\t\t\t\tr.logD.Printf(\"Newer term discovered\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tif err := r.setCurrentTerm(vote.Term); err != nil {\n\t\t\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tr.logD.Printf(\"Vote granted. Tally: %d\", grantedVotes)\n\t\t\t}\n\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= quorum {\n\t\t\t\tr.logD.Printf(\"Election won. Tally: %d\", grantedVotes)\n\t\t\t\tr.setState(Leader)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\n\t\tcase <-electionTimeout:\n\t\t\t// Election failed! Restart the election. 
We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tr.logW.Printf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestFollowerVote tests that each follower will vote for at most one candidate in a given term, on a firstcomefirstserved basis. Reference: section 5.2
|
func TestFollowerVote(t *testing.T) {
tests := []struct {
vote uint64
nvote uint64
wreject bool
}{
{None, 1, false},
{None, 2, false},
{1, 1, false},
{2, 2, false},
{1, 2, true},
{2, 1, true},
}
for i, tt := range tests {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
defer closeAndFreeRaft(r)
r.loadState(pb.HardState{Term: 1, Vote: tt.vote})
r.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},
To: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
Term: 1, Type: pb.MsgVote})
msgs := r.readMessages()
wmsgs := []pb.Message{
{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
To: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},
Term: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},
}
if !reflect.DeepEqual(msgs, wmsgs) {
t.Errorf("#%d: msgs = %v, want %v", i, msgs, wmsgs)
}
}
}
|
[
"func TestVote_Follower(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\n\tt.Run(\"Handle RequestVote with Stale Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tleader.setCurrentTerm(3)\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// make sure the client get the correct response while registering itself with a candidate\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle RequestVote with Higher Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader.leaderMutex.Lock()\n\t\tlogEntry := &rpc.LogEntry{\n\t\t\tIndex: leader.LastLogIndex() + 1,\n\t\t\tTermId: leader.GetCurrentTerm(),\n\t\t\tType: rpc.CommandType_NOOP,\n\t\t\tData: []byte{1, 2, 3, 4},\n\t\t}\n\t\tleader.StoreLog(logEntry)\n\t\tleader.leaderMutex.Unlock()\n\t\ttime.Sleep(1 * time.Second)\n\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\n\t\treply, _ = followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func TestVote_Candidate(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\tconfig.ClusterSize = 5\n\n\tcluster, err := CreateLocalCluster(config)\n\tdefer cleanupCluster(cluster)\n\n\ttime.Sleep(2 * time.Second)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tleader, err := findLeader(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tleader.setCurrentTerm(3)\n\tleader.leaderMutex.Lock()\n\tlogEntry := &rpc.LogEntry{\n\t\tIndex: leader.LastLogIndex() + 1,\n\t\tTermId: leader.GetCurrentTerm(),\n\t\tType: rpc.CommandType_NOOP,\n\t\tData: []byte{1, 2, 3, 4},\n\t}\n\tleader.StoreLog(logEntry)\n\tleader.leaderMutex.Unlock()\n\ttime.Sleep(1 * time.Second)\n\n\tfollowers, err := findAllFollowers(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif followers[0].GetCurrentTerm() != 3 {\n\t\tt.Fatalf(\"Term should've changed to %d but is %d\", 3, followers[0].GetCurrentTerm())\n\t}\n\n\tfollowers[1].setCurrentTerm(3)\n\tfollowers[1].config.ElectionTimeout = 1 * time.Second\n\tfollowers[3].NetworkPolicy.PauseWorld(true)\n\tfollowers[2].NetworkPolicy.PauseWorld(true)\n\tleader.NetworkPolicy.PauseWorld(true)\n\n\tt.Run(\"Handle competing RequestVote with Stale Term\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Out-of-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(100),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Up-to-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(200),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(3),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want 
%s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}",
"func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}",
"func TestPreVoteWithCheckQuorum(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tn1.checkQuorum = true\n\tn2.checkQuorum = true\n\tn3.checkQuorum = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// isolate node 1. node 2 and node 3 have leader info\n\tnt.isolate(1)\n\n\t// check state\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\t// node 2 will ignore node 3's PreVote\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// Do we have a leader?\n\tif n2.state != StateLeader && n3.state != StateFollower {\n\t\tt.Errorf(\"no leader\")\n\t}\n}",
"func (rft *Raft) follower() int {\n\t//start candidate timeout\n\trft.et = time.NewTimer(time.Millisecond * time.Duration(getRandTime(rft.Info)))\n\tSetIsLeader(false)\n\tfor {\n\t\t//wrap in select\n\t\tselect {\n\t\tcase <-rft.et.C:\n\t\t\trft.Info.Println(\"[F]: election timeout\")\n\t\t\treturn CANDIDATE\n\t\tcase event := <-rft.eventCh:\n\t\t\tswitch event.(type) {\n\t\t\tcase *ClientAppend:\n\t\t\t\trft.Info.Println(\"[F]: got client append\")\n\t\t\t\t//Do not handle clients in follower mode.\n\t\t\t\t//Send it back up the pipeline.\n\t\t\t\tevent.(*ClientAppend).logEntry.SetCommitted(false)\n\t\t\t\trft.eventCh <- event.(*ClientAppend).logEntry\n\n\t\t\tcase *VoteRequest:\n\t\t\t\treq := event.(*VoteRequest)\n\t\t\t\trft.Info.Println(\"[F]: got vote request\", req)\n\t\t\t\treply := false\n\t\t\t\tif req.Term < rft.currentTerm {\n\t\t\t\t\trft.Info.Println(\"[F]: req.Term < rft.currentTerm\")\n\t\t\t\t\treply = false\n\t\t\t\t}\n\n\t\t\t\tif req.Term > rft.currentTerm ||\n\t\t\t\t\treq.LastLogTerm > rft.currentTerm ||\n\t\t\t\t\t(req.LastLogTerm == rft.currentTerm && req.LastLogIndex >= len(rft.LogArray)) {\n\t\t\t\t\trft.Info.Println(\"[F]: updating term and vote\", req.Term, NULL_VOTE)\n\t\t\t\t\trft.updateTermAndVote(req.Term)\n\t\t\t\t\treply = true\n\t\t\t\t}\n\n\t\t\t\tif reply && rft.votedFor == NULL_VOTE {\n\t\t\t\t\trft.et.Reset(time.Millisecond * 300)\n\t\t\t\t\trft.Info.Println(\"[F]: timer reset, after vote\")\n\t\t\t\t\trft.Info.Println(\"[F]: voted for \", req.CandidateId)\n\t\t\t\t\trft.votedFor = req.CandidateId\n\t\t\t\t\twriteFile(VOTED_FOR, rft.id, req.CandidateId, rft.Info)\n\t\t\t\t}\n\t\t\t\t//let the asker know about the vote\n\t\t\t\tvoteReply := &VoteRequestReply{rft.currentTerm, reply}\n\t\t\t\trft.voteReplyCh <- voteReply\n\n\t\t\tcase *AppendRPC:\n\t\t\t\trft.et.Reset(time.Millisecond * time.Duration(getRandTime(rft.Info)))\n\t\t\t\trft.Info.Println(\"[F]:\", \"Timer reset on AppendRPC\")\n\t\t\t\treq := 
event.(*AppendRPC)\n\t\t\t\t//heartbeat\n\t\t\t\tif len(req.Entries) == 0 {\n\t\t\t\t\trft.Info.Println(\"[F]: got hearbeat from \" + strconv.Itoa(req.LeaderId))\n\t\t\t\t\ttemp := &AppendReply{-1, true, -1, -1}\n\t\t\t\t\trft.Info.Println(\"[F]: sending dummy reply to \" + strconv.Itoa(req.LeaderId))\n\t\t\t\t\trft.appendReplyCh <- temp\n\t\t\t\t\trft.LeaderId = req.LeaderId\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\treply := true\n\n\t\t\t\tif req.PrevLogIndex == LOG_INVALID_INDEX || req.PrevLogIndex == LOG_INVALID_TERM {\n\t\t\t\t\t//rft.updateTermAndVote(req.Term)\n\t\t\t\t\treply = true\n\t\t\t\t} else if req.Term < rft.currentTerm {\n\t\t\t\t\treply = false\n\t\t\t\t} else if req.Term > rft.currentTerm {\n\t\t\t\t\trft.updateTermAndVote(req.Term)\n\t\t\t\t\treply = true\n\t\t\t\t}\n\n\t\t\t\t//first condition to prevent out of bounds except\n\t\t\t\tif !(req.PrevLogIndex == LOG_INVALID_INDEX) && rft.LogArray[req.PrevLogIndex].Term != req.PrevLogTerm {\n\t\t\t\t\trft.Info.Println(\"[F]: terms unequal\")\n\t\t\t\t\treply = false\n\t\t\t\t}\n\n\t\t\t\tif reply {\n\t\t\t\t\ti := req.PrevLogIndex + 1\n\t\t\t\t\tfor ; i < len(rft.LogArray); i++ {\n\t\t\t\t\t\tif req.PrevLogIndex == LOG_INVALID_INDEX || req.Entries[i-req.PrevLogIndex-1].Term != rft.LogArray[i].Term {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif req.PrevLogIndex == LOG_INVALID_INDEX {\n\t\t\t\t\t\trft.LogArray = append(rft.LogArray, req.Entries...)\n\t\t\t\t\t} else {\n\t\t\t\t\t\trft.LogArray = append(rft.LogArray[0:i], req.Entries[i-req.PrevLogIndex-1:]...)\n\t\t\t\t\t}\n\n\t\t\t\t\tif req.LeaderCommit > rft.commitIndex {\n\t\t\t\t\t\tif req.LeaderCommit > len(rft.LogArray)-1 {\n\t\t\t\t\t\t\trft.commitIndex = len(rft.LogArray) - 1\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\trft.commitIndex = req.LeaderCommit\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\ttemp := &AppendReply{rft.currentTerm, reply, rft.id, len(rft.LogArray)}\n\t\t\t\trft.appendReplyCh <- temp\n\t\t\t\tif reply 
{\n\t\t\t\t\trft.persistLog(req.Entries)\n\t\t\t\t}\n\t\t\t\trft.Info.Println(\"[F]: log is size\", len(rft.LogArray))\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestFollower(t *testing.T) {\n\tf := newFixture(t)\n\n\tprevUpdateTime := time.Now().Add(-10 * time.Second)\n\tprevUpdateTimeKube := metav1.NewTime(prevUpdateTime)\n\tmetric0, metric0Typed := newFakeDatadogMetric(\"default\", \"dd-metric-0\", \"metric query0\", datadoghq.DatadogMetricStatus{\n\t\tValue: \"10\",\n\t\tConditions: []datadoghq.DatadogMetricCondition{\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeActive,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeValid,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeUpdated,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeError,\n\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t},\n\t})\n\tmetric1, metric1Typed := newFakeDatadogMetric(\"default\", \"autogen-1\", \"metric query1\", datadoghq.DatadogMetricStatus{\n\t\tValue: \"10\",\n\t\tConditions: []datadoghq.DatadogMetricCondition{\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeActive,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeValid,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeUpdated,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: 
prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeError,\n\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t},\n\t})\n\tunstructured.SetNestedField(metric1.Object, \"dd-metric-1\", \"spec\", \"externalMetricName\")\n\tmetric1Typed.Spec.ExternalMetricName = \"dd-metric-1\"\n\n\tupdateTime := time.Now()\n\tf.datadogMetricLister = append(f.datadogMetricLister, metric0, metric1)\n\tf.objects = append(f.objects, metric0Typed, metric1Typed)\n\t// We have new updates locally (maybe leader changed or something. Followers should still overwrite local cache)\n\tddm := model.DatadogMetricInternal{\n\t\tID: \"default/dd-metric-0\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tValue: 20.0,\n\t\tUpdateTime: kubernetes.TimeWithoutWall(updateTime),\n\t\tDataTime: kubernetes.TimeWithoutWall(updateTime),\n\t\tError: fmt.Errorf(\"Error from backend while fetching metric\"),\n\t}\n\tddm.SetQueries(\"metric query0\")\n\tf.store.Set(\"default/dd-metric-0\", ddm, \"utest\")\n\n\tf.runControllerSync(false, \"default/dd-metric-0\", nil)\n\n\t// Check internal store content\n\tassert.Equal(t, 1, f.store.Count())\n\tddm = model.DatadogMetricInternal{\n\t\tID: \"default/dd-metric-0\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tValue: 10.0,\n\t\tUpdateTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tDataTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tError: nil,\n\t}\n\tddm.SetQueries(\"metric query0\")\n\tassert.Equal(t, &ddm, f.store.Get(\"default/dd-metric-0\"))\n\n\tf.runControllerSync(false, \"default/autogen-1\", nil)\n\tassert.Equal(t, 2, f.store.Count())\n\n\tddm = model.DatadogMetricInternal{\n\t\tID: \"default/autogen-1\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tAutogen: true,\n\t\tExternalMetricName: \"dd-metric-1\",\n\t\tValue: 10.0,\n\t\tUpdateTime: 
kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tDataTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tError: nil,\n\t}\n\tddm.SetQueries(\"metric query1\")\n\tassert.Equal(t, &ddm, f.store.Get(\"default/autogen-1\"))\n}",
"func TestVoting(t *testing.T) {\n\t// Define the various voting scenarios to test\n\ttests := []struct {\n\t\tepoch uint64\n\t\tvalidators []string\n\t\tvotes []testerVote\n\t\tresults []string\n\t}{\n\t\t{\n\t\t\t// Single validator, no votes cast\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{{validator: \"A\"}},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Single validator, voting to add two others (only accept first, second needs 2 votes)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, voting to add three others (only accept first two, third needs 3 votes already)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"E\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"E\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Single validator, dropping itself (weird, but one less cornercase by explicitly allowing this)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"A\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them (not fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them 
(fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Three validators, two of them deciding to drop the third\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of two not being enough to drop anyone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of three already being enough to drop someone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Authorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Authorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", 
auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Deauthorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Deauthorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (deauth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (auth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: 
[]testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Cascading changes are not allowed, only the the account being voted on may change\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) execute on touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"C\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) may go out of consensus on first touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: 
false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Ensure that pending votes don't survive authorization status changes. This\n\t\t\t// corner case can only appear if a validator is quickly added, remove and then\n\t\t\t// readded (or the inverse), while one of the original voters dropped. If a\n\t\t\t// past vote is left cached in the system somewhere, this will interfere with\n\t\t\t// the final validator outcome.\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\", \"E\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"F\", auth: true}, // Authorize F, 3 votes needed\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: false}, // Deauthorize F, 4 votes needed (leave A's previous vote \"unchanged\")\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: true}, // Almost authorize F, 2/3 votes needed\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"A\", auth: false}, // Deauthorize A, 3 votes needed\n\t\t\t\t{validator: \"C\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true}, // Finish authorizing F, 3/3 votes needed\n\t\t\t},\n\t\t\tresults: []string{\"B\", \"C\", \"D\", \"E\", \"F\"},\n\t\t}, {\n\t\t\t// Epoch transitions reset all votes to allow chain checkpointing\n\t\t\tepoch: 3,\n\t\t\tvalidators: 
[]string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"}, // Checkpoint block, (don't vote here, it's validated outside of snapshots)\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t},\n\t}\n\n\t// Run through the scenarios and test them\n\tfor i, tt := range tests {\n\t\t// Create the account pool and generate the initial set of validators\n\t\taccounts := newTesterAccountPool()\n\n\t\tvalidators := make([]common.Address, len(tt.validators))\n\t\tfor j, validator := range tt.validators {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tgenesis := testutils.Genesis(validators, true)\n\t\tconfig := new(istanbul.Config)\n\t\t*config = *istanbul.DefaultConfig\n\t\tconfig.TestQBFTBlock = big.NewInt(0)\n\t\tif tt.epoch != 0 {\n\t\t\tconfig.Epoch = tt.epoch\n\t\t}\n\n\t\tchain, backend := newBlockchainFromConfig(\n\t\t\tgenesis,\n\t\t\t[]*ecdsa.PrivateKey{accounts.accounts[tt.validators[0]]},\n\t\t\tconfig,\n\t\t)\n\n\t\t// Assemble a chain of headers from the cast votes\n\t\theaders := make([]*types.Header, len(tt.votes))\n\t\tfor j, vote := range tt.votes {\n\t\t\tblockNumber := big.NewInt(int64(j) + 1)\n\t\t\theaders[j] = &types.Header{\n\t\t\t\tNumber: blockNumber,\n\t\t\t\tTime: uint64(int64(j) * int64(config.GetConfig(blockNumber).BlockPeriod)),\n\t\t\t\tCoinbase: accounts.address(vote.validator),\n\t\t\t\tDifficulty: istanbulcommon.DefaultDifficulty,\n\t\t\t\tMixDigest: types.IstanbulDigest,\n\t\t\t}\n\t\t\t_ = 
qbftengine.ApplyHeaderQBFTExtra(\n\t\t\t\theaders[j],\n\t\t\t\tqbftengine.WriteValidators(validators),\n\t\t\t)\n\n\t\t\tif j > 0 {\n\t\t\t\theaders[j].ParentHash = headers[j-1].Hash()\n\t\t\t}\n\n\t\t\tcopy(headers[j].Extra, genesis.ExtraData)\n\n\t\t\tif len(vote.voted) > 0 {\n\t\t\t\tif err := accounts.writeValidatorVote(headers[j], vote.validator, vote.voted, vote.auth); err != nil {\n\t\t\t\t\tt.Errorf(\"Error writeValidatorVote test: %d, validator: %s, voteType: %v (err=%v)\", j, vote.voted, vote.auth, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Pass all the headers through clique and ensure tallying succeeds\n\t\thead := headers[len(headers)-1]\n\n\t\tsnap, err := backend.snapshot(chain, head.Number.Uint64(), head.Hash(), headers)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"test %d: failed to create voting snapshot: %v\", i, err)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\t// Verify the final list of validators against the expected ones\n\t\tvalidators = make([]common.Address, len(tt.results))\n\t\tfor j, validator := range tt.results {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresult := snap.validators()\n\t\tif len(result) != len(validators) {\n\t\t\tt.Errorf(\"test %d: validators mismatch: have %x, want %x\", i, result, validators)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\tfor j := 0; j < len(result); j++ {\n\t\t\tif !bytes.Equal(result[j][:], validators[j][:]) {\n\t\t\t\tt.Errorf(\"test %d, validator %d: validator mismatch: have %x, want %x\", i, j, result[j], validators[j])\n\t\t\t}\n\t\t}\n\t\tbackend.Stop()\n\t}\n}",
"func TestVoter(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tlogterm uint64\n\t\tindex uint64\n\n\t\twreject bool\n\t}{\n\t\t// same logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t\t// candidate higher logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false},\n\t\t// voter higher logterm\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(tt.ents)\n\t\tr := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\tif len(msgs) != 1 {\n\t\t\tt.Fatalf(\"#%d: len(msg) = %d, want %d\", i, len(msgs), 1)\n\t\t}\n\t\tm := msgs[0]\n\t\tif m.Type != pb.MsgVoteResp {\n\t\t\tt.Errorf(\"#%d: msgType = %d, want %d\", i, m.Type, pb.MsgVoteResp)\n\t\t}\n\t\tif m.Reject != tt.wreject {\n\t\t\tt.Errorf(\"#%d: reject = %t, want %t\", i, m.Reject, tt.wreject)\n\t\t}\n\t}\n}",
"func (a *RPC) VoteForLeader(args *RequestVoteRPCArgs,reply *bool) error{\n\t//r.ResetTimer()\n \t//fmt.Println(\"received Vote request parameter \",(*args).CandidateId,\" \",(*args).Term,\" \",(*args).LastLogTerm,\" \",(*args).LastLogIndex)\n \t//if len(r.Log)>1{\n \t//\tfmt.Println(\"Vote Request folloer parameter \",r.Id,\" \", r.CurrentTerm,\" \",r.Log[len(r.Log)-1].Term ,\" \",len(r.Log)-1)\n \t//}\n\tif r.IsLeader==2 { // if this server is follower\n\t\t//r.ResetTimer() //TODO\n\t\tif r.CurrentTerm > args.Term || r.VotedFor >-1 { // if follower has updated Term or already voted for other candidate in same term , reply nagative\n\t\t\t*reply = false\n\t\t} else if r.VotedFor== -1{ // if follower has not voted for anyone in current Term \n\t\t\tlastIndex:= len(r.Log) \n\t\t\tif lastIndex > 0 && args.LastLogIndex >0{ // if Candiate log and this server log is not empty. \n\t\t\t\tif r.Log[lastIndex-1].Term > args.LastLogTerm { // and Term of last log in follower is updated than Candidate, reject vote\n *reply=false\n }else if r.Log[lastIndex-1].Term == args.LastLogTerm{ // else if Terms of Follower and candidate is same\n \tif (lastIndex-1) >args.LastLogIndex { // but follower log is more updated, reject vote\n \t\t*reply = false\n \t} else {\n \t\t\t*reply = true // If last log terms is match and followe log is sync with candiate, vote for candidate\n \t\t}\n }else{ // if last log term is not updated and Term does not match, \n \t \t\t*reply=true//means follower is lagging behind candiate in log entries, vote for candidate\n \t\t}\n \t\n\t\t\t} else if lastIndex >args.LastLogIndex { // either of them is Zero\n\t\t\t\t*reply = false // if Follower has entries in Log, its more updated, reject vote\n\t\t\t}else{\n\t\t\t\t\t*reply = true // else Vote for candiate\n\t\t\t\t}\n\t\t}else{\n\t\t\t*reply=false\n\t\t}\n\t}else{\n\t\t*reply = false // This server is already a leader or candiate, reject vote\n\t}\n\n\tif(*reply) {\n r.VotedFor=args.CandidateId // Set Voted 
for to candiate Id if this server has voted positive\n }\n\t/*if(*reply) {\n\t\tfmt.Println(\"Follower \",r.Id,\" Voted for \",r.VotedFor)\n\t}else{\n\t\tfmt.Println(\"Follower \",r.Id,\" rejected vote for \",args.CandidateId)\n\t}*/\n\treturn nil\n}",
"func (v *verifyFuture) vote(leader bool) {\n\tv.voteLock.Lock()\n\tdefer v.voteLock.Unlock()\n\n\t// Guard against having notified already\n\tif v.notifyCh == nil {\n\t\treturn\n\t}\n\n\tif leader {\n\t\tv.votes++\n\t\tif v.votes >= v.quorumSize {\n\t\t\tv.notifyCh <- v\n\t\t\tv.notifyCh = nil\n\t\t}\n\t} else {\n\t\tv.notifyCh <- v\n\t\tv.notifyCh = nil\n\t}\n}",
"func (handler *RuleHandler) FollowerOnRequestVote(msg iface.MsgRequestVote, log iface.RaftLog, status iface.Status) []interface{} {\n\tactions := []interface{}{}\n\n\t// reject if we recently heard from leader\n\t// (to avoid \"disruptive servers\" during cluster configuration change)\n\tif time.Now().Sub(status.LeaderLastHeard()) < status.MinElectionTimeout() {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// maybe we are outdated\n\tif msg.Term > status.CurrentTerm() {\n\t\tactions = append(actions, iface.ActionSetCurrentTerm{\n\t\t\tNewCurrentTerm: msg.Term,\n\t\t})\n\t}\n\n\t// if candidate is still in a previous term, reject vote\n\tif msg.Term < status.CurrentTerm() {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// reject vote if we voted on another peer already\n\tif status.VotedFor() != \"\" && status.VotedFor() != msg.CandidateAddress {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\treturn actions\n\t}\n\n\tlastEntry, _ := log.Get(log.LastIndex())\n\n\t// if we have no log, surely peer is at least as updated as us. so grant vote\n\tif lastEntry == nil {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: true,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\tactions = append(actions, iface.ActionSetVotedFor{\n\t\t\tNewVotedFor: msg.CandidateAddress,\n\t\t})\n\t\treturn actions\n\t}\n\n\t// ok, we have log. 
grant vote if peer is as updated as us\n\tif msg.LastLogTerm > lastEntry.Term || (msg.LastLogTerm == lastEntry.Term && msg.LastLogIndex >= log.LastIndex()) {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: true,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\tactions = append(actions, iface.ActionSetVotedFor{\n\t\t\tNewVotedFor: msg.CandidateAddress,\n\t\t})\n\t\treturn actions\n\t}\n\n\t// ok, peer is not as updated as us\n\tactions = append(actions, iface.ReplyRequestVote{\n\t\tVoteGranted: false,\n\t\tTerm: status.CurrentTerm(),\n\t\tAddress: status.NodeAddress(),\n\t})\n\treturn actions\n\n}",
"func TestStartAsFollower(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateFollower)\n\t}\n}",
"func (r *Raft) runCandidate() {\n\tklog.Infof(fmt.Sprintf(\"%s/%s entering candidate state in term:%d for leader:%s\", r.localID, r.localAddr, r.getCurrentTerm()+1, r.Leader()))\n\n\t// Start vote for local and peers, and set a timeout\n\tvoteCh := r.startElection()\n\n\t// Make sure the leadership transfer flag is reset after each run. Having this\n\t// flag will set the field LeadershipTransfer in a RequestVoteRequst to true,\n\t// which will make other servers vote even though they have a leader already.\n\t// It is important to reset that flag, because this priviledge could be abused\n\t// otherwise.\n\tdefer func() { r.candidateFromLeadershipTransfer = false }()\n\n\telectionTimer := randomTimeout(r.config().ElectionTimeout) // [10s, 20s]\n\tgrantedVotes := 0\n\tvotesNeeded := r.quorumSize()\n\tklog.Infof(fmt.Sprintf(\"need %d votes at least\", votesNeeded))\n\tfor r.getState() == Candidate {\n\t\tselect {\n\t\tcase c := <-r.configurationsCh:\n\t\t\tc.configurations = r.configurations.Clone()\n\t\t\tc.respond(nil)\n\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() { // INFO: @see raft paper 3.4\n\t\t\t\t// @see https://thesquareplanet.com/blog/students-guide-to-raft/\n\t\t\t\t// \"If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower\"\n\t\t\t\tklog.Warningf(\"newer term discovered, fallback to follower\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tr.setCurrentTerm(vote.Term)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tklog.Infof(fmt.Sprintf(\"vote granted from %s/%s to %s/%s at term:%d and votes is %d/%d now\",\n\t\t\t\t\tvote.voterID, vote.voterAddress, r.localID, r.localAddr, vote.Term, grantedVotes, r.totalVoteSize()))\n\t\t\t}\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= votesNeeded {\n\t\t\t\tklog.Infof(fmt.Sprintf(\"%s/%s election win 
%d votes\", r.localID, r.localAddr, grantedVotes))\n\t\t\t\tr.setState(Leader)\n\t\t\t\tr.setLeader(r.localAddr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-electionTimer:\n\t\t\t// INFO: @see https://thesquareplanet.com/blog/students-guide-to-raft/\n\t\t\t// Follow Figure 2’s directions as to when you should start an election. In particular, note that if you are\n\t\t\t// a candidate (i.e., you are currently running an election), but the election timer fires, you should start another election.\n\t\t\t// This is important to avoid the system stalling due to delayed or dropped RPCs.\n\t\t\t// Election failed! Restart the election. We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tklog.Warningf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tr.processRPC(rpc)\n\n\t\tcase restore := <-r.userRestoreCh:\n\t\t\trestore.respond(ErrNotLeader)\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}",
"func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestCandidateFallback(t *testing.T) {\n\ttests := []pb.Message{\n\t\t{From: 2, To: 1, Term: 1, Type: pb.MsgApp},\n\t\t{From: 2, To: 1, Term: 2, Type: pb.MsgApp},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tif r.state != StateCandidate {\n\t\t\tt.Fatalf(\"unexpected state = %s, want %s\", r.state, StateCandidate)\n\t\t}\n\n\t\tr.Step(tt)\n\n\t\tif g := r.state; g != StateFollower {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, g, StateFollower)\n\t\t}\n\t\tif g := r.Term; g != tt.Term {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, tt.Term)\n\t\t}\n\t}\n}",
"func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", 
sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}",
"func (r *Raft) runCandidate() {\n\t// Start vote for us, and set a timeout\n\tvoteCh := r.electSelf()\n\telectionTimeout := randomTimeout(r.conf.ElectionTimeout, 2*r.conf.ElectionTimeout)\n\n\t// Tally the votes, need a simple majority\n\tgrantedVotes := 0\n\tquorum := r.quorumSize()\n\tr.logD.Printf(\"Cluster size: %d, votes needed: %d\", len(r.peers)+1, quorum)\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Candidate state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\n\t\t// Got response from peers on voting request\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() {\n\t\t\t\tr.logD.Printf(\"Newer term discovered\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tif err := r.setCurrentTerm(vote.Term); err != nil {\n\t\t\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tr.logD.Printf(\"Vote granted. Tally: %d\", grantedVotes)\n\t\t\t}\n\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= quorum {\n\t\t\t\tr.logD.Printf(\"Election won. Tally: %d\", grantedVotes)\n\t\t\t\tr.setState(Leader)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\n\t\tcase <-electionTimeout:\n\t\t\t// Election failed! Restart the election. 
We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tr.logW.Printf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestCandidateFallback tests that while waiting for votes, if a candidate receives an AppendEntries RPC from another server claiming to be leader whose term is at least as large as the candidate's current term, it recognizes the leader as legitimate and returns to follower state. Reference: section 5.2
|
func TestCandidateFallback(t *testing.T) {
tests := []pb.Message{
{From: 2, To: 1, Term: 1, Type: pb.MsgApp},
{From: 2, To: 1, Term: 2, Type: pb.MsgApp},
}
for i, tt := range tests {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
defer closeAndFreeRaft(r)
r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
if r.state != StateCandidate {
t.Fatalf("unexpected state = %s, want %s", r.state, StateCandidate)
}
r.Step(tt)
if g := r.state; g != StateFollower {
t.Errorf("#%d: state = %s, want %s", i, g, StateFollower)
}
if g := r.Term; g != tt.Term {
t.Errorf("#%d: term = %d, want %d", i, g, tt.Term)
}
}
}
|
[
"func TestVote_Candidate(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\tconfig.ClusterSize = 5\n\n\tcluster, err := CreateLocalCluster(config)\n\tdefer cleanupCluster(cluster)\n\n\ttime.Sleep(2 * time.Second)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tleader, err := findLeader(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tleader.setCurrentTerm(3)\n\tleader.leaderMutex.Lock()\n\tlogEntry := &rpc.LogEntry{\n\t\tIndex: leader.LastLogIndex() + 1,\n\t\tTermId: leader.GetCurrentTerm(),\n\t\tType: rpc.CommandType_NOOP,\n\t\tData: []byte{1, 2, 3, 4},\n\t}\n\tleader.StoreLog(logEntry)\n\tleader.leaderMutex.Unlock()\n\ttime.Sleep(1 * time.Second)\n\n\tfollowers, err := findAllFollowers(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif followers[0].GetCurrentTerm() != 3 {\n\t\tt.Fatalf(\"Term should've changed to %d but is %d\", 3, followers[0].GetCurrentTerm())\n\t}\n\n\tfollowers[1].setCurrentTerm(3)\n\tfollowers[1].config.ElectionTimeout = 1 * time.Second\n\tfollowers[3].NetworkPolicy.PauseWorld(true)\n\tfollowers[2].NetworkPolicy.PauseWorld(true)\n\tleader.NetworkPolicy.PauseWorld(true)\n\n\tt.Run(\"Handle competing RequestVote with Stale Term\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Out-of-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(100),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Up-to-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(200),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(3),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}",
"func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}",
"func TestVote_Follower(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\n\tt.Run(\"Handle RequestVote with Stale Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tleader.setCurrentTerm(3)\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// make sure the client get the correct response while registering itself with a candidate\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle RequestVote with Higher Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader.leaderMutex.Lock()\n\t\tlogEntry := &rpc.LogEntry{\n\t\t\tIndex: leader.LastLogIndex() + 1,\n\t\t\tTermId: leader.GetCurrentTerm(),\n\t\t\tType: rpc.CommandType_NOOP,\n\t\t\tData: []byte{1, 2, 3, 4},\n\t\t}\n\t\tleader.StoreLog(logEntry)\n\t\tleader.leaderMutex.Unlock()\n\t\ttime.Sleep(1 * time.Second)\n\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\n\t\treply, _ = followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}",
"func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want 
%s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}",
"func (r *Raft) runCandidate() {\n\t// Start vote for us, and set a timeout\n\tvoteCh := r.electSelf()\n\telectionTimeout := randomTimeout(r.conf.ElectionTimeout, 2*r.conf.ElectionTimeout)\n\n\t// Tally the votes, need a simple majority\n\tgrantedVotes := 0\n\tquorum := r.quorumSize()\n\tr.logD.Printf(\"Cluster size: %d, votes needed: %d\", len(r.peers)+1, quorum)\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Candidate state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\n\t\t// Got response from peers on voting request\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() {\n\t\t\t\tr.logD.Printf(\"Newer term discovered\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tif err := r.setCurrentTerm(vote.Term); err != nil {\n\t\t\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tr.logD.Printf(\"Vote granted. Tally: %d\", grantedVotes)\n\t\t\t}\n\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= quorum {\n\t\t\t\tr.logD.Printf(\"Election won. Tally: %d\", grantedVotes)\n\t\t\t\tr.setState(Leader)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\n\t\tcase <-electionTimeout:\n\t\t\t// Election failed! Restart the election. 
We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tr.logW.Printf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", 
sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}",
"func (r *Raft) runCandidate() {\n\tklog.Infof(fmt.Sprintf(\"%s/%s entering candidate state in term:%d for leader:%s\", r.localID, r.localAddr, r.getCurrentTerm()+1, r.Leader()))\n\n\t// Start vote for local and peers, and set a timeout\n\tvoteCh := r.startElection()\n\n\t// Make sure the leadership transfer flag is reset after each run. Having this\n\t// flag will set the field LeadershipTransfer in a RequestVoteRequst to true,\n\t// which will make other servers vote even though they have a leader already.\n\t// It is important to reset that flag, because this priviledge could be abused\n\t// otherwise.\n\tdefer func() { r.candidateFromLeadershipTransfer = false }()\n\n\telectionTimer := randomTimeout(r.config().ElectionTimeout) // [10s, 20s]\n\tgrantedVotes := 0\n\tvotesNeeded := r.quorumSize()\n\tklog.Infof(fmt.Sprintf(\"need %d votes at least\", votesNeeded))\n\tfor r.getState() == Candidate {\n\t\tselect {\n\t\tcase c := <-r.configurationsCh:\n\t\t\tc.configurations = r.configurations.Clone()\n\t\t\tc.respond(nil)\n\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() { // INFO: @see raft paper 3.4\n\t\t\t\t// @see https://thesquareplanet.com/blog/students-guide-to-raft/\n\t\t\t\t// \"If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower\"\n\t\t\t\tklog.Warningf(\"newer term discovered, fallback to follower\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tr.setCurrentTerm(vote.Term)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tklog.Infof(fmt.Sprintf(\"vote granted from %s/%s to %s/%s at term:%d and votes is %d/%d now\",\n\t\t\t\t\tvote.voterID, vote.voterAddress, r.localID, r.localAddr, vote.Term, grantedVotes, r.totalVoteSize()))\n\t\t\t}\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= votesNeeded {\n\t\t\t\tklog.Infof(fmt.Sprintf(\"%s/%s election win 
%d votes\", r.localID, r.localAddr, grantedVotes))\n\t\t\t\tr.setState(Leader)\n\t\t\t\tr.setLeader(r.localAddr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-electionTimer:\n\t\t\t// INFO: @see https://thesquareplanet.com/blog/students-guide-to-raft/\n\t\t\t// Follow Figure 2’s directions as to when you should start an election. In particular, note that if you are\n\t\t\t// a candidate (i.e., you are currently running an election), but the election timer fires, you should start another election.\n\t\t\t// This is important to avoid the system stalling due to delayed or dropped RPCs.\n\t\t\t// Election failed! Restart the election. We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tklog.Warningf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tr.processRPC(rpc)\n\n\t\tcase restore := <-r.userRestoreCh:\n\t\t\trestore.respond(ErrNotLeader)\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func TestProposalBufferRejectLeaseAcqOnFollower(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tself := uint64(1)\n\t// Each subtest will try to propose a lease acquisition in a different Raft\n\t// scenario. Some proposals should be allowed, some should be rejected.\n\tfor _, tc := range []struct {\n\t\tname string\n\t\tstate raft.StateType\n\t\t// raft.None means there's no leader, or the leader is unknown.\n\t\tleader uint64\n\t\t// Empty means VOTER_FULL.\n\t\tleaderRepType roachpb.ReplicaType\n\t\t// Set to simulate situations where the local replica is so behind that the\n\t\t// leader is not even part of the range descriptor.\n\t\tleaderNotInRngDesc bool\n\t\t// If true, the follower has a valid lease.\n\t\townsValidLease bool\n\n\t\texpRejection bool\n\t}{\n\t\t{\n\t\t\tname: \"leader\",\n\t\t\tstate: raft.StateLeader,\n\t\t\tleader: self,\n\t\t\t// No rejection. The leader can request a lease.\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, known eligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\t// Rejection - a follower can't request a lease.\n\t\t\texpRejection: true,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, lease extension despite known eligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader, but we're the leaseholder.\n\t\t\tleader: self + 1,\n\t\t\townsValidLease: true,\n\t\t\t// No rejection of lease extensions.\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, known ineligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\t// The leader type makes it ineligible to get the lease. 
Thus, the local\n\t\t\t// proposal will not be rejected.\n\t\t\tleaderRepType: roachpb.VOTER_DEMOTING_LEARNER,\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\t// Here we simulate the leader being known by Raft, but the local replica\n\t\t\t// is so far behind that it doesn't contain the leader replica.\n\t\t\tname: \"follower, known leader not in range descriptor\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\tleaderNotInRngDesc: true,\n\t\t\t// We assume that the leader is eligible, and redirect.\n\t\t\texpRejection: true,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, unknown leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Unknown leader.\n\t\t\tleader: raft.None,\n\t\t\t// No rejection if the leader is unknown. See comments in\n\t\t\t// FlushLockedWithRaftGroup().\n\t\t\texpRejection: false,\n\t\t},\n\t} {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tvar p testProposer\n\t\t\tvar pc proposalCreator\n\t\t\t// p.replicaID() is hardcoded; it'd better be hardcoded to what this test\n\t\t\t// expects.\n\t\t\trequire.Equal(t, self, uint64(p.replicaID()))\n\n\t\t\tvar rejected roachpb.ReplicaID\n\t\t\tif tc.expRejection {\n\t\t\t\tp.onRejectProposalWithRedirectLocked = func(_ *ProposalData, redirectTo roachpb.ReplicaID) {\n\t\t\t\t\tif rejected != 0 {\n\t\t\t\t\t\tt.Fatalf(\"unexpected 2nd rejection\")\n\t\t\t\t\t}\n\t\t\t\t\trejected = redirectTo\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.onRejectProposalWithRedirectLocked = func(_ *ProposalData, _ roachpb.ReplicaID) {\n\t\t\t\t\tt.Fatalf(\"unexpected redirection\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\traftStatus := raft.BasicStatus{\n\t\t\t\tID: self,\n\t\t\t\tSoftState: raft.SoftState{\n\t\t\t\t\tRaftState: tc.state,\n\t\t\t\t\tLead: tc.leader,\n\t\t\t\t},\n\t\t\t}\n\t\t\tr := &testProposerRaft{\n\t\t\t\tstatus: raftStatus,\n\t\t\t}\n\t\t\tp.raftGroup = r\n\t\t\tp.leaderReplicaInDescriptor = !tc.leaderNotInRngDesc\n\t\t\tp.leaderReplicaType = 
tc.leaderRepType\n\t\t\tp.ownsValidLease = tc.ownsValidLease\n\n\t\t\tvar b propBuf\n\t\t\tclock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)\n\t\t\ttracker := tracker.NewLockfreeTracker()\n\t\t\tb.Init(&p, tracker, clock, cluster.MakeTestingClusterSettings())\n\n\t\t\tpd, data := pc.newLeaseProposal(roachpb.Lease{})\n\t\t\t_, tok := b.TrackEvaluatingRequest(ctx, hlc.MinTimestamp)\n\t\t\t_, err := b.Insert(ctx, pd, data, tok.Move(ctx))\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, b.flushLocked(ctx))\n\t\t\tif tc.expRejection {\n\t\t\t\trequire.Equal(t, roachpb.ReplicaID(tc.leader), rejected)\n\t\t\t} else {\n\t\t\t\trequire.Equal(t, roachpb.ReplicaID(0), rejected)\n\t\t\t}\n\t\t\trequire.Zero(t, tracker.Count())\n\t\t})\n\t}\n}",
"func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}",
"func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tnt := newNetwork(a, b, c)\n\tdefer nt.closeAll()\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// isolate 3 and increase term in rest\n\tnt.isolate(3)\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\t// trigger campaign in isolated c\n\tc.resetRandomizedElectionTimeout()\n\tfor i := 0; i < c.randomizedElectionTimeout; i++ {\n\t\tc.tick()\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tnt.recover()\n\n\t// leader sends to isolated candidate\n\t// and expects candidate to revert to follower\n\tnt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})\n\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// follower c term is reset with leader's\n\tif a.Term != c.Term {\n\t\tt.Errorf(\"follower term expected same term as leader's %d, got %d\", a.Term, c.Term)\n\t}\n}",
"func (r *Raft) becomeCandidate() {\n\tr.State = StateCandidate\n\tr.Term++\n\tr.Lead = 0\n\tr.electionElapsed = 0\n\tr.actualElectionTimeout = r.generateElectionTimeout()\n\tr.votes = map[uint64]bool{}\n}",
"func (d *dummyContractStakingIndexer) CandidateVotes(ownerAddr address.Address) *big.Int {\n\treturn big.NewInt(0)\n}",
"func TestLearnerElectionTimeout(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\t// n2 is learner. Learner should not start election even when times out.\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n}",
"func (r *Node) requestVotes(electionResults chan bool, fallbackChan chan bool, currTerm uint64) {\n\t// Votes received\n\tremaining := 0\n\tresultChan := make(chan RequestVoteResult)\n\tfor _, peer := range r.Peers {\n\t\tif r.Self.GetId() == peer.GetId() {\n\t\t\tcontinue\n\t\t}\n\t\tmsg := rpc.RequestVoteRequest{\n\t\t\tTerm: currTerm,\n\t\t\tCandidate: r.Self,\n\t\t\tLastLogIndex: r.LastLogIndex(),\n\t\t\tLastLogTerm: r.GetLog(r.LastLogIndex()).GetTermId(),\n\t\t}\n\t\tremaining++\n\t\tgo r.requestPeerVote(peer, &msg, resultChan)\n\t}\n\n\tvote := 1\n\treject := 0\n\tmajority := r.config.ClusterSize/2 + 1\n\tif vote >= majority {\n\t\telectionResults <- true\n\t\treturn\n\t}\n\tfor remaining > 0 {\n\t\trequestVoteResult := <-resultChan\n\t\tremaining--\n\t\tif requestVoteResult == RequestVoteFallback {\n\t\t\tfallbackChan <- true\n\t\t\treturn\n\t\t}\n\t\tif requestVoteResult == RequestVoteSuccess {\n\t\t\tvote++\n\t\t\tif vote >= majority {\n\t\t\t\telectionResults <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treject++\n\t\t\tif reject >= majority {\n\t\t\t\telectionResults <- false\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}",
"func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}",
"func TestVoter(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tlogterm uint64\n\t\tindex uint64\n\n\t\twreject bool\n\t}{\n\t\t// same logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t\t// candidate higher logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false},\n\t\t// voter higher logterm\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(tt.ents)\n\t\tr := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\tif len(msgs) != 1 {\n\t\t\tt.Fatalf(\"#%d: len(msg) = %d, want %d\", i, len(msgs), 1)\n\t\t}\n\t\tm := msgs[0]\n\t\tif m.Type != pb.MsgVoteResp {\n\t\t\tt.Errorf(\"#%d: msgType = %d, want %d\", i, m.Type, pb.MsgVoteResp)\n\t\t}\n\t\tif m.Reject != tt.wreject {\n\t\t\tt.Errorf(\"#%d: reject = %t, want %t\", i, m.Reject, tt.wreject)\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
testNonleaderElectionTimeoutRandomized tests that election timeout for follower or candidate is randomized. Reference: section 5.2
|
func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {
et := 10
r := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())
defer closeAndFreeRaft(r)
timeouts := make(map[int]bool)
for round := 0; round < 50*et; round++ {
switch state {
case StateFollower:
r.becomeFollower(r.Term+1, 2)
case StateCandidate:
r.becomeCandidate()
}
time := 0
for len(r.readMessages()) == 0 {
r.tick()
time++
}
timeouts[time] = true
}
for d := et + 1; d < 2*et; d++ {
if !timeouts[d] {
t.Errorf("timeout in %d ticks should happen", d)
}
}
}
|
[
"func (node *Node) randElectionTimeout() time.Duration {\n\treturn time.Duration(150+rand.Intn(150)) * time.Millisecond\n}",
"func TestLearnerElectionTimeout(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\t// n2 is learner. Learner should not start election even when times out.\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n}",
"func setRandomizedElectionTimeout(r *raft, v int) {\n\tr.randomizedElectionTimeout = v\n}",
"func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {\n\tet := 10\n\tsize := 5\n\trs := make([]*raft, size)\n\tids := idsBySize(size)\n\tfor k := range rs {\n\t\trs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())\n\t}\n\tdefer func() {\n\t\tfor k := range rs {\n\t\t\tcloseAndFreeRaft(rs[k])\n\t\t}\n\t}()\n\tconflicts := 0\n\tfor round := 0; round < 1000; round++ {\n\t\tfor _, r := range rs {\n\t\t\tswitch state {\n\t\t\tcase StateFollower:\n\t\t\t\tr.becomeFollower(r.Term+1, None)\n\t\t\tcase StateCandidate:\n\t\t\t\tr.becomeCandidate()\n\t\t\t}\n\t\t}\n\n\t\ttimeoutNum := 0\n\t\tfor timeoutNum == 0 {\n\t\t\tfor _, r := range rs {\n\t\t\t\tr.tick()\n\t\t\t\tif len(r.readMessages()) > 0 {\n\t\t\t\t\ttimeoutNum++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// several rafts time out at the same tick\n\t\tif timeoutNum > 1 {\n\t\t\tconflicts++\n\t\t}\n\t}\n\n\tif g := float64(conflicts) / 1000; g > 0.3 {\n\t\tt.Errorf(\"probability of conflicts = %v, want <= 0.3\", g)\n\t}\n}",
"func GetRandomElectionTimeout() time.Duration {\n\treturn time.Duration(minElectionTimeout+rand.Intn(maxElectionTimeout-minElectionTimeout)) * time.Millisecond\n}",
"func getElectionTimeout() time.Duration {\n\treturn time.Duration(rand.Intn(300) + 150)\n}",
"func (rf *Raft) resetElectionTimeout() time.Duration {\n\trand.Seed(time.Now().UTC().UnixNano())\n\trf.randomizedElectionTimeout = rf.electionTimeout + time.Duration(rand.Int63n(rf.electionTimeout.Nanoseconds()))\n\treturn rf.randomizedElectionTimeout\n}",
"func TestClock_AfterElectionTimeout(t *testing.T) {\n\tc := raft.NewClock()\n\tc.ElectionTimeout = 10 * time.Millisecond\n\tt0 := time.Now()\n\t<-c.AfterElectionTimeout()\n\tif d := time.Since(t0); d < c.ElectionTimeout {\n\t\tt.Fatalf(\"channel fired too soon: %v\", d)\n\t}\n}",
"func (rf *Raft) isElectionTimeout() bool {\n\td := rf.elapsed - rf.electionTimeout\n\tif d < 0 {\n\t\treturn false\n\t}\n\tdif := rf.rand.Int() % rf.electionTimeout\n\tif d > dif {\n\t\tDPrintf(\"[[email protected]][%d] return ture , because d[%d] > deff[%d], rf.elapsed=[%d]\", rf.me, d, dif, rf.elapsed)\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
"func generateElectionTime() int {\n rand.Seed(time.Now().UnixNano())\n return rand.Intn(150)*2 + 300\n}",
"func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}",
"func TestNoTimeout2(t *testing.T) {\n\tr := commonTestlibExampleReplica()\n\tr.InstanceMatrix[0][1] = commonTestlibExampleCommittedInstance()\n\tr.MaxInstanceNum[0] = 1\n\ttime.Sleep(2 * r.TimeoutInterval)\n\tgo r.checkTimeout()\n\tselect {\n\tcase <-r.MessageChan:\n\t\tt.Fatal(\"shouldn't get a timeout message for committed instance\")\n\tdefault:\n\t}\n}",
"func (server *Server) timeouts() {\n\tfor {\n\t\ttimeout := rand.Intn(ElectionMaxTimeout-ElectionMinTimeout) + ElectionMinTimeout\n\n\t\tgo server.applyLogs()\n\n\t\tselect {\n\t\tcase <-server.heartbeat:\n\t\t\tif !server.Ready {\n\t\t\t\tserver.lock <- true\n\t\t\t\tserver.Ready = true\n\t\t\t\t<-server.lock\n\t\t\t}\n\t\tcase <-time.After(time.Duration(timeout) * time.Millisecond):\n\t\t\tlog.Println(\"Heartbeat timeout passed, election starting\")\n\t\t\tgo server.startCandidacy()\n\t\t}\n\t}\n}",
"func randomTimeout(minVal, maxVal time.Duration) <-chan time.Time {\n\textra := time.Duration(rand.Int()) % maxVal\n\treturn time.After((minVal + extra) % maxVal)\n}",
"func (c *Clock) AfterElectionTimeout() <-chan chan struct{} {\n\td := c.ElectionTimeout + time.Duration(rand.Intn(int(c.ElectionTimeout)))\n\treturn newClockChan(d)\n}",
"func (s *BrokerSuite) TestDialRandomized(c *C) {\n\tsrv1 := NewServer()\n\tsrv1.Start()\n\tdefer srv1.Close()\n\n\tsrv2 := NewServer()\n\tsrv2.Start()\n\tdefer srv2.Close()\n\n\tsrv3 := NewServer()\n\tsrv3.Start()\n\tdefer srv3.Close()\n\n\tnodes := []string{srv1.Address(), srv2.Address(), srv3.Address()}\n\tconf := s.newTestBrokerConf(\"tester\")\n\n\tfor i := 0; i < 30; i++ {\n\t\t_, err := NewCluster(nodes, conf.ClusterConnectionConf)\n\t\tc.Assert(err, IsNil)\n\t}\n\n\tc.Assert(srv1.Processed, Not(Equals), 30)\n\tc.Assert(srv2.Processed, Not(Equals), 30)\n\tc.Assert(srv3.Processed, Not(Equals), 30)\n\tc.Assert(srv1.Processed+srv2.Processed+srv3.Processed, Equals, 30)\n\tc.Assert(srv1.Processed, Not(Equals), 0)\n\tc.Assert(srv2.Processed, Not(Equals), 0)\n\tc.Assert(srv3.Processed, Not(Equals), 0)\n}",
"func Test_TaskOption_LeadershipTimeout(t *testing.T) {\n\t// given\n\toption := crontask.LeadershipTimeout(time.Second)\n\toptions := &crontask.TaskOptions{LeadershipTimeout: time.Hour}\n\n\t// when\n\toption(options)\n\n\t// then\n\tif options.LeadershipTimeout != time.Second {\n\t\tt.Errorf(\"leadership timeout not correctly applied, got %s\", options.LeadershipTimeout)\n\t}\n}",
"func randomTimeOut() time.Duration {\n\tt := time.Duration(rand.Intn(150)+150) * time.Millisecond // rand [150,300) ms to time out\n\treturn t\n}",
"func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) {\n\tout, _ := dockerCmd(c, \"run\", \"-d\", \"busybox\", \"sh\", \"-c\", \"exit 99\")\n\tcontainerID := strings.TrimSpace(out)\n\n\terr := waitInspect(containerID, \"{{.State.Running}}\", \"false\", 30*time.Second)\n\tc.Assert(err, checker.IsNil) //Container should have stopped by now\n\tout, _ = dockerCmd(c, \"wait\", containerID)\n\tc.Assert(strings.TrimSpace(out), checker.Equals, \"99\", check.Commentf(\"failed to set up container, %v\", out))\n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
testNonleadersElectionTimeoutNonconflict tests that in most cases only a single server(follower or candidate) will time out, which reduces the likelihood of split vote in the new election. Reference: section 5.2
|
// testNonleadersElectionTimeoutNonconflict checks that with randomized
// election timeouts, in most rounds only a single server (follower or
// candidate) times out, which reduces the likelihood of a split vote.
// Reference: raft paper, section 5.2.
//
// It runs 1000 rounds: each round resets all peers into the given state
// (which re-randomizes their election timeouts), ticks them in lockstep
// until at least one peer times out (emits messages), and counts the round
// as a conflict when more than one peer timed out on the same tick. The
// observed conflict probability must not exceed 0.3.
func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {
	et := 10 // base election timeout, in ticks
	size := 5
	rs := make([]*raft, size)
	ids := idsBySize(size)
	for k := range rs {
		rs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())
	}
	// Release every raft instance when the test completes.
	defer func() {
		for k := range rs {
			closeAndFreeRaft(rs[k])
		}
	}()
	conflicts := 0
	for round := 0; round < 1000; round++ {
		// Reset each peer into the state under test; the state
		// transition re-randomizes its election timeout.
		for _, r := range rs {
			switch state {
			case StateFollower:
				r.becomeFollower(r.Term+1, None)
			case StateCandidate:
				r.becomeCandidate()
			}
		}
		timeoutNum := 0
		// Tick all peers in lockstep until at least one of them has
		// timed out; a timed-out peer produces outgoing messages.
		for timeoutNum == 0 {
			for _, r := range rs {
				r.tick()
				if len(r.readMessages()) > 0 {
					timeoutNum++
				}
			}
		}
		// several rafts time out at the same tick
		if timeoutNum > 1 {
			conflicts++
		}
	}
	// Fail when simultaneous timeouts happened in more than 30% of rounds.
	if g := float64(conflicts) / 1000; g > 0.3 {
		t.Errorf("probability of conflicts = %v, want <= 0.3", g)
	}
}
|
[
"func TestLearnerElectionTimeout(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\t// n2 is learner. Learner should not start election even when times out.\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n}",
"func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\ttimeouts := make(map[int]bool)\n\tfor round := 0; round < 50*et; round++ {\n\t\tswitch state {\n\t\tcase StateFollower:\n\t\t\tr.becomeFollower(r.Term+1, 2)\n\t\tcase StateCandidate:\n\t\t\tr.becomeCandidate()\n\t\t}\n\n\t\ttime := 0\n\t\tfor len(r.readMessages()) == 0 {\n\t\t\tr.tick()\n\t\t\ttime++\n\t\t}\n\t\ttimeouts[time] = true\n\t}\n\n\tfor d := et + 1; d < 2*et; d++ {\n\t\tif !timeouts[d] {\n\t\t\tt.Errorf(\"timeout in %d ticks should happen\", d)\n\t\t}\n\t}\n}",
"func getElectionTimeout() time.Duration {\n\treturn time.Duration(rand.Intn(300) + 150)\n}",
"func (server *Server) timeouts() {\n\tfor {\n\t\ttimeout := rand.Intn(ElectionMaxTimeout-ElectionMinTimeout) + ElectionMinTimeout\n\n\t\tgo server.applyLogs()\n\n\t\tselect {\n\t\tcase <-server.heartbeat:\n\t\t\tif !server.Ready {\n\t\t\t\tserver.lock <- true\n\t\t\t\tserver.Ready = true\n\t\t\t\t<-server.lock\n\t\t\t}\n\t\tcase <-time.After(time.Duration(timeout) * time.Millisecond):\n\t\t\tlog.Println(\"Heartbeat timeout passed, election starting\")\n\t\t\tgo server.startCandidacy()\n\t\t}\n\t}\n}",
"func TestNoTimeout2(t *testing.T) {\n\tr := commonTestlibExampleReplica()\n\tr.InstanceMatrix[0][1] = commonTestlibExampleCommittedInstance()\n\tr.MaxInstanceNum[0] = 1\n\ttime.Sleep(2 * r.TimeoutInterval)\n\tgo r.checkTimeout()\n\tselect {\n\tcase <-r.MessageChan:\n\t\tt.Fatal(\"shouldn't get a timeout message for committed instance\")\n\tdefault:\n\t}\n}",
"func (node *Node) randElectionTimeout() time.Duration {\n\treturn time.Duration(150+rand.Intn(150)) * time.Millisecond\n}",
"func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}",
"func TestClock_AfterElectionTimeout(t *testing.T) {\n\tc := raft.NewClock()\n\tc.ElectionTimeout = 10 * time.Millisecond\n\tt0 := time.Now()\n\t<-c.AfterElectionTimeout()\n\tif d := time.Since(t0); d < c.ElectionTimeout {\n\t\tt.Fatalf(\"channel fired too soon: %v\", d)\n\t}\n}",
"func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}",
"func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}",
"func Test_TimeoutKeepAlive(t *testing.T) {\n\tkey := \"Test_Keepalive\"\n\twg := new(sync.WaitGroup)\n\n\tconn1, err := redigo.Dial(\"tcp\", RedisHost)\n\n\tif err != nil {\n\t\tt.Errorf(\"redigo.Dial failure due to '%s'\", err)\n\t\treturn\n\t}\n\n\tconn2, err := redigo.Dial(\"tcp\", RedisHost)\n\n\tif err != nil {\n\t\tt.Errorf(\"redigo.Dial failure due to '%s'\", err)\n\t\treturn\n\t}\n\n\tlock1 := New(conn1, key, 1000, 1000, 0, 5)\n\tstatus, err := lock1.Lock()\n\n\tif err != nil || !status {\n\t\tt.Error(\"unable to lock\")\n\t}\n\n\twg.Add(20)\n\tgo func() {\n\t\tfor i := 0; i < 20; i++ {\n\t\t\terr := lock1.KeepAlive()\n\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"timed out during lock contention due to '%v'\", err)\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t\ttime.Sleep(time.Second / 2)\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second * 2)\n\n\tlock2 := New(conn2, key, 1000, 1000, 0, 5)\n\tstatus, err = lock2.Lock()\n\n\tif status {\n\t\tt.Error(\"should not have been able to lock\")\n\t}\n\n\twg.Wait()\n\ttime.Sleep(time.Second * 2)\n\n\tstatus, err = lock2.Lock()\n\n\tif err != nil || !status {\n\t\tt.Error(\"should have been able to lock\")\n\t}\n}",
"func setRandomizedElectionTimeout(r *raft, v int) {\n\tr.randomizedElectionTimeout = v\n}",
"func (this *P2PServer) timeout() {\n\tpeers := this.network.GetNeighbors()\n\tvar periodTime uint\n\tperiodTime = common.DEFAULT_GEN_BLOCK_TIME / common.UPDATE_RATE_PER_BLOCK\n\tfor _, p := range peers {\n\t\tif p.GetSyncState() == common.ESTABLISH {\n\t\t\tt := p.GetContactTime()\n\t\t\tif t.Before(time.Now().Add(-1 * time.Second *\n\t\t\t\ttime.Duration(periodTime) * common.KEEPALIVE_TIMEOUT)) {\n\t\t\t\tlog.Warn(\"[p2p]keep alive timeout!!!lost remote\", \"peer\", p.GetID(), \"Addr\", p.SyncLink.GetAddr(), \"Addr\", t.String())\n\t\t\t\tp.CloseSync()\n\t\t\t\tp.CloseCons()\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestTimeouts(t *testing.T) {\n\tt.Parallel()\n\tvar testCases = []struct {\n\t\tdesc string\n\t\tcallTimeout time.Duration\n\t\tpluginDelay time.Duration\n\t\tkubeAPIServerDelay time.Duration\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tdesc: \"timeout zero - expect failure when call from kube-apiserver arrives before plugin starts\",\n\t\t\tcallTimeout: 0 * time.Second,\n\t\t\tpluginDelay: 3 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout zero but kms-plugin already up - still failure - zero timeout is an invalid value\",\n\t\t\tcallTimeout: 0 * time.Second,\n\t\t\tpluginDelay: 0 * time.Second,\n\t\t\tkubeAPIServerDelay: 2 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout greater than kms-plugin delay - expect success\",\n\t\t\tcallTimeout: 6 * time.Second,\n\t\t\tpluginDelay: 3 * time.Second,\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout less than kms-plugin delay - expect failure\",\n\t\t\tcallTimeout: 3 * time.Second,\n\t\t\tpluginDelay: 6 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t}\n\n\tfor _, tt := range testCases {\n\t\ttt := tt\n\t\tt.Run(tt.desc, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tvar (\n\t\t\t\tservice Service\n\t\t\t\terr error\n\t\t\t\tdata = []byte(\"test data\")\n\t\t\t\tkubeAPIServerWG sync.WaitGroup\n\t\t\t\tkmsPluginWG sync.WaitGroup\n\t\t\t\ttestCompletedWG sync.WaitGroup\n\t\t\t\tsocketName = newEndpoint()\n\t\t\t)\n\n\t\t\ttestCompletedWG.Add(1)\n\t\t\tdefer testCompletedWG.Done()\n\n\t\t\tctx := testContext(t)\n\n\t\t\tkubeAPIServerWG.Add(1)\n\t\t\tgo func() {\n\t\t\t\t// Simulating late start of kube-apiserver - plugin is up before kube-apiserver, if requested by the testcase.\n\t\t\t\ttime.Sleep(tt.kubeAPIServerDelay)\n\n\t\t\t\tservice, err = NewGRPCService(ctx, 
socketName.endpoint, tt.callTimeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to create envelope service, error: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer destroyService(service)\n\t\t\t\tkubeAPIServerWG.Done()\n\t\t\t\t// Keeping kube-apiserver up to process requests.\n\t\t\t\ttestCompletedWG.Wait()\n\t\t\t}()\n\n\t\t\tkmsPluginWG.Add(1)\n\t\t\tgo func() {\n\t\t\t\t// Simulating delayed start of kms-plugin, kube-apiserver is up before the plugin, if requested by the testcase.\n\t\t\t\ttime.Sleep(tt.pluginDelay)\n\n\t\t\t\t_ = mock.NewBase64Plugin(t, socketName.path)\n\n\t\t\t\tkmsPluginWG.Done()\n\t\t\t\t// Keeping plugin up to process requests.\n\t\t\t\ttestCompletedWG.Wait()\n\t\t\t}()\n\n\t\t\tkubeAPIServerWG.Wait()\n\t\t\tif t.Failed() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, err = service.Encrypt(data)\n\n\t\t\tif err == nil && tt.wantErr != \"\" {\n\t\t\t\tt.Fatalf(\"got nil, want %s\", tt.wantErr)\n\t\t\t}\n\n\t\t\tif err != nil && tt.wantErr == \"\" {\n\t\t\t\tt.Fatalf(\"got %q, want nil\", err.Error())\n\t\t\t}\n\n\t\t\t// Collecting kms-plugin - allowing plugin to clean-up.\n\t\t\tkmsPluginWG.Wait()\n\t\t})\n\t}\n}",
"func (rf *Raft) isElectionTimeout() bool {\n\td := rf.elapsed - rf.electionTimeout\n\tif d < 0 {\n\t\treturn false\n\t}\n\tdif := rf.rand.Int() % rf.electionTimeout\n\tif d > dif {\n\t\tDPrintf(\"[[email protected]][%d] return ture , because d[%d] > deff[%d], rf.elapsed=[%d]\", rf.me, d, dif, rf.elapsed)\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
"func GetRandomElectionTimeout() time.Duration {\n\treturn time.Duration(minElectionTimeout+rand.Intn(maxElectionTimeout-minElectionTimeout)) * time.Millisecond\n}",
"func TestThatAByzantineLeaderCanNotCauseAForkBySendingTwoBlocks(t *testing.T) {\n\ttest.WithContextWithTimeout(t, 15*time.Second, func(ctx context.Context) {\n\t\tblock1 := mocks.ABlock(interfaces.GenesisBlock)\n\t\tnet := network.\n\t\t\tNewTestNetworkBuilder().\n\t\t\tWithNodeCount(4).\n\t\t\tWithTimeBasedElectionTrigger(1000 * time.Millisecond).\n\t\t\tWithBlocks(block1).\n\t\t\tBuild(ctx)\n\n\t\tnode0 := net.Nodes[0]\n\t\tnode1 := net.Nodes[1]\n\t\tnode2 := net.Nodes[2]\n\n\t\tnode0.Communication.SetOutgoingWhitelist([]primitives.MemberId{\n\t\t\tnode1.MemberId,\n\t\t\tnode2.MemberId,\n\t\t})\n\n\t\t// the leader (node0) is suggesting block1 to node1 and node2 (not to node3)\n\t\tnet.StartConsensus(ctx)\n\n\t\t// node0, node1 and node2 should reach consensus\n\t\tnet.WaitUntilNodesEventuallyCommitASpecificBlock(ctx, t, 0, block1, node0, node1, node2)\n\t})\n}",
"func TestServerAliveInterval(t *testing.T) {\n\tt.Parallel()\n\tf := newFixtureWithoutDiskBasedLogging(t)\n\n\tok, _, err := f.ssh.clt.SendRequest(context.Background(), teleport.KeepAliveReqType, true, nil)\n\trequire.NoError(t, err)\n\trequire.True(t, ok)\n}",
"func TestElect(t *testing.T) {\n\traftConf := &RaftConfig{MemberRegSocket: \"127.0.0.1:9999\", PeerSocket: \"127.0.0.1:9009\", TimeoutInMillis: 1500, HbTimeoutInMillis: 50, LogDirectoryPath: \"logs\", StableStoreDirectoryPath: \"./stable\"}\n\n\t// delete stored state to avoid unnecessary effect on following test cases\n\tdeleteState(raftConf.StableStoreDirectoryPath)\n\n\t// launch cluster proxy servers\n\tcluster.NewProxyWithConfig(RaftToClusterConf(raftConf))\n\n\tfmt.Println(\"Started Proxy\")\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tserverCount := 5\n\traftServers := make([]Raft, serverCount+1)\n\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\t// create cluster.Server\n\t\tclusterServer, err := cluster.NewWithConfig(i, \"127.0.0.1\", 5000+i, RaftToClusterConf(raftConf))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating cluster server. \" + err.Error())\n\t\t\treturn\n\t\t}\n\t\ts, err := NewWithConfig(clusterServer, raftConf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating Raft servers. \" + err.Error())\n\t\t\treturn\n\t\t}\n\t\traftServers[i] = s\n\t}\n\n\t// there should be a leader after sufficiently long duration\n\tcount := 0\n\ttime.Sleep(10 * time.Second)\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif raftServers[i].isLeader() {\n\t\t\tfmt.Println(\"Server \" + strconv.Itoa(i) + \" was chosen as leader.\")\n\t\t\tcount++\n\t\t}\n\t}\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen\")\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestLeaderStartReplication tests that when receiving client proposals, the leader appends the proposal to its log as a new entry, then issues AppendEntries RPCs in parallel to each of the other servers to replicate the entry. Also, when sending an AppendEntries RPC, the leader includes the index and term of the entry in its log that immediately precedes the new entries. Also, it writes the new entry into stable storage. Reference: section 5.3
|
// TestLeaderStartReplication tests that when receiving client proposals, the
// leader appends the proposal to its log as a new entry, then issues
// AppendEntries RPCs in parallel to each of the other servers to replicate
// the entry. Also, when sending an AppendEntries RPC, the leader includes
// the index and term of the entry in its log that immediately precedes the
// new entries. Also, it writes the new entry into stable storage.
// Reference: raft paper, section 5.3.
func TestLeaderStartReplication(t *testing.T) {
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)
	defer closeAndFreeRaft(r)
	r.becomeCandidate()
	r.becomeLeader()
	// Commit the leader's initial no-op entry so the log starts clean.
	commitNoopEntry(r, s)
	li := r.raftLog.lastIndex()
	// Propose a client entry to the leader.
	ents := []pb.Entry{{Data: []byte("some data")}}
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})
	// The proposal is appended to the log but not yet committed.
	if g := r.raftLog.lastIndex(); g != li+1 {
		t.Errorf("lastIndex = %d, want %d", g, li+1)
	}
	if g := r.raftLog.committed; g != li {
		t.Errorf("committed = %d, want %d", g, li)
	}
	msgs := r.readMessages()
	sort.Sort(messageSlice(msgs))
	wents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte("some data")}}
	// Expect one MsgApp per follower, each carrying the new entry plus the
	// index/term (Index, LogTerm) of the entry preceding it.
	wmsgs := []pb.Message{
		{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
			To: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},
		{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
			To: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},
	}
	if !reflect.DeepEqual(msgs, wmsgs) {
		t.Errorf("msgs = %+v, want %+v", msgs, wmsgs)
	}
	// The new entry must also be staged for stable storage.
	if g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {
		t.Errorf("ents = %+v, want %+v", g, wents)
	}
}
|
[
"func TestLogReplication1(t *testing.T) {\n\n\tack := make(chan bool)\n\n\t//Get leader\n\tleaderId := raft.GetLeaderId()\n\n\t//Append a log entry to leader as client\n\traft.InsertFakeLogEntry(leaderId)\n\n\tleaderLog := raft.GetLogAsString(leaderId)\n\t// log.Println(leaderLog)\n\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack //Wait for 1 second for log replication to happen\n\n\t//Get logs of all others and compare with each\n\tfor i := 0; i < 5; i++ {\n\t\tcheckIfExpected(t, raft.GetLogAsString(i), leaderLog)\n\t}\n\n}",
"func Test_LeaderChanges(t *testing.T) {\n\t//giving time to elect a new leader\n\ttime.Sleep(time.Second * 1)\n\t//Start a timer here and verify that Append call doesn't succeed and timer times out which means S1 is partitioned--PENDING\n\tconst n int = 4\n\tset1 := \"set abc 20 8\\r\\nabcdefjg\\r\\n\"\n\texpected := []bool{false, true, false, false}\n\tchann := make([]chan LogEntry, n)\n\tr := [n]*Raft{r0, r2, r3, r4}\n\tfor k := 0; k < n; k++ {\n\t\tchann[k] = make(chan LogEntry)\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tgo r[i].Client(chann[i], set1)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tresponse := <-chann[i]\n\t\tcommitStatus := response.Committed()\n\t\tif expected[i] != commitStatus {\n\t\t\tt.Error(\"Mismatch!\", expected, string(response.Data()))\n\t\t}\n\t}\n}",
"func TestLogReplication2(t *testing.T) {\n\tack := make(chan bool)\n\n\t//Kill one server\n\traft.KillServer(1)\n\n\t//Append log to server\n\ttime.AfterFunc(1*time.Second, func() {\n\t\t//Get leader\n\t\tleaderId := raft.GetLeaderId()\n\n\t\t//Append a log entry to leader as client\n\t\traft.InsertFakeLogEntry(leaderId)\n\t})\n\n\t//Resurrect old server after enough time for other to move on\n\ttime.AfterFunc(2*time.Second, func() {\n\t\t//Resurrect old server\n\t\traft.ResurrectServer(1)\n\t})\n\n\t//Check log after some time to see if it matches with current leader\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tleaderLog := raft.GetLogAsString(leaderId)\n\t\tserverLog := raft.GetLogAsString(1)\n\n\t\tcheckIfExpected(t, serverLog, leaderLog)\n\n\t\tack <- true\n\t})\n\n\t<-ack\n\n}",
"func TestStartFixesReplicationData(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tcell := \"cell1\"\n\tts := memorytopo.NewServer(ctx, cell, \"cell2\")\n\ttm := newTestTM(t, ts, 1, \"ks\", \"0\")\n\tdefer tm.Stop()\n\ttabletAlias := tm.tabletAlias\n\n\tsri, err := ts.GetShardReplication(ctx, cell, \"ks\", \"0\")\n\trequire.NoError(t, err)\n\tutils.MustMatch(t, tabletAlias, sri.Nodes[0].TabletAlias)\n\n\t// Remove the ShardReplication record, try to create the\n\t// tablets again, make sure it's fixed.\n\terr = topo.RemoveShardReplicationRecord(ctx, ts, cell, \"ks\", \"0\", tabletAlias)\n\trequire.NoError(t, err)\n\tsri, err = ts.GetShardReplication(ctx, cell, \"ks\", \"0\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, 0, len(sri.Nodes))\n\n\t// An initTablet will recreate the shard replication data.\n\terr = tm.initTablet(context.Background())\n\trequire.NoError(t, err)\n\n\tsri, err = ts.GetShardReplication(ctx, cell, \"ks\", \"0\")\n\trequire.NoError(t, err)\n\tutils.MustMatch(t, tabletAlias, sri.Nodes[0].TabletAlias)\n}",
"func (client *FakeTabletManagerClient) StartReplication(ctx context.Context, tablet *topodatapb.Tablet, semiSync bool) error {\n\treturn nil\n}",
"func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}",
"func (r *Raft) startReplication() {\n\tinConfig := make(map[ServerID]bool, len(r.configurations.latest.Servers))\n\tlastIdx := r.getLastIndex()\n\n\t// Start replication goroutines that need starting\n\tfor _, server := range r.configurations.latest.Servers {\n\t\tif server.ID == r.localID {\n\t\t\tcontinue\n\t\t}\n\n\t\tinConfig[server.ID] = true\n\n\t\treplication, ok := r.leaderState.replState[server.ID]\n\t\tif !ok {\n\t\t\tklog.Infof(fmt.Sprintf(\"leader:%s/%s start a new follower replication for follower:%s/%s\",\n\t\t\t\tr.localID, r.localAddr, server.ID, server.Address))\n\n\t\t\treplication = &followerReplication{\n\t\t\t\tcurrentTerm: r.getCurrentTerm(),\n\t\t\t\tnextIndex: lastIdx + 1,\n\t\t\t\tpeer: server,\n\t\t\t\tcommitment: r.leaderState.commitment,\n\t\t\t\tstepDown: r.leaderState.stepDown,\n\t\t\t\tlastContact: time.Now(),\n\t\t\t\tstopCh: make(chan uint64, 1),\n\t\t\t\ttriggerCh: make(chan struct{}, 1), // buffer channel\n\t\t\t\ttriggerDeferErrorCh: make(chan *deferError, 1),\n\t\t\t\tnotify: make(map[*verifyFuture]struct{}),\n\t\t\t\tnotifyCh: make(chan struct{}, 1),\n\t\t\t}\n\t\t\tr.leaderState.replState[server.ID] = replication\n\t\t\tgo r.replicate(replication)\n\n\t\t\tselect {\n\t\t\tcase replication.triggerCh <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\tr.observe(PeerObservation{Peer: server, Removed: false})\n\t\t} else {\n\t\t\treplication.peerLock.RLock()\n\t\t\tpeer := replication.peer\n\t\t\treplication.peerLock.RUnlock()\n\t\t\tif peer.Address != server.Address {\n\t\t\t\tklog.Infof(fmt.Sprintf(\"live change the peer address for %s/%s\", server.ID, server.Address))\n\t\t\t\treplication.peerLock.Lock()\n\t\t\t\tpeer.Address = server.Address\n\t\t\t\treplication.peerLock.Unlock()\n\t\t\t}\n\t\t}\n\t}\n\n\t// Stop replication goroutines that need stopping\n\t// Before removing a peer, it'll instruct the replication routines to try to replicate to the current index.\n\tfor serverID, repl := range r.leaderState.replState {\n\t\tif 
inConfig[serverID] {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Replicate up to lastIdx and stop\n\t\tklog.Infof(fmt.Sprintf(\"removed peer:%s/%s from leader:%s/%s peers, and stopping replication until up to lastIndex:%d\",\n\t\t\trepl.peer.ID, repl.peer.Address, r.localID, r.localAddr, lastIdx))\n\t\trepl.stopCh <- lastIdx\n\t\tclose(repl.stopCh)\n\t\tdelete(r.leaderState.replState, serverID)\n\t\tr.observe(PeerObservation{Peer: repl.peer, Removed: true})\n\t}\n}",
"func TestLearnerLogReplication(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tnt := newNetwork(n1, n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t// n1 is leader and n2 is learner\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif !n2.isLearner {\n\t\tt.Error(\"peer 2 state: not learner, want yes\")\n\t}\n\n\tnextCommitted := n1.raftLog.committed + 1\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"somedata\")}}})\n\tif n1.raftLog.committed != nextCommitted {\n\t\tt.Errorf(\"peer 1 wants committed to %d, but still %d\", nextCommitted, n1.raftLog.committed)\n\t}\n\n\tif n1.raftLog.committed != n2.raftLog.committed {\n\t\tt.Errorf(\"peer 2 wants committed to %d, but still %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n\n\tmatch := n1.getProgress(2).Match\n\tif match != n2.raftLog.committed {\n\t\tt.Errorf(\"progress 2 of leader 1 wants match %d, but got %d\", n2.raftLog.committed, match)\n\t}\n}",
"func TestPartition(t *testing.T) {\n\traftConf := &RaftConfig{MemberRegSocket: \"127.0.0.1:8124\", PeerSocket: \"127.0.0.1:9987\", TimeoutInMillis: 1500, HbTimeoutInMillis: 150, LogDirectoryPath: \"logs1\", StableStoreDirectoryPath: \"./stable1\", RaftLogDirectoryPath: \"../LocalLog1\"}\n\n\t// delete stored state to avoid unnecessary effect on following test cases\n\tinitState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)\n\n\t// launch cluster proxy servers\n\tcluster.NewProxyWithConfig(RaftToClusterConf(raftConf))\n\n\tfmt.Println(\"Started Proxy\")\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tserverCount := 5\n\traftServers := make([]raft.Raft, serverCount+1)\n\tpseudoClusters := make([]*test.PseudoCluster, serverCount+1)\n\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\t// create cluster.Server\n\t\tclusterServer, err := cluster.NewWithConfig(i, \"127.0.0.1\", 8500+i, RaftToClusterConf(raftConf))\n\t\tpseudoCluster := test.NewPseudoCluster(clusterServer)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating cluster server. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tlogStore, err := llog.Create(raftConf.RaftLogDirectoryPath + \"/\" + strconv.Itoa(i))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating log. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\ts, err := NewWithConfig(pseudoCluster, logStore, raftConf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating Raft servers. 
\" + err.Error())\n\t\t\treturn\n\t\t}\n\t\traftServers[i] = s\n\t\tpseudoClusters[i] = pseudoCluster\n\t}\n\n\t// wait for leader to be elected\n\ttime.Sleep(20 * time.Second)\n\tcount := 0\n\toldLeader := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\toldLeader = i\n\t\t\tcount++\n\t\t}\n\t}\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in 1 minute\")\n\t\treturn\n\t}\n\n\t// isolate Leader and any one follower\n\tfollower := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif i != oldLeader {\n\t\t\tfollower = i\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"Server \" + strconv.Itoa(follower) + \" was chosen as follower in minority partition\")\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tpseudoClusters[oldLeader].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[oldLeader].AddToOutboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToOutboxFilter(raftServers[i].Pid())\n\t}\n\n\tpseudoClusters[oldLeader].AddToOutboxFilter(cluster.BROADCAST)\n\tpseudoClusters[follower].AddToOutboxFilter(cluster.BROADCAST)\n\n\t// wait for other servers to discover that leader\n\t// has crashed and to elect a new leader\n\ttime.Sleep(20 * time.Second)\n\n\tcount = 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\n\t\tif i != oldLeader && i != follower && raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\tfmt.Println(\"Server \" + strconv.Itoa(i) + \" was chosen as new leader in majority partition.\")\n\t\t\tcount++\n\t\t}\n\t}\n\t// new leader must be chosen\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in majority partition\")\n\t}\n}",
"func runReplication(t *testing.T, updates, inserts int) {\n\tt.Run(fmt.Sprintf(\"replicate-%v-%v\", updates, inserts), func(t *testing.T) {\n\t\twriter := make(commit.Channel, 1024)\n\t\tobject := map[string]interface{}{\n\t\t\t\"float64\": float64(0),\n\t\t\t\"int32\": int32(0),\n\t\t\t\"string\": \"\",\n\t\t}\n\n\t\t// Create a primary\n\t\tprimary := NewCollection(Options{\n\t\t\tCapacity: inserts,\n\t\t\tWriter: &writer,\n\t\t})\n\t\t// Replica with the same schema\n\t\treplica := NewCollection(Options{\n\t\t\tCapacity: inserts,\n\t\t})\n\n\t\t// Create schemas and start streaming replication into the replica\n\t\tprimary.CreateColumnsOf(object)\n\t\treplica.CreateColumnsOf(object)\n\t\tvar done sync.WaitGroup\n\t\tgo func() {\n\t\t\tdone.Add(1)\n\t\t\tdefer done.Done()\n\t\t\tfor change := range writer {\n\t\t\t\tassert.NoError(t, replica.Replay(change))\n\t\t\t}\n\t\t}()\n\n\t\t// Write some objects\n\t\tfor i := 0; i < inserts; i++ {\n\t\t\tprimary.Insert(object)\n\t\t}\n\n\t\t// Random concurrent updates\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(updates)\n\t\tfor i := 0; i < updates; i++ {\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\toffset := uint32(rand.Int31n(int32(inserts - 1)))\n\t\t\t\tswitch rand.Int31n(3) {\n\t\t\t\tcase 0:\n\t\t\t\t\tprimary.UpdateAt(offset, \"float64\", math.Round(rand.Float64()*1000)/100)\n\t\t\t\tcase 1:\n\t\t\t\t\tprimary.UpdateAt(offset, \"int32\", rand.Int31n(100000))\n\t\t\t\tcase 2:\n\t\t\t\t\tprimary.UpdateAt(offset, \"string\", fmt.Sprintf(\"hi %v\", rand.Int31n(100)))\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\t// Replay all of the changes into the replica\n\t\twg.Wait()\n\t\tclose(writer)\n\t\tdone.Wait()\n\n\t\t// Check if replica and primary are the same\n\t\tassert.Equal(t, primary.Count(), replica.Count())\n\t\tprimary.Query(func(txn *Txn) error {\n\t\t\treturn txn.Range(\"float64\", func(v Cursor) bool {\n\t\t\t\tv1, v2 := v.FloatAt(\"float64\"), v.IntAt(\"int32\")\n\t\t\t\tif v1 != 0 {\n\t\t\t\t\tclone, _ := 
replica.Fetch(v.idx)\n\t\t\t\t\tassert.Equal(t, v.FloatAt(\"float64\"), clone.FloatAt(\"float64\"))\n\t\t\t\t}\n\n\t\t\t\tif v2 != 0 {\n\t\t\t\t\tclone, _ := replica.Fetch(v.idx)\n\t\t\t\t\tassert.Equal(t, v.IntAt(\"int32\"), clone.IntAt(\"int32\"))\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t})\n\t\t})\n\t})\n}",
"func TestNormalReplication(t *testing.T) {\n\tctl := gomock.NewController(t)\n\tdefer ctl.Finish()\n\n\tnextSeq := int64(5)\n\tmockServiceClient := storagemock.NewMockWriteServiceClient(ctl)\n\tmockServiceClient.EXPECT().Next(gomock.Any(), gomock.Any()).Return(&storage.NextSeqResponse{\n\t\tSeq: nextSeq,\n\t}, nil)\n\n\tdone := make(chan struct{})\n\tmockClientStream := storagemock.NewMockWriteService_WriteClient(ctl)\n\tmockClientStream.EXPECT().Recv().DoAndReturn(func() (*storage.WriteResponse, error) {\n\t\t<-done\n\t\treturn nil, errors.New(\"stream canceled\")\n\t})\n\n\t// replica 5~15\n\twr1, _ := buildWriteRequest(5, 15)\n\tmockClientStream.EXPECT().Send(wr1).Return(nil)\n\n\t// replica 15 ~ 20\n\twr2, _ := buildWriteRequest(15, 20)\n\tmockClientStream.EXPECT().Send(wr2).Return(nil)\n\n\tmockFct := rpc.NewMockClientStreamFactory(ctl)\n\tmockFct.EXPECT().CreateWriteServiceClient(node).Return(mockServiceClient, nil)\n\tmockFct.EXPECT().LogicNode().Return(node)\n\tmockFct.EXPECT().CreateWriteClient(database, shardID, node).Return(mockClientStream, nil)\n\n\tmockFanOut := queue.NewMockFanOut(ctl)\n\tmockFanOut.EXPECT().SetHeadSeq(nextSeq).Return(nil)\n\n\tfor i := 5; i < 20; i++ {\n\t\tmockFanOut.EXPECT().Consume().Return(int64(i))\n\t\tmockFanOut.EXPECT().Get(int64(i)).Return(buildMessageBytes(i), nil)\n\t}\n\tmockFanOut.EXPECT().Consume().Return(queue.SeqNoNewMessageAvailable).AnyTimes()\n\n\trep := newReplicator(node, database, shardID, mockFanOut, mockFct)\n\n\ttime.Sleep(time.Second * 2)\n\trep.Stop()\n\tclose(done)\n}",
"func TestRaftNewLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftNewLeader\"\n\n\t// Create n1 node.\n\tstorage := NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm1 := newTestFSM(ID1)\n\t// NOTE we use different cluster ID for nodes within same cluster to avoid\n\t// registering same metric path twice. This should never happen in real world\n\t// because we'll never run nodes of a same cluster within one process.\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t// Create n2 node.\n\tstorage = NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Wait until a leader is elected.\n\tselect {\n\tcase <-fsm1.leaderCh:\n\tcase <-fsm2.leaderCh:\n\t}\n}",
"func (r *Raft) startReplication(state *leaderState, peer net.Addr) {\n\ts := &followerReplication{\n\t\tpeer: peer,\n\t\tinflight: state.inflight,\n\t\tstopCh: make(chan struct{}),\n\t\ttriggerCh: make(chan struct{}, 1),\n\t\tmatchIndex: r.getLastLogIndex(),\n\t\tnextIndex: r.getLastLogIndex() + 1,\n\t}\n\tstate.replicationState[peer.String()] = s\n\tgo r.replicate(s)\n}",
"func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, 
leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}",
"func TestMultiNodeStart(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}\n\tccdata, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected marshal error: %v\", err)\n\t}\n\twants := []Ready{\n\t\t{\n\t\t\tSoftState: &SoftState{Lead: 1, RaftState: StateLeader},\n\t\t\tHardState: raftpb.HardState{Term: 2, Commit: 2, Vote: 1},\n\t\t\tEntries: []raftpb.Entry{\n\t\t\t\t{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},\n\t\t\t\t{Term: 2, Index: 2},\n\t\t\t},\n\t\t\tCommittedEntries: []raftpb.Entry{\n\t\t\t\t{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},\n\t\t\t\t{Term: 2, Index: 2},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tHardState: raftpb.HardState{Term: 2, Commit: 3, Vote: 1},\n\t\t\tEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte(\"foo\")}},\n\t\t\tCommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte(\"foo\")}},\n\t\t},\n\t}\n\tmn := StartMultiNode(1)\n\tstorage := NewMemoryStorage()\n\tmn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}})\n\tmn.Campaign(ctx, 1)\n\tgs := <-mn.Ready()\n\tg := gs[1]\n\tif !reflect.DeepEqual(g, wants[0]) {\n\t\tt.Fatalf(\"#%d: g = %+v,\\n w %+v\", 1, g, wants[0])\n\t} else {\n\t\tstorage.Append(g.Entries)\n\t\tmn.Advance(gs)\n\t}\n\n\tmn.Propose(ctx, 1, []byte(\"foo\"))\n\tif gs2 := <-mn.Ready(); !reflect.DeepEqual(gs2[1], wants[1]) {\n\t\tt.Errorf(\"#%d: g = %+v,\\n w %+v\", 2, gs2[1], wants[1])\n\t} else {\n\t\tstorage.Append(gs2[1].Entries)\n\t\tmn.Advance(gs2)\n\t}\n\n\tselect {\n\tcase rd := <-mn.Ready():\n\t\tt.Errorf(\"unexpected Ready: %+v\", rd)\n\tcase <-time.After(time.Millisecond):\n\t}\n}",
"func TestSetLocalHeadSeqSuccess(t *testing.T) {\n\tctl := gomock.NewController(t)\n\tdefer ctl.Finish()\n\n\tnextSeq := int64(5)\n\tmockServiceClient := storagemock.NewMockWriteServiceClient(ctl)\n\tmockServiceClient.EXPECT().Next(gomock.Any(), gomock.Any()).Return(&storage.NextSeqResponse{\n\t\tSeq: nextSeq,\n\t}, nil)\n\n\tmockFct := rpc.NewMockClientStreamFactory(ctl)\n\tmockFct.EXPECT().CreateWriteServiceClient(node).Return(mockServiceClient, nil)\n\tmockFct.EXPECT().LogicNode().Return(node)\n\tmockFct.EXPECT().CreateWriteClient(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New(\"create stream client error\"))\n\n\tdone := make(chan struct{})\n\tmockFct.EXPECT().CreateWriteServiceClient(node).DoAndReturn(func(_ models.Node) (storage.WriteServiceClient, error) {\n\t\tclose(done)\n\t\t// wait for <- done to stop replica\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\treturn nil, errors.New(\"get service client error any\")\n\t})\n\n\tmockFanOut := queue.NewMockFanOut(ctl)\n\tmockFanOut.EXPECT().SetHeadSeq(nextSeq).Return(nil)\n\n\trep := newReplicator(node, database, shardID, mockFanOut, mockFct)\n\n\t<-done\n\trep.Stop()\n}",
"func TestSplitCloneV2_NoMasterAvailable(t *testing.T) {\n\tdelay := discovery.GetTabletPickerRetryDelay()\n\tdefer func() {\n\t\tdiscovery.SetTabletPickerRetryDelay(delay)\n\t}()\n\tdiscovery.SetTabletPickerRetryDelay(5 * time.Millisecond)\n\n\ttc := &splitCloneTestCase{t: t}\n\ttc.setUp(false /* v3 */)\n\tdefer tc.tearDown()\n\n\t// Only wait 1 ms between retries, so that the test passes faster.\n\t*executeFetchRetryTime = 1 * time.Millisecond\n\n\t// leftReplica will take over for the last, 30th, insert and the vreplication checkpoint.\n\ttc.leftReplicaFakeDb.AddExpectedQuery(\"INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*\", nil)\n\n\t// During the 29th write, let the MASTER disappear.\n\ttc.leftMasterFakeDb.GetEntry(28).AfterFunc = func() {\n\t\tt.Logf(\"setting MASTER tablet to REPLICA\")\n\t\ttc.leftMasterQs.UpdateType(topodatapb.TabletType_REPLICA)\n\t\ttc.leftMasterQs.AddDefaultHealthResponse()\n\t}\n\n\t// If the HealthCheck didn't pick up the change yet, the 30th write would\n\t// succeed. To prevent this from happening, replace it with an error.\n\ttc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28)\n\ttc.leftMasterFakeDb.AddExpectedQuery(\"INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*\", errReadOnly)\n\ttc.leftMasterFakeDb.EnableInfinite()\n\t// vtworker may not retry on leftMaster again if HealthCheck picks up the\n\t// change very fast. In that case, the error was never encountered.\n\t// Delete it or verifyAllExecutedOrFail() will fail because it was not\n\t// processed.\n\tdefer tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28)\n\n\t// Wait for a retry due to NoMasterAvailable to happen, expect the 30th write\n\t// on leftReplica and change leftReplica from REPLICA to MASTER.\n\t//\n\t// Reset the stats now. 
It also happens when the worker starts but that's too\n\t// late because this Go routine looks at it and can run before the worker.\n\tstatsRetryCounters.ResetAll()\n\tgo func() {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\tdefer cancel()\n\n\t\tfor {\n\t\t\tretries := statsRetryCounters.Counts()[retryCategoryNoMasterAvailable]\n\t\t\tif retries >= 1 {\n\t\t\t\tt.Logf(\"retried on no MASTER %v times\", retries)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tpanic(fmt.Errorf(\"timed out waiting for vtworker to retry due to NoMasterAvailable: %v\", ctx.Err()))\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t// Poll constantly.\n\t\t\t}\n\t\t}\n\n\t\t// Make leftReplica the new MASTER.\n\t\ttc.leftReplica.TM.ChangeType(ctx, topodatapb.TabletType_MASTER)\n\t\tt.Logf(\"resetting tablet back to MASTER\")\n\t\ttc.leftReplicaQs.UpdateType(topodatapb.TabletType_MASTER)\n\t\ttc.leftReplicaQs.AddDefaultHealthResponse()\n\t}()\n\n\t// Run the vtworker command.\n\tif err := runCommand(t, tc.wi, tc.wi.wr, tc.defaultWorkerArgs); err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func (r *Raft) runLeader() {\n\tstate := leaderState{\n\t\tcommitCh: make(chan *DeferLog, 128),\n\t\treplicationState: make(map[string]*followerReplication),\n\t}\n\tdefer state.Release()\n\n\t// Initialize inflight tracker\n\tstate.inflight = NewInflight(state.commitCh)\n\n\tr.peerLock.Lock()\n\t// Start a replication routine for each peer\n\tfor _, peer := range r.peers {\n\t\tr.startReplication(&state, peer)\n\t}\n\tr.peerLock.Unlock()\n\n\t// seal leadership\n\tgo r.leaderNoop()\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase applyLog := <-r.applyCh:\n\t\t\t// Prepare log\n\t\t\tapplyLog.log.Index = r.getLastLogIndex() + 1\n\t\t\tapplyLog.log.Term = r.getCurrentTerm()\n\t\t\t// Write the log entry locally\n\t\t\tif err := r.logs.StoreLog(&applyLog.log); err != nil {\n\t\t\t\tr.logE.Printf(\"Failed to commit log: %w\", err)\n\t\t\t\tapplyLog.response = err\n\t\t\t\tapplyLog.Response()\n\t\t\t\tr.setState(Follower)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add this to the inflight logs\n\t\t\tstate.inflight.Start(applyLog, r.quorumSize())\n\t\t\tstate.inflight.Commit(applyLog.log.Index)\n\t\t\t// Update the last log since it's on disk now\n\t\t\tr.setLastLogIndex(applyLog.log.Index)\n\n\t\t\t// Notify the replicators of the new log\n\t\t\tfor _, f := range state.replicationState {\n\t\t\t\tasyncNotifyCh(f.triggerCh)\n\t\t\t}\n\n\t\tcase commitLog := <-state.commitCh:\n\t\t\t// Increment the commit index\n\t\t\tidx := commitLog.log.Index\n\t\t\tr.setCommitIndex(idx)\n\n\t\t\t// Perform leader-specific processing\n\t\t\ttransition = r.leaderProcessLog(&state, &commitLog.log)\n\n\t\t\t// Trigger applying logs locally\n\t\t\tr.commitCh <- commitTuple{idx, commitLog}\n\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Leader state, 
got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tDPrintf(\"peer-%d ----------------------Start()-----------------------\", rf.me)\n\tindex := -1\n\tterm := -1\n\tisLeader := true\n\n\t// Your code here (2B).\n\t//term, isLeader = rf.GetState()\n\trf.mu.Lock()\n\tterm = rf.currentTerm\n\tif rf.state != Leader {\n\t\tisLeader = false\n\t}\n\tif isLeader {\n\t\t// Append the command into its own rf.log\n\t\tvar newlog LogEntry\n\t\tnewlog.Term = rf.currentTerm\n\t\tnewlog.Command = command\n\t\trf.log = append(rf.log, newlog)\n\t\trf.persist()\n\t\tindex = len(rf.log) // the 3rd return value.\n\t\trf.repCount[index] = 1\n\t\t// now the log entry is appended into leader's log.\n\t\trf.mu.Unlock()\n\n\t\t// start agreement and return immediately.\n\t\tfor peer_index, _ := range rf.peers {\n\t\t\tif peer_index == rf.me {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// send AppendEntries RPC to each peer. And decide when it is safe to apply a log entry to the state machine.\n\t\t\tgo func(i int) {\n\t\t\t\trf.mu.Lock()\n\t\t\t\tnextIndex_copy := make([]int, len(rf.peers))\n\t\t\t\tcopy(nextIndex_copy, rf.nextIndex)\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tfor {\n\t\t\t\t\t// make a copy of current leader's state.\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t// we should not send RPC if rf.currentTerm != term, the log entry will be sent in later AE-RPCs in args.Entries.\n\t\t\t\t\tif rf.state != Leader || rf.currentTerm != term {\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// make a copy of leader's raft state.\n\t\t\t\t\tcommitIndex_copy := rf.commitIndex // during the agreement, commitIndex may increase.\n\t\t\t\t\tlog_copy := make([]LogEntry, len(rf.log)) // during the agreement, log could grow.\n\t\t\t\t\tcopy(log_copy, rf.log)\n\t\t\t\t\trf.mu.Unlock()\n\n\t\t\t\t\tvar args AppendEntriesArgs\n\t\t\t\t\tvar reply AppendEntriesReply\n\t\t\t\t\targs.Term = term\n\t\t\t\t\targs.LeaderId = rf.me\n\t\t\t\t\targs.LeaderCommit = commitIndex_copy\n\t\t\t\t\t// 
If last log index >= nextIndex for a follower: send AppendEntries RPC with log entries starting at nextIndex\n\t\t\t\t\t// NOTE: nextIndex is just a predication. not a precise value.\n\t\t\t\t\targs.PrevLogIndex = nextIndex_copy[i] - 1\n\t\t\t\t\tif args.PrevLogIndex > 0 {\n\t\t\t\t\t\t// FIXME: when will this case happen??\n\t\t\t\t\t\tif args.PrevLogIndex > len(log_copy) {\n\t\t\t\t\t\t\t// TDPrintf(\"adjust PrevLogIndex.\")\n\t\t\t\t\t\t\t//return\n\t\t\t\t\t\t\targs.PrevLogIndex = len(log_copy)\n\t\t\t\t\t\t}\n\t\t\t\t\t\targs.PrevLogTerm = log_copy[args.PrevLogIndex-1].Term\n\t\t\t\t\t}\n\t\t\t\t\targs.Entries = make([]LogEntry, len(log_copy)-args.PrevLogIndex)\n\t\t\t\t\tcopy(args.Entries, log_copy[args.PrevLogIndex:len(log_copy)])\n\t\t\t\t\tok := rf.sendAppendEntries(i, &args, &reply)\n\t\t\t\t\t// handle RPC reply in the same goroutine.\n\t\t\t\t\tif ok == true {\n\t\t\t\t\t\tif reply.Success == true {\n\t\t\t\t\t\t\t// this case means that the log entry is replicated successfully.\n\t\t\t\t\t\t\tDPrintf(\"peer-%d AppendEntries success!\", rf.me)\n\t\t\t\t\t\t\t// re-establish the assumption.\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\tif rf.state != Leader || rf.currentTerm != term {\n\t\t\t\t\t\t\t\t//Figure-8 and p-8~9: never commits log entries from previous terms by counting replicas!\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// NOTE: TA's QA: nextIndex[i] should not decrease, so check and set.\n\t\t\t\t\t\t\tif index >= rf.nextIndex[i] {\n\t\t\t\t\t\t\t\trf.nextIndex[i] = index + 1\n\t\t\t\t\t\t\t\t// TA's QA\n\t\t\t\t\t\t\t\trf.matchIndex[i] = args.PrevLogIndex + len(args.Entries) // matchIndex is not used in my implementation.\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// test whether we can update the leader's commitIndex.\n\t\t\t\t\t\t\trf.repCount[index]++\n\t\t\t\t\t\t\t// update leader's commitIndex! 
We can determine that Figure-8's case will not occur now,\n\t\t\t\t\t\t\t// because we have test rf.currentTerm == term_copy before, so we will never commit log entries from previous terms.\n\t\t\t\t\t\t\tif rf.commitIndex < index && rf.repCount[index] > len(rf.peers)/2 {\n\t\t\t\t\t\t\t\t// apply the command.\n\t\t\t\t\t\t\t\tDPrintf(\"peer-%d Leader moves its commitIndex from %d to %d.\", rf.me, rf.commitIndex, index)\n\t\t\t\t\t\t\t\t// NOTE: the Leader should commit one by one.\n\t\t\t\t\t\t\t\trf.commitIndex = index\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\t// now the command at commitIndex is committed.\n\t\t\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\t\t\trf.canApplyCh <- true\n\t\t\t\t\t\t\t\t}()\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn // jump out of the loop.\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// AppendEntries RPC fails because of log inconsistency: Decrement nextIndex and retry\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\t// re-establish the assumption.\n\t\t\t\t\t\t\tif rf.state != Leader || rf.currentTerm != term {\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif reply.Term > rf.currentTerm {\n\t\t\t\t\t\t\t\trf.state = Follower\n\t\t\t\t\t\t\t\trf.currentTerm = reply.Term\n\t\t\t\t\t\t\t\trf.persist()\n\t\t\t\t\t\t\t\trf.resetElectionTimeout()\n\t\t\t\t\t\t\t\tDPrintf(\"peer-%d degenerate from Leader into Follower!!!\", rf.me)\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\trf.nonleaderCh <- true\n\t\t\t\t\t\t\t\t// don't try to send AppendEntries RPC to others then, rf is not the leader.\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// NOTE: the nextIndex[i] should never < 1\n\t\t\t\t\t\t\t\tconflict_term := reply.ConflictTerm\n\t\t\t\t\t\t\t\tconflict_index := reply.ConflictIndex\n\t\t\t\t\t\t\t\t// refer to TA's guide blog.\n\t\t\t\t\t\t\t\t// first, try to find the first index of conflict_term in leader's log.\n\t\t\t\t\t\t\t\tfound 
:= false\n\t\t\t\t\t\t\t\tnew_next_index := conflict_index // at least 1\n\t\t\t\t\t\t\t\tfor j := 0; j < len(rf.log); j++ {\n\t\t\t\t\t\t\t\t\tif rf.log[j].Term == conflict_term {\n\t\t\t\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\t\t\t} else if rf.log[j].Term > conflict_term {\n\t\t\t\t\t\t\t\t\t\tif found {\n\t\t\t\t\t\t\t\t\t\t\tnew_next_index = j + 1\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tnextIndex_copy[i] = new_next_index\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\t// now retry to send AppendEntries RPC to peer-i.\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// RPC fails. Retry!\n\t\t\t\t\t\t// when network partition\n\t\t\t\t\t\ttime.Sleep(time.Millisecond * time.Duration(100))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(peer_index)\n\t\t}\n\t} else {\n\t\trf.mu.Unlock()\n\t}\n\n\treturn index, term, isLeader\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestLeaderCommitEntry tests that when the entry has been safely replicated, the leader gives out the applied entries, which can be applied to its state machine. Also, the leader keeps track of the highest index it knows to be committed, and it includes that index in future AppendEntries RPCs so that the other servers eventually find out. Reference: section 5.3
|
// TestLeaderCommitEntry verifies that once an entry has been replicated to a
// quorum, the leader (a) advances its commit index past that entry, (b) hands
// the entry out via nextEnts so it can be applied to the state machine, and
// (c) advertises the new commit index in the follow-up MsgApp messages so the
// other servers eventually learn of it. Reference: section 5.3.
func TestLeaderCommitEntry(t *testing.T) {
	storage := NewMemoryStorage()
	rn := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)
	defer closeAndFreeRaft(rn)

	// Elect node 1 as leader and commit the no-op entry appended on election.
	rn.becomeCandidate()
	rn.becomeLeader()
	commitNoopEntry(rn, storage)
	lastIndex := rn.raftLog.lastIndex()

	// Propose one entry and have every peer accept it.
	rn.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
	for _, msg := range rn.readMessages() {
		rn.Step(acceptAndReply(msg))
	}

	// Quorum reached: the proposal must now be committed and applicable.
	if g := rn.raftLog.committed; g != lastIndex+1 {
		t.Errorf("committed = %d, want %d", g, lastIndex+1)
	}
	expected := []pb.Entry{{Index: lastIndex + 1, Term: 1, Data: []byte("some data")}}
	if g := rn.raftLog.nextEnts(); !reflect.DeepEqual(g, expected) {
		t.Errorf("nextEnts = %+v, want %+v", g, expected)
	}

	// Each outbound MsgApp must carry the advanced commit index to its peer.
	outbound := rn.readMessages()
	sort.Sort(messageSlice(outbound))
	for i, msg := range outbound {
		if want := uint64(i + 2); msg.To != want {
			t.Errorf("to = %x, want %x", msg.To, want)
		}
		if msg.Type != pb.MsgApp {
			t.Errorf("type = %v, want %v", msg.Type, pb.MsgApp)
		}
		if msg.Commit != lastIndex+1 {
			t.Errorf("commit = %d, want %d", msg.Commit, lastIndex+1)
		}
	}
}
|
[
"func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}",
"func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}",
"func TestCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\n\t// elect 1 as the new leader with term 2\n\t// after append a ChangeTerm entry from the current term, all entries\n\t// should be committed\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\tif sm.raftLog.committed != 4 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 4)\n\t}\n}",
"func Test_LeaderChanges(t *testing.T) {\n\t//giving time to elect a new leader\n\ttime.Sleep(time.Second * 1)\n\t//Start a timer here and verify that Append call doesn't succeed and timer times out which means S1 is partitioned--PENDING\n\tconst n int = 4\n\tset1 := \"set abc 20 8\\r\\nabcdefjg\\r\\n\"\n\texpected := []bool{false, true, false, false}\n\tchann := make([]chan LogEntry, n)\n\tr := [n]*Raft{r0, r2, r3, r4}\n\tfor k := 0; k < n; k++ {\n\t\tchann[k] = make(chan LogEntry)\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tgo r[i].Client(chann[i], set1)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tresponse := <-chann[i]\n\t\tcommitStatus := response.Committed()\n\t\tif expected[i] != commitStatus {\n\t\t\tt.Error(\"Mismatch!\", expected, string(response.Data()))\n\t\t}\n\t}\n}",
"func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}",
"func TestCannotCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\t// avoid committing ChangeTerm proposal\n\ttt.ignore(pb.MsgApp)\n\n\t// elect 2 as the new leader with term 2\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// no log entries from previous term should be committed\n\tsm = tt.peers[2].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\ttt.recover()\n\t// send heartbeat; reset wait\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\t// append an entry at current term\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\t// expect the committed to be advanced\n\tif sm.raftLog.committed != 5 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 5)\n\t}\n}",
"func TestUpdateEntry(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\thdbt, err := newHDBTesterDeps(t.Name(), &disableScanLoopDeps{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test 1: try calling updateEntry with a blank host. Result should be a\n\t// host with len 2 scan history.\n\tsomeErr := errors.New(\"testing err\")\n\tentry1 := modules.HostDBEntry{\n\t\tPublicKey: types.SiaPublicKey{\n\t\t\tKey: []byte{1},\n\t\t},\n\t}\n\tentry2 := modules.HostDBEntry{\n\t\tPublicKey: types.SiaPublicKey{\n\t\t\tKey: []byte{2},\n\t\t},\n\t}\n\n\t// Try inserting the first entry. Result in the host tree should be a host\n\t// with a scan history length of two.\n\thdbt.hdb.updateEntry(entry1, nil)\n\tupdatedEntry, exists := hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 2 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Success || !updatedEntry.ScanHistory[1].Success {\n\t\tt.Error(\"new entry was not given success values despite a successful scan\")\n\t}\n\n\t// Try inserting the second entry, but with an error. 
Results should largely\n\t// be the same.\n\thdbt.hdb.updateEntry(entry2, someErr)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 2 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif updatedEntry.ScanHistory[0].Success || updatedEntry.ScanHistory[1].Success {\n\t\tt.Error(\"new entry was not given success values despite a successful scan\")\n\t}\n\n\t// Insert the first entry twice more, with no error. There should be 4\n\t// entries, and the timestamps should be strictly increasing.\n\thdbt.hdb.updateEntry(entry1, nil)\n\thdbt.hdb.updateEntry(entry1, nil)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 4 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[1].Timestamp.Before(updatedEntry.ScanHistory[2].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[2].Timestamp.Before(updatedEntry.ScanHistory[3].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[2].Success || !updatedEntry.ScanHistory[3].Success {\n\t\tt.Error(\"new entries did not get added with successful timestamps\")\n\t}\n\n\t// Add a non-successful scan and verify that it is registered properly.\n\thdbt.hdb.updateEntry(entry1, someErr)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif 
len(updatedEntry.ScanHistory) != 5 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[3].Success || updatedEntry.ScanHistory[4].Success {\n\t\tt.Error(\"new entries did not get added with successful timestamps\")\n\t}\n\n\t// Prefix an invalid entry to have a scan from more than maxHostDowntime\n\t// days ago. At less than minScans total, the host should not be deleted\n\t// upon update.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Entry should still exist.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\t// Add enough entries to get to minScans total length. 
When that length is\n\t// reached, the entry should be deleted.\n\tfor i := len(updatedEntry.ScanHistory); i < minScans; i++ {\n\t\thdbt.hdb.updateEntry(entry2, someErr)\n\t}\n\t// The entry should no longer exist in the hostdb, wiped for being offline.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif exists {\n\t\tt.Fatal(\"entry should have been purged for being offline for too long\")\n\t}\n\n\t// Trigger compression on entry1 by adding a past scan and then adding\n\t// unsuccessful scans until compression happens.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{Timestamp: time.Now().Add(maxHostDowntime * -1).Add(time.Hour * -1)}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i := len(updatedEntry.ScanHistory); i <= minScans; i++ {\n\t\thdbt.hdb.updateEntry(entry1, someErr)\n\t}\n\t// The result should be compression, and not the entry getting deleted.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"entry should not have been purged for being offline for too long\")\n\t}\n\tif len(updatedEntry.ScanHistory) != minScans {\n\t\tt.Error(\"expecting a different number of scans\", len(updatedEntry.ScanHistory))\n\t}\n\tif updatedEntry.HistoricDowntime == 0 {\n\t\tt.Error(\"host reporting historic downtime?\")\n\t}\n\tif updatedEntry.HistoricUptime != 0 {\n\t\tt.Error(\"host not reporting historic uptime?\")\n\t}\n\n\t// Repeat triggering compression, but with uptime this time.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{Success: true, Timestamp: time.Now().Add(time.Hour * 24 * 11 * 
-1)}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\thdbt.hdb.updateEntry(entry1, someErr)\n\t// The result should be compression, and not the entry getting deleted.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"entry should not have been purged for being offline for too long\")\n\t}\n\tif len(updatedEntry.ScanHistory) != minScans+1 {\n\t\tt.Error(\"expecting a different number of scans\")\n\t}\n\tif updatedEntry.HistoricUptime == 0 {\n\t\tt.Error(\"host not reporting historic uptime?\")\n\t}\n}",
"func applyEntrySupervisor(rs *RaftServer) {\n\tfor {\n\t\tselect {\n\t\tcase r := <-rs.aerCh:\n\t\t\trs.lock.Lock()\n\t\t\tif r.Term > rs.currentTerm {\n\t\t\t\tbecomeFollower(rs, r.Term)\n\t\t\t}\n\n\t\t\tif r.Success {\n\t\t\t\tentries := r.Entries\n\t\t\t\tfor i, e := range *entries {\n\t\t\t\t\trs.matchIndex[r.PeerId] = rs.nextIndex[r.PeerId]\n\t\t\t\t\trs.nextIndex[r.PeerId]++\n\n\t\t\t\t\tidx := r.LeaderPrevLogIndex + 1 + int64(i)\n\t\t\t\t\tif idx > rs.commitIndex && rs.log[idx].Term == rs.currentTerm {\n\t\t\t\t\t\t// Check if we have a majority to commit:\n\t\t\t\t\t\tc := 0\n\t\t\t\t\t\tfor i, mi := range rs.matchIndex {\n\t\t\t\t\t\t\tif i != rs.serverId && mi >= idx {\n\t\t\t\t\t\t\t\tc++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif c >= len(rs.peerAddrs)/2+1 {\n\t\t\t\t\t\t\tlog.Printf(\"====Commited LogEntryIndex %v across %v nodes====\", idx, c)\n\t\t\t\t\t\t\trs.commitIndex = idx\n\t\t\t\t\t\t\trs.stateMachine.Apply(e)\n\t\t\t\t\t\t\trs.lastApplied = idx\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if r.Error == ErrLogInconsistent {\n\t\t\t\tlog.Println(\"Sending another AppendEntry() to resolve log inconsistency\")\n\t\t\t\t// retry with more data (eventually they will line up):\n\t\t\t\trs.nextIndex[r.PeerId]--\n\t\t\t\taes := map[string]*AppendEntryReq{\n\t\t\t\t\trs.peerAddrs[r.PeerId]: rs.getAppendEntryReqForPeer(r.PeerId),\n\t\t\t\t}\n\t\t\t\tsendAppendEntries(aes, rs.aerCh)\n\t\t\t}\n\t\t\trs.lock.Unlock()\n\t\tcase <-rs.killCh:\n\t\t\tlog.Println(\"Shutting off applyEntrySupervisor. . .\")\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (instance *cache) CommitEntry(key string, content Cacheable) (ce *Entry, xerr fail.Error) {\n\tif instance.isNull() {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif key = strings.TrimSpace(key); key == \"\" {\n\t\treturn nil, fail.InvalidParameterCannotBeEmptyStringError(\"key\")\n\t}\n\n\tinstance.lock.Lock()\n\tdefer instance.lock.Unlock()\n\n\treturn instance.unsafeCommitEntry(key, content)\n}",
"func TestCommitAfterRemoveNode(t *testing.T) {\n\t// Create a cluster with two nodes.\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\t// Begin to remove the second node.\n\tcc := pb.ConfChange{\n\t\tType: pb.ConfChangeRemoveNode,\n\t\tReplicaID: 2,\n\t}\n\tccData, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryConfChange, Data: ccData},\n\t\t},\n\t})\n\t// Stabilize the log and make sure nothing is committed yet.\n\tif ents := nextEnts(r, s); len(ents) > 0 {\n\t\tt.Fatalf(\"unexpected committed entries: %v\", ents)\n\t}\n\tccIndex := r.raftLog.lastIndex()\n\n\t// While the config change is pending, make another proposal.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryNormal, Data: []byte(\"hello\")},\n\t\t},\n\t})\n\n\t// Node 2 acknowledges the config change, committing it.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgAppResp,\n\t\tFrom: 2,\n\t\tIndex: ccIndex,\n\t})\n\tents := nextEnts(r, s)\n\tif len(ents) != 2 {\n\t\tt.Fatalf(\"expected two committed entries, got %v\", ents)\n\t}\n\tif ents[0].Type != pb.EntryNormal || ents[0].Data != nil {\n\t\tt.Fatalf(\"expected ents[0] to be empty, but got %v\", ents[0])\n\t}\n\tif ents[1].Type != pb.EntryConfChange {\n\t\tt.Fatalf(\"expected ents[1] to be EntryConfChange, got %v\", ents[1])\n\t}\n\n\t// Apply the config change. This reduces quorum requirements so the\n\t// pending command can now commit.\n\tr.removeNode(2)\n\tents = nextEnts(r, s)\n\tif len(ents) != 1 || ents[0].Type != pb.EntryNormal ||\n\t\tstring(ents[0].Data) != \"hello\" {\n\t\tt.Fatalf(\"expected one committed EntryNormal, got %v\", ents)\n\t}\n}",
"func TestCommit(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\ths := New()\n\tkeys := testutil.GenerateKeys(t, n, testutil.GenerateECDSAKey)\n\tbl := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tacceptor := mocks.NewMockAcceptor(ctrl)\n\texecutor := mocks.NewMockExecutor(ctrl)\n\tsynchronizer := synchronizer.New(testutil.FixedTimeout(1000))\n\tcfg, replicas := testutil.CreateMockConfigWithReplicas(t, ctrl, n, keys...)\n\tbl[0].Register(hs, cfg, acceptor, executor, synchronizer, leaderrotation.NewFixed(2))\n\thl := bl.Build()\n\tsigners := hl.Signers()\n\n\t// create the needed blocks and QCs\n\tgenesisQC := hotstuff.NewQuorumCert(nil, 0, hotstuff.GetGenesis().Hash())\n\tb1 := testutil.NewProposeMsg(hotstuff.GetGenesis().Hash(), genesisQC, \"1\", 1, 2)\n\tb1QC := testutil.CreateQC(t, b1.Block, signers)\n\tb2 := testutil.NewProposeMsg(b1.Block.Hash(), b1QC, \"2\", 2, 2)\n\tb2QC := testutil.CreateQC(t, b2.Block, signers)\n\tb3 := testutil.NewProposeMsg(b2.Block.Hash(), b2QC, \"3\", 3, 2)\n\tb3QC := testutil.CreateQC(t, b3.Block, signers)\n\tb4 := testutil.NewProposeMsg(b3.Block.Hash(), b3QC, \"4\", 4, 2)\n\n\t// the second replica will be the leader, so we expect it to receive votes\n\treplicas[1].EXPECT().Vote(gomock.Any()).AnyTimes()\n\treplicas[1].EXPECT().NewView(gomock.Any()).AnyTimes()\n\n\t// executor will check that the correct command is executed\n\texecutor.EXPECT().Exec(gomock.Any()).Do(func(arg interface{}) {\n\t\tif arg.(hotstuff.Command) != b1.Block.Command() {\n\t\t\tt.Errorf(\"Wrong command executed: got: %s, want: %s\", arg, b1.Block.Command())\n\t\t}\n\t})\n\n\t// acceptor expects to receive the commands in 
order\n\tgomock.InOrder(\n\t\tacceptor.EXPECT().Proposed(gomock.Any()),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"1\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"1\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"2\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"2\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"3\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"3\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"4\")).Return(true),\n\t)\n\n\ths.OnPropose(b1)\n\ths.OnPropose(b2)\n\ths.OnPropose(b3)\n\ths.OnPropose(b4)\n}",
"func testCommit(t *testing.T, myApp app.BaseApp, h int64) []byte {\n\t// Commit first block, make sure non-nil hash\n\theader := abci.Header{Height: h}\n\tmyApp.BeginBlock(abci.RequestBeginBlock{Header: header})\n\tmyApp.EndBlock(abci.RequestEndBlock{})\n\tcres := myApp.Commit()\n\thash := cres.Data\n\tassert.NotEmpty(t, hash)\n\treturn hash\n}",
"func TestReadOnlyForNewLeader(t *testing.T) {\n\tnodeConfigs := []struct {\n\t\tid uint64\n\t\tcommitted uint64\n\t\tapplied uint64\n\t\tcompact_index uint64\n\t}{\n\t\t{1, 1, 1, 0},\n\t\t{2, 2, 2, 2},\n\t\t{3, 2, 2, 2},\n\t}\n\tpeers := make([]stateMachine, 0)\n\tfor _, c := range nodeConfigs {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}})\n\t\tstorage.SetHardState(pb.HardState{Term: 1, Commit: c.committed})\n\t\tif c.compact_index != 0 {\n\t\t\tstorage.Compact(c.compact_index)\n\t\t}\n\t\tcfg := newTestConfig(c.id, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tcfg.Applied = c.applied\n\t\traft := newRaft(cfg)\n\t\tpeers = append(peers, raft)\n\t}\n\tnt := newNetwork(peers...)\n\n\t// Drop MsgApp to forbid peer a to commit any log entry at its term after it becomes leader.\n\tnt.ignore(pb.MsgApp)\n\t// Force peer a to become leader.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"state = %s, want %s\", sm.state, StateLeader)\n\t}\n\n\t// Ensure peer a drops read only request.\n\tvar windex uint64 = 4\n\twctx := []byte(\"ctx\")\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 0 {\n\t\tt.Fatalf(\"len(readStates) = %d, want zero\", len(sm.readStates))\n\t}\n\n\tnt.recover()\n\n\t// Force peer a to commit a log entry at its term\n\tfor i := 0; i < sm.heartbeatTimeout; i++ {\n\t\tsm.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\tif sm.raftLog.committed != 4 {\n\t\tt.Fatalf(\"committed = %d, want 4\", sm.raftLog.committed)\n\t}\n\tlastLogTerm := sm.raftLog.zeroTermOnErrCompacted(sm.raftLog.term(sm.raftLog.committed))\n\tif lastLogTerm != sm.Term {\n\t\tt.Fatalf(\"last log term = %d, want %d\", lastLogTerm, sm.Term)\n\t}\n\n\t// Ensure peer a accepts read only request after it 
commits a entry at its term.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 1 {\n\t\tt.Fatalf(\"len(readStates) = %d, want 1\", len(sm.readStates))\n\t}\n\trs := sm.readStates[0]\n\tif rs.Index != windex {\n\t\tt.Fatalf(\"readIndex = %d, want %d\", rs.Index, windex)\n\t}\n\tif !bytes.Equal(rs.RequestCtx, wctx) {\n\t\tt.Fatalf(\"requestCtx = %v, want %v\", rs.RequestCtx, wctx)\n\t}\n}",
"func (rf *Raft) updateCommit(newCommitIndex int) {\n\n\tif newCommitIndex < rf.commitIndex {\n\t\tpanic(fmt.Sprintf(\"Server %v: new commit index %v is lower than previous one %v\\n\", rf.me, newCommitIndex, rf.commitIndex))\n\t}\n\n\trf.commitIndex = newCommitIndex\n\trf.debug(\"New commit index: %v\\n\", rf.commitIndex)\n\n\tif rf.commitIndex > rf.lastEntryIndex() {\n\t\tpanic(fmt.Sprintf(\"Server %v: new commit index is bigger than log size (%v, %v)\\n\", rf.me, rf.commitIndex, rf.lastEntryIndex()))\n\t}\n}",
"func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. 
In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}",
"func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, 
leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}",
"func (rc *ResourceCache) CommitEntry(key string, content cache.Cacheable) (ce *cache.Entry, xerr fail.Error) {\n\tif rc.isNull() {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif key == \"\" {\n\t\treturn nil, fail.InvalidParameterCannotBeEmptyStringError(\"key\")\n\t}\n\n\trc.lock.Lock()\n\tdefer rc.lock.Unlock()\n\n\treturn rc.unsafeCommitEntry(key, content)\n}",
"func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}",
"func TestCacheUpdatesAfterCommit(t *testing.T) {\n\tvdbEnv.init(t, []string{\"lscc\", \"_lifecycle\"})\n\tdefer vdbEnv.cleanup()\n\n\tchainID := \"testcacheupdatesaftercommit\"\n\tdb, err := vdbEnv.DBProvider.GetDBHandle(chainID, nil)\n\trequire.NoError(t, err)\n\n\t// scenario: cache has 4 keys while the commit operation\n\t// updates 2 of those keys, delete the remaining 2 keys, and\n\t// adds a new key. At the end of the commit operation, only\n\t// those 2 keys should be present with the recent value\n\t// in the cache and the new key should not be present in the cache.\n\n\t// store 4 keys in the db\n\tbatch := statedb.NewUpdateBatch()\n\tvv1 := &statedb.VersionedValue{Value: []byte(\"value1\"), Metadata: []byte(\"meta1\"), Version: version.NewHeight(1, 2)}\n\tvv2 := &statedb.VersionedValue{Value: []byte(\"value2\"), Metadata: []byte(\"meta2\"), Version: version.NewHeight(1, 2)}\n\tvv3 := &statedb.VersionedValue{Value: []byte(\"value3\"), Metadata: []byte(\"meta3\"), Version: version.NewHeight(1, 2)}\n\tvv4 := &statedb.VersionedValue{Value: []byte(\"value4\"), Metadata: []byte(\"meta4\"), Version: version.NewHeight(1, 2)}\n\n\tbatch.PutValAndMetadata(\"ns1\", \"key1\", vv1.Value, vv1.Metadata, vv1.Version)\n\tbatch.PutValAndMetadata(\"ns1\", \"key2\", vv2.Value, vv2.Metadata, vv2.Version)\n\tbatch.PutValAndMetadata(\"ns2\", \"key1\", vv3.Value, vv3.Metadata, vv3.Version)\n\tbatch.PutValAndMetadata(\"ns2\", \"key2\", vv4.Value, vv4.Metadata, vv4.Version)\n\tsavePoint := version.NewHeight(1, 5)\n\trequire.NoError(t, db.ApplyUpdates(batch, savePoint))\n\n\t// key1, key2 in ns1 and ns2 would not be in cache\n\ttestDoesNotExistInCache(t, vdbEnv.cache, chainID, \"ns1\", \"key1\")\n\ttestDoesNotExistInCache(t, vdbEnv.cache, chainID, \"ns1\", \"key2\")\n\ttestDoesNotExistInCache(t, vdbEnv.cache, chainID, \"ns2\", \"key1\")\n\ttestDoesNotExistInCache(t, vdbEnv.cache, chainID, \"ns2\", \"key2\")\n\n\t// add key1 and key2 from ns1 to the cache\n\t_, err = 
db.GetState(\"ns1\", \"key1\")\n\trequire.NoError(t, err)\n\t_, err = db.GetState(\"ns1\", \"key2\")\n\trequire.NoError(t, err)\n\t// add key1 and key2 from ns2 to the cache\n\t_, err = db.GetState(\"ns2\", \"key1\")\n\trequire.NoError(t, err)\n\t_, err = db.GetState(\"ns2\", \"key2\")\n\trequire.NoError(t, err)\n\n\tv, err := vdbEnv.cache.getState(chainID, \"ns1\", \"key1\")\n\trequire.NoError(t, err)\n\tns1key1rev := string(v.AdditionalInfo)\n\n\tv, err = vdbEnv.cache.getState(chainID, \"ns1\", \"key2\")\n\trequire.NoError(t, err)\n\tns1key2rev := string(v.AdditionalInfo)\n\n\t// update key1 and key2 in ns1. delete key1 and key2 in ns2. add a new key3 in ns2.\n\tbatch = statedb.NewUpdateBatch()\n\tvv1Update := &statedb.VersionedValue{Value: []byte(\"new-value1\"), Metadata: []byte(\"meta1\"), Version: version.NewHeight(2, 2)}\n\tvv2Update := &statedb.VersionedValue{Value: []byte(\"new-value2\"), Metadata: []byte(\"meta2\"), Version: version.NewHeight(2, 2)}\n\tvv3Update := &statedb.VersionedValue{Version: version.NewHeight(2, 4)}\n\tvv4Update := &statedb.VersionedValue{Version: version.NewHeight(2, 5)}\n\tvv5 := &statedb.VersionedValue{Value: []byte(\"value5\"), Metadata: []byte(\"meta5\"), Version: version.NewHeight(1, 2)}\n\n\tbatch.PutValAndMetadata(\"ns1\", \"key1\", vv1Update.Value, vv1Update.Metadata, vv1Update.Version)\n\tbatch.PutValAndMetadata(\"ns1\", \"key2\", vv2Update.Value, vv2Update.Metadata, vv2Update.Version)\n\tbatch.Delete(\"ns2\", \"key1\", vv3Update.Version)\n\tbatch.Delete(\"ns2\", \"key2\", vv4Update.Version)\n\tbatch.PutValAndMetadata(\"ns2\", \"key3\", vv5.Value, vv5.Metadata, vv5.Version)\n\tsavePoint = version.NewHeight(2, 5)\n\trequire.NoError(t, db.ApplyUpdates(batch, savePoint))\n\n\t// cache should have only the update key1 and key2 in ns1\n\tcacheValue, err := vdbEnv.cache.getState(chainID, \"ns1\", \"key1\")\n\trequire.NoError(t, err)\n\tvv, err := constructVersionedValue(cacheValue)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 
vv1Update, vv)\n\trequire.NotEqual(t, ns1key1rev, string(cacheValue.AdditionalInfo))\n\n\tcacheValue, err = vdbEnv.cache.getState(chainID, \"ns1\", \"key2\")\n\trequire.NoError(t, err)\n\tvv, err = constructVersionedValue(cacheValue)\n\trequire.NoError(t, err)\n\trequire.Equal(t, vv2Update, vv)\n\trequire.NotEqual(t, ns1key2rev, string(cacheValue.AdditionalInfo))\n\n\ttestDoesNotExistInCache(t, vdbEnv.cache, chainID, \"ns2\", \"key1\")\n\ttestDoesNotExistInCache(t, vdbEnv.cache, chainID, \"ns2\", \"key2\")\n\ttestDoesNotExistInCache(t, vdbEnv.cache, chainID, \"ns2\", \"key3\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestLeaderAcknowledgeCommit tests that a log entry is committed once the leader that created the entry has replicated it on a majority of the servers. Reference: section 5.3
|
func TestLeaderAcknowledgeCommit(t *testing.T) {
tests := []struct {
size int
acceptors map[uint64]bool
wack bool
}{
{1, nil, true},
{3, nil, false},
{3, map[uint64]bool{2: true}, true},
{3, map[uint64]bool{2: true, 3: true}, true},
{5, nil, false},
{5, map[uint64]bool{2: true}, false},
{5, map[uint64]bool{2: true, 3: true}, true},
{5, map[uint64]bool{2: true, 3: true, 4: true}, true},
{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},
}
for i, tt := range tests {
s := NewMemoryStorage()
r := newTestRaft(1, idsBySize(tt.size), 10, 1, s)
defer closeAndFreeRaft(r)
r.becomeCandidate()
r.becomeLeader()
commitNoopEntry(r, s)
li := r.raftLog.lastIndex()
r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
for _, m := range r.readMessages() {
if tt.acceptors[m.To] {
r.Step(acceptAndReply(m))
}
}
if g := r.raftLog.committed > li; g != tt.wack {
t.Errorf("#%d: ack commit = %v, want %v", i, g, tt.wack)
}
}
}
|
[
"func Test_LeaderChanges(t *testing.T) {\n\t//giving time to elect a new leader\n\ttime.Sleep(time.Second * 1)\n\t//Start a timer here and verify that Append call doesn't succeed and timer times out which means S1 is partitioned--PENDING\n\tconst n int = 4\n\tset1 := \"set abc 20 8\\r\\nabcdefjg\\r\\n\"\n\texpected := []bool{false, true, false, false}\n\tchann := make([]chan LogEntry, n)\n\tr := [n]*Raft{r0, r2, r3, r4}\n\tfor k := 0; k < n; k++ {\n\t\tchann[k] = make(chan LogEntry)\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tgo r[i].Client(chann[i], set1)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tresponse := <-chann[i]\n\t\tcommitStatus := response.Committed()\n\t\tif expected[i] != commitStatus {\n\t\t\tt.Error(\"Mismatch!\", expected, string(response.Data()))\n\t\t}\n\t}\n}",
"func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}",
"func TestCommit(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\ths := New()\n\tkeys := testutil.GenerateKeys(t, n, testutil.GenerateECDSAKey)\n\tbl := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tacceptor := mocks.NewMockAcceptor(ctrl)\n\texecutor := mocks.NewMockExecutor(ctrl)\n\tsynchronizer := synchronizer.New(testutil.FixedTimeout(1000))\n\tcfg, replicas := testutil.CreateMockConfigWithReplicas(t, ctrl, n, keys...)\n\tbl[0].Register(hs, cfg, acceptor, executor, synchronizer, leaderrotation.NewFixed(2))\n\thl := bl.Build()\n\tsigners := hl.Signers()\n\n\t// create the needed blocks and QCs\n\tgenesisQC := hotstuff.NewQuorumCert(nil, 0, hotstuff.GetGenesis().Hash())\n\tb1 := testutil.NewProposeMsg(hotstuff.GetGenesis().Hash(), genesisQC, \"1\", 1, 2)\n\tb1QC := testutil.CreateQC(t, b1.Block, signers)\n\tb2 := testutil.NewProposeMsg(b1.Block.Hash(), b1QC, \"2\", 2, 2)\n\tb2QC := testutil.CreateQC(t, b2.Block, signers)\n\tb3 := testutil.NewProposeMsg(b2.Block.Hash(), b2QC, \"3\", 3, 2)\n\tb3QC := testutil.CreateQC(t, b3.Block, signers)\n\tb4 := testutil.NewProposeMsg(b3.Block.Hash(), b3QC, \"4\", 4, 2)\n\n\t// the second replica will be the leader, so we expect it to receive votes\n\treplicas[1].EXPECT().Vote(gomock.Any()).AnyTimes()\n\treplicas[1].EXPECT().NewView(gomock.Any()).AnyTimes()\n\n\t// executor will check that the correct command is executed\n\texecutor.EXPECT().Exec(gomock.Any()).Do(func(arg interface{}) {\n\t\tif arg.(hotstuff.Command) != b1.Block.Command() {\n\t\t\tt.Errorf(\"Wrong command executed: got: %s, want: %s\", arg, b1.Block.Command())\n\t\t}\n\t})\n\n\t// acceptor expects to receive the commands in 
order\n\tgomock.InOrder(\n\t\tacceptor.EXPECT().Proposed(gomock.Any()),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"1\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"1\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"2\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"2\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"3\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"3\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"4\")).Return(true),\n\t)\n\n\ths.OnPropose(b1)\n\ths.OnPropose(b2)\n\ths.OnPropose(b3)\n\ths.OnPropose(b4)\n}",
"func (suite *KeeperTestSuite) TestSetPacketAcknowledgement() {\n\tpath := tibctesting.NewPath(suite.chainA, suite.chainB)\n\tsuite.coordinator.SetupClients(path)\n\n\tctxA := suite.chainA.GetContext()\n\tseq := uint64(10)\n\n\tstoredAckHash, found := suite.chainA.App.TIBCKeeper.PacketKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, seq)\n\tsuite.Require().False(found)\n\tsuite.Require().Nil(storedAckHash)\n\n\tackHash := []byte(\"ackhash\")\n\tsuite.chainA.App.TIBCKeeper.PacketKeeper.SetPacketAcknowledgement(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, seq, ackHash)\n\n\tstoredAckHash, found = suite.chainA.App.TIBCKeeper.PacketKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, seq)\n\tsuite.Require().True(found)\n\tsuite.Require().Equal(ackHash, storedAckHash)\n\tsuite.Require().True(suite.chainA.App.TIBCKeeper.PacketKeeper.HasPacketAcknowledgement(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, seq))\n}",
"func TestCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\n\t// elect 1 as the new leader with term 2\n\t// after append a ChangeTerm entry from the current term, all entries\n\t// should be committed\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\tif sm.raftLog.committed != 4 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 4)\n\t}\n}",
"func TestLogReplication1(t *testing.T) {\n\n\tack := make(chan bool)\n\n\t//Get leader\n\tleaderId := raft.GetLeaderId()\n\n\t//Append a log entry to leader as client\n\traft.InsertFakeLogEntry(leaderId)\n\n\tleaderLog := raft.GetLogAsString(leaderId)\n\t// log.Println(leaderLog)\n\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack //Wait for 1 second for log replication to happen\n\n\t//Get logs of all others and compare with each\n\tfor i := 0; i < 5; i++ {\n\t\tcheckIfExpected(t, raft.GetLogAsString(i), leaderLog)\n\t}\n\n}",
"func (suite *KeeperTestSuite) TestSetPacketAcknowledgement() {\n\tpath := ibctesting.NewPath(suite.chainA, suite.chainB)\n\tsuite.coordinator.Setup(path)\n\n\tctxA := suite.chainA.GetContext()\n\tseq := uint64(10)\n\n\tstoredAckHash, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)\n\tsuite.Require().False(found)\n\tsuite.Require().Nil(storedAckHash)\n\n\tackHash := []byte(\"ackhash\")\n\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq, ackHash)\n\n\tstoredAckHash, found = suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)\n\tsuite.Require().True(found)\n\tsuite.Require().Equal(ackHash, storedAckHash)\n\tsuite.Require().True(suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq))\n}",
"func TestCommitAfterRemoveNode(t *testing.T) {\n\t// Create a cluster with two nodes.\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\t// Begin to remove the second node.\n\tcc := pb.ConfChange{\n\t\tType: pb.ConfChangeRemoveNode,\n\t\tReplicaID: 2,\n\t}\n\tccData, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryConfChange, Data: ccData},\n\t\t},\n\t})\n\t// Stabilize the log and make sure nothing is committed yet.\n\tif ents := nextEnts(r, s); len(ents) > 0 {\n\t\tt.Fatalf(\"unexpected committed entries: %v\", ents)\n\t}\n\tccIndex := r.raftLog.lastIndex()\n\n\t// While the config change is pending, make another proposal.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryNormal, Data: []byte(\"hello\")},\n\t\t},\n\t})\n\n\t// Node 2 acknowledges the config change, committing it.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgAppResp,\n\t\tFrom: 2,\n\t\tIndex: ccIndex,\n\t})\n\tents := nextEnts(r, s)\n\tif len(ents) != 2 {\n\t\tt.Fatalf(\"expected two committed entries, got %v\", ents)\n\t}\n\tif ents[0].Type != pb.EntryNormal || ents[0].Data != nil {\n\t\tt.Fatalf(\"expected ents[0] to be empty, but got %v\", ents[0])\n\t}\n\tif ents[1].Type != pb.EntryConfChange {\n\t\tt.Fatalf(\"expected ents[1] to be EntryConfChange, got %v\", ents[1])\n\t}\n\n\t// Apply the config change. This reduces quorum requirements so the\n\t// pending command can now commit.\n\tr.removeNode(2)\n\tents = nextEnts(r, s)\n\tif len(ents) != 1 || ents[0].Type != pb.EntryNormal ||\n\t\tstring(ents[0].Data) != \"hello\" {\n\t\tt.Fatalf(\"expected one committed EntryNormal, got %v\", ents)\n\t}\n}",
"func (suite *KeeperTestSuite) TestAcknowledgePacket() {\n\tvar (\n\t\tpath *ibctesting.Path\n\t\tpacket types.Packet\n\t\tack = ibcmock.MockAcknowledgement\n\n\t\tchannelCap *capabilitytypes.Capability\n\t\texpError *sdkerrors.Error\n\t)\n\n\ttestCases := []testCase{\n\t\t{\"success on ordered channel\", func() {\n\t\t\tpath.SetChannelOrdered()\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\t// create packet commitment\n\t\t\terr := path.EndpointA.SendPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create packet receipt and acknowledgement\n\t\t\terr = path.EndpointB.RecvPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, true},\n\t\t{\"success on unordered channel\", func() {\n\t\t\t// setup uses an UNORDERED channel\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\n\t\t\t// create packet commitment\n\t\t\terr := path.EndpointA.SendPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create packet receipt and acknowledgement\n\t\t\terr = path.EndpointB.RecvPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, true},\n\t\t{\"packet already acknowledged ordered channel (no-op)\", func() {\n\t\t\texpError = types.ErrNoOpMsg\n\n\t\t\tpath.SetChannelOrdered()\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, 
path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\t// create packet commitment\n\t\t\terr := path.EndpointA.SendPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create packet receipt and acknowledgement\n\t\t\terr = path.EndpointB.RecvPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\t\t\terr = path.EndpointA.AcknowledgePacket(packet, ack.Acknowledgement())\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t\t{\"packet already acknowledged unordered channel (no-op)\", func() {\n\t\t\texpError = types.ErrNoOpMsg\n\n\t\t\t// setup uses an UNORDERED channel\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\n\t\t\t// create packet commitment\n\t\t\terr := path.EndpointA.SendPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create packet receipt and acknowledgement\n\t\t\terr = path.EndpointB.RecvPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\t\t\terr = path.EndpointA.AcknowledgePacket(packet, ack.Acknowledgement())\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t\t{\"channel not found\", func() {\n\t\t\texpError = types.ErrChannelNotFound\n\n\t\t\t// use wrong channel naming\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t}, false},\n\t\t{\"channel 
not open\", func() {\n\t\t\texpError = types.ErrInvalidChannelState\n\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\n\t\t\terr := path.EndpointA.SetChannelClosed()\n\t\t\tsuite.Require().NoError(err)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"capability authentication failed ORDERED\", func() {\n\t\t\texpError = types.ErrInvalidChannelCapability\n\n\t\t\tpath.SetChannelOrdered()\n\t\t\tsuite.coordinator.Setup(path)\n\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\t// create packet commitment\n\t\t\terr := path.EndpointA.SendPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create packet receipt and acknowledgement\n\t\t\terr = path.EndpointB.RecvPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = capabilitytypes.NewCapability(3)\n\t\t}, false},\n\t\t{\"packet destination port ≠ channel counterparty port\", func() {\n\t\t\texpError = types.ErrInvalidPacket\n\t\t\tsuite.coordinator.Setup(path)\n\n\t\t\t// use wrong port for dest\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"packet destination channel ID ≠ channel counterparty channel ID\", func() {\n\t\t\texpError = 
types.ErrInvalidPacket\n\t\t\tsuite.coordinator.Setup(path)\n\n\t\t\t// use wrong channel for dest\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"connection not found\", func() {\n\t\t\texpError = connectiontypes.ErrConnectionNotFound\n\t\t\tsuite.coordinator.Setup(path)\n\n\t\t\t// pass channel check\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(\n\t\t\t\tsuite.chainA.GetContext(),\n\t\t\t\tpath.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,\n\t\t\t\ttypes.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{\"connection-1000\"}, path.EndpointA.ChannelConfig.Version),\n\t\t\t)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\tsuite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"connection not OPEN\", func() {\n\t\t\texpError = connectiontypes.ErrInvalidConnectionState\n\t\t\tsuite.coordinator.SetupClients(path)\n\t\t\t// connection on chainA is in INIT\n\t\t\terr := path.EndpointA.ConnOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// pass channel check\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(\n\t\t\t\tsuite.chainA.GetContext(),\n\t\t\t\tpath.EndpointA.ChannelConfig.PortID, 
path.EndpointA.ChannelID,\n\t\t\t\ttypes.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version),\n\t\t\t)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\tsuite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"packet hasn't been sent\", func() {\n\t\t\texpError = types.ErrNoOpMsg\n\n\t\t\t// packet commitment never written\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"packet ack verification failed\", func() {\n\t\t\t// skip error code check since error occurs in light-clients\n\n\t\t\t// ack never written\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\n\t\t\t// create packet commitment\n\t\t\tpath.EndpointA.SendPacket(packet)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"packet commitment bytes do not match\", func() {\n\t\t\texpError = 
types.ErrInvalidPacket\n\n\t\t\t// setup uses an UNORDERED channel\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\n\t\t\t// create packet commitment\n\t\t\terr := path.EndpointA.SendPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create packet receipt and acknowledgement\n\t\t\terr = path.EndpointB.RecvPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\t\t\tpacket.Data = []byte(\"invalid packet commitment\")\n\t\t}, false},\n\t\t{\"next ack sequence not found\", func() {\n\t\t\texpError = types.ErrSequenceAckNotFound\n\t\t\tsuite.coordinator.SetupConnections(path)\n\n\t\t\tpath.EndpointA.ChannelID = ibctesting.FirstChannelID\n\t\t\tpath.EndpointB.ChannelID = ibctesting.FirstChannelID\n\n\t\t\t// manually creating channel prevents next sequence acknowledgement from being set\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(\n\t\t\t\tsuite.chainA.GetContext(),\n\t\t\t\tpath.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,\n\t\t\t\ttypes.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version),\n\t\t\t)\n\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\t// manually set packet commitment\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), 
types.CommitPacket(suite.chainA.App.AppCodec(), packet))\n\n\t\t\t// manually set packet acknowledgement and capability\n\t\t\tsuite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, packet.GetSequence(), types.CommitAcknowledgement(ack.Acknowledgement()))\n\n\t\t\tsuite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\t\t\tsuite.coordinator.CommitBlock(path.EndpointA.Chain, path.EndpointB.Chain)\n\n\t\t\tpath.EndpointA.UpdateClient()\n\t\t\tpath.EndpointB.UpdateClient()\n\t\t}, false},\n\t\t{\"next ack sequence mismatch ORDERED\", func() {\n\t\t\texpError = types.ErrPacketSequenceOutOfOrder\n\t\t\tpath.SetChannelOrdered()\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tpacket = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)\n\t\t\t// create packet commitment\n\t\t\terr := path.EndpointA.SendPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create packet acknowledgement\n\t\t\terr = path.EndpointB.RecvPacket(packet)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// set next sequence ack wrong\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 10)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t}\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\tsuite.Run(fmt.Sprintf(\"Case %s, %d/%d tests\", tc.msg, i, len(testCases)), func() {\n\t\t\tsuite.SetupTest() // reset\n\t\t\texpError = nil 
// must explcitly set error for failed cases\n\t\t\tpath = ibctesting.NewPath(suite.chainA, suite.chainB)\n\n\t\t\ttc.malleate()\n\n\t\t\tpacketKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t\tproof, proofHeight := path.EndpointB.QueryProof(packetKey)\n\n\t\t\terr := suite.chainA.App.GetIBCKeeper().ChannelKeeper.AcknowledgePacket(suite.chainA.GetContext(), channelCap, packet, ack.Acknowledgement(), proof, proofHeight)\n\t\t\tpc := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())\n\n\t\t\tchannelA, _ := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())\n\t\t\tsequenceAck, _ := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.NoError(err)\n\t\t\t\tsuite.Nil(pc)\n\n\t\t\t\tif channelA.Ordering == types.ORDERED {\n\t\t\t\t\tsuite.Require().Equal(packet.GetSequence()+1, sequenceAck, \"sequence not incremented in ordered channel\")\n\t\t\t\t} else {\n\t\t\t\t\tsuite.Require().Equal(uint64(1), sequenceAck, \"sequence incremented for UNORDERED channel\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tsuite.Error(err)\n\t\t\t\t// only check if expError is set, since not all error codes can be known\n\t\t\t\tif expError != nil {\n\t\t\t\t\tsuite.Require().True(errors.Is(err, expError))\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}",
"func (suite *HandlerTestSuite) TestHandleAcknowledgePacket() {\n\tvar (\n\t\tpacket channeltypes.Packet\n\t)\n\n\ttestCases := []struct {\n\t\tname string\n\t\tmalleate func()\n\t\texpPass bool\n\t}{\n\t\t{\"success: ORDERED\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, true},\n\t\t{\"success: UNORDERED\", func() {\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, true},\n\t\t{\"success: UNORDERED acknowledge out of order packet\", func() {\n\t\t\t// setup uses an UNORDERED channel\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\t// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment)\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, 
channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t}\n\t\t}, true},\n\t\t{\"failure: ORDERED acknowledge out of order packet\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\n\t\t\t// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\t\t}, false},\n\t\t{\"channel does not exist\", func() {\n\t\t\t// any non-nil value of packet is valid\n\t\t\tsuite.Require().NotNil(packet)\n\t\t}, false},\n\t\t{\"packet not received\", func() {\n\t\t\t_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t\t{\"ORDERED: packet already acknowledged (replay)\", func() {\n\t\t\tclientA, clientB, connA, connB := 
suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.AcknowledgementExecuted(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t\t{\"UNORDERED: packet already received (replay)\", func() {\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.AcknowledgementExecuted(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\n\t\tsuite.Run(tc.name, func() {\n\t\t\tsuite.SetupTest() // reset\n\t\t\tibctesting.TestHash = ibctransfertypes.FungibleTokenPacketAcknowledgement{true, \"\"}.GetBytes()\n\n\t\t\thandler := ibc.NewHandler(*suite.chainA.App.IBCKeeper)\n\n\t\t\ttc.malleate()\n\n\t\t\tpacketKey := host.KeyPacketAcknowledgement(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t\tproof, proofHeight := 
suite.chainB.QueryProof(packetKey)\n\n\t\t\tack := ibctesting.TestHash\n\n\t\t\tmsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())\n\n\t\t\t_, err := handler(suite.chainA.GetContext(), msg)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\t// replay should an error\n\t\t\t\t_, err := handler(suite.chainA.GetContext(), msg)\n\t\t\t\tsuite.Require().Error(err)\n\n\t\t\t\t// verify packet commitment was deleted\n\t\t\t\thas := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t\t\tsuite.Require().False(has)\n\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t}\n\t\t})\n\t}\n}",
"func consumerTestWithCommits(t *testing.T, testname string, msgcnt int, useChannel bool, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\tconsumerTest(t, testname+\" auto commit\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, autoCommit: true}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitMessage() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitMessageAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitOffsets() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitOffsetsAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using Commit() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitAPI}, consumeFunc, rebalanceCb)\n\n}",
"func TestLogReplication2(t *testing.T) {\n\tack := make(chan bool)\n\n\t//Kill one server\n\traft.KillServer(1)\n\n\t//Append log to server\n\ttime.AfterFunc(1*time.Second, func() {\n\t\t//Get leader\n\t\tleaderId := raft.GetLeaderId()\n\n\t\t//Append a log entry to leader as client\n\t\traft.InsertFakeLogEntry(leaderId)\n\t})\n\n\t//Resurrect old server after enough time for other to move on\n\ttime.AfterFunc(2*time.Second, func() {\n\t\t//Resurrect old server\n\t\traft.ResurrectServer(1)\n\t})\n\n\t//Check log after some time to see if it matches with current leader\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tleaderLog := raft.GetLogAsString(leaderId)\n\t\tserverLog := raft.GetLogAsString(1)\n\n\t\tcheckIfExpected(t, serverLog, leaderLog)\n\n\t\tack <- true\n\t})\n\n\t<-ack\n\n}",
"func TestCannotCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\t// avoid committing ChangeTerm proposal\n\ttt.ignore(pb.MsgApp)\n\n\t// elect 2 as the new leader with term 2\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// no log entries from previous term should be committed\n\tsm = tt.peers[2].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\ttt.recover()\n\t// send heartbeat; reset wait\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\t// append an entry at current term\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\t// expect the committed to be advanced\n\tif sm.raftLog.committed != 5 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 5)\n\t}\n}",
"func TestCommitConflictRepeat4A(t *testing.T) {\n}",
"func (c *Consumer) Commit() error {\n\tsnap := c.resetAcked()\n\tif len(snap) < 1 {\n\t\treturn nil\n\t}\n\n\tfor partitionID, offset := range snap {\n\t\t// fmt.Printf(\"$,%s,%d,%d\\n\", c.id, partitionID, offset+1)\n\t\tif err := c.zoo.Commit(c.group, c.topic, partitionID, offset+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func TestCommitConflictRace4A(t *testing.T) {\n}",
"func TestCommitConflictRollback4A(t *testing.T) {\n}",
"func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {\n\tvar (\n\t\tsuccessAck = channeltypes.NewResultAcknowledgement([]byte{byte(1)})\n\t\tfailedAck = channeltypes.NewErrorAcknowledgement(\"failed packet transfer\")\n\n\t\tchannelA, channelB ibctesting.TestChannel\n\t\ttrace types.DenomTrace\n\t\tamount sdk.Int\n\t)\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tack channeltypes.Acknowledgement\n\t\tmalleate func()\n\t\tsuccess bool // success of ack\n\t\texpPass bool\n\t}{\n\t\t{\"success ack causes no-op\", successAck, func() {\n\t\t\ttrace = types.ParseDenomTrace(types.GetPrefixedDenom(channelB.PortID, channelB.ID, sdk.DefaultBondDenom))\n\t\t}, true, true},\n\t\t{\"successful refund from source chain\", failedAck, func() {\n\t\t\tescrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)\n\t\t\ttrace = types.ParseDenomTrace(sdk.DefaultBondDenom)\n\t\t\tcoin := sdk.NewCoin(sdk.DefaultBondDenom, amount)\n\n\t\t\tsuite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))\n\t\t}, false, true},\n\t\t{\"unsuccessful refund from source\", failedAck,\n\t\t\tfunc() {\n\t\t\t\ttrace = types.ParseDenomTrace(sdk.DefaultBondDenom)\n\t\t\t}, false, false},\n\t\t{\"successful refund from with coin from external chain\", failedAck,\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)\n\t\t\t\ttrace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))\n\t\t\t\tcoin := sdk.NewCoin(trace.IBCDenom(), amount)\n\n\t\t\t\tsuite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))\n\t\t\t}, false, true},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\t\t\t_, _, _, _, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, 
channeltypes.UNORDERED)\n\t\t\tamount = sdk.NewInt(100) // must be explicitly changed\n\n\t\t\ttc.malleate()\n\n\t\t\tdata := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())\n\t\t\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())\n\t\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\t\tif tc.success {\n\t\t\t\t\tsuite.Require().Equal(int64(0), deltaAmount.Int64(), \"successful ack changed balance\")\n\t\t\t\t} else {\n\t\t\t\t\tsuite.Require().Equal(amount, deltaAmount, \"failed ack did not trigger refund\")\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (s *Service) onOffsetCommit(brokerId int32, duration time.Duration) {\n\n\t// todo:\n\t// if the commit took too long, don't count it in 'commits' but add it to the histogram?\n\t// and how do we want to handle cases where we get an error??\n\t// should we have another metric that tells us about failed commits? or a label on the counter?\n\tbrokerIdStr := fmt.Sprintf(\"%v\", brokerId)\n\ts.endToEndCommitLatency.WithLabelValues(brokerIdStr).Observe(duration.Seconds())\n\n\tif duration > s.config.Consumer.CommitSla {\n\t\treturn\n\t}\n\n\ts.endToEndCommits.Inc()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestFollowerCommitEntry tests that once a follower learns that a log entry is committed, it applies the entry to its local state machine (in log order). Reference: section 5.3
|
func TestFollowerCommitEntry(t *testing.T) {
tests := []struct {
ents []pb.Entry
commit uint64
}{
{
[]pb.Entry{
{Term: 1, Index: 1, Data: []byte("some data")},
},
1,
},
{
[]pb.Entry{
{Term: 1, Index: 1, Data: []byte("some data")},
{Term: 1, Index: 2, Data: []byte("some data2")},
},
2,
},
{
[]pb.Entry{
{Term: 1, Index: 1, Data: []byte("some data2")},
{Term: 1, Index: 2, Data: []byte("some data")},
},
2,
},
{
[]pb.Entry{
{Term: 1, Index: 1, Data: []byte("some data")},
{Term: 1, Index: 2, Data: []byte("some data2")},
},
1,
},
}
for i, tt := range tests {
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())
defer closeAndFreeRaft(r)
r.becomeFollower(1, 2)
r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})
if g := r.raftLog.committed; g != tt.commit {
t.Errorf("#%d: committed = %d, want %d", i, g, tt.commit)
}
wents := tt.ents[:int(tt.commit)]
if g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {
t.Errorf("#%d: nextEnts = %v, want %v", i, g, wents)
}
}
}
|
[
"func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}",
"func TestCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\n\t// elect 1 as the new leader with term 2\n\t// after append a ChangeTerm entry from the current term, all entries\n\t// should be committed\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\tif sm.raftLog.committed != 4 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 4)\n\t}\n}",
"func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}",
"func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, 
leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}",
"func Test_LeaderChanges(t *testing.T) {\n\t//giving time to elect a new leader\n\ttime.Sleep(time.Second * 1)\n\t//Start a timer here and verify that Append call doesn't succeed and timer times out which means S1 is partitioned--PENDING\n\tconst n int = 4\n\tset1 := \"set abc 20 8\\r\\nabcdefjg\\r\\n\"\n\texpected := []bool{false, true, false, false}\n\tchann := make([]chan LogEntry, n)\n\tr := [n]*Raft{r0, r2, r3, r4}\n\tfor k := 0; k < n; k++ {\n\t\tchann[k] = make(chan LogEntry)\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tgo r[i].Client(chann[i], set1)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tresponse := <-chann[i]\n\t\tcommitStatus := response.Committed()\n\t\tif expected[i] != commitStatus {\n\t\t\tt.Error(\"Mismatch!\", expected, string(response.Data()))\n\t\t}\n\t}\n}",
"func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}",
"func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}",
"func TestCommitAfterRemoveNode(t *testing.T) {\n\t// Create a cluster with two nodes.\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\t// Begin to remove the second node.\n\tcc := pb.ConfChange{\n\t\tType: pb.ConfChangeRemoveNode,\n\t\tReplicaID: 2,\n\t}\n\tccData, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryConfChange, Data: ccData},\n\t\t},\n\t})\n\t// Stabilize the log and make sure nothing is committed yet.\n\tif ents := nextEnts(r, s); len(ents) > 0 {\n\t\tt.Fatalf(\"unexpected committed entries: %v\", ents)\n\t}\n\tccIndex := r.raftLog.lastIndex()\n\n\t// While the config change is pending, make another proposal.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryNormal, Data: []byte(\"hello\")},\n\t\t},\n\t})\n\n\t// Node 2 acknowledges the config change, committing it.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgAppResp,\n\t\tFrom: 2,\n\t\tIndex: ccIndex,\n\t})\n\tents := nextEnts(r, s)\n\tif len(ents) != 2 {\n\t\tt.Fatalf(\"expected two committed entries, got %v\", ents)\n\t}\n\tif ents[0].Type != pb.EntryNormal || ents[0].Data != nil {\n\t\tt.Fatalf(\"expected ents[0] to be empty, but got %v\", ents[0])\n\t}\n\tif ents[1].Type != pb.EntryConfChange {\n\t\tt.Fatalf(\"expected ents[1] to be EntryConfChange, got %v\", ents[1])\n\t}\n\n\t// Apply the config change. This reduces quorum requirements so the\n\t// pending command can now commit.\n\tr.removeNode(2)\n\tents = nextEnts(r, s)\n\tif len(ents) != 1 || ents[0].Type != pb.EntryNormal ||\n\t\tstring(ents[0].Data) != \"hello\" {\n\t\tt.Fatalf(\"expected one committed EntryNormal, got %v\", ents)\n\t}\n}",
"func TestStartAsFollower(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateFollower)\n\t}\n}",
"func TestCannotCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\t// avoid committing ChangeTerm proposal\n\ttt.ignore(pb.MsgApp)\n\n\t// elect 2 as the new leader with term 2\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// no log entries from previous term should be committed\n\tsm = tt.peers[2].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\ttt.recover()\n\t// send heartbeat; reset wait\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\t// append an entry at current term\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\t// expect the committed to be advanced\n\tif sm.raftLog.committed != 5 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 5)\n\t}\n}",
"func TestStep0Committee(t *testing.T) {\n\tassert.NotPanics(t, func() {\n\t\tp, ks := consensus.MockProvisioners(10)\n\t\th := committee.NewHandler(ks[0], *p)\n\t\th.AmMember(1, 0, 10)\n\t})\n}",
"func TestRepoWrapperCommit(t *testing.T) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\n\tdefer os.Chdir(cwd)\n\n\ttempdir := tempDir(t, \"TestRepoWrapper-\")\n\tdefer os.RemoveAll(tempdir)\n\trepoRoot := filepath.Join(tempdir, \"repo\")\n\tgitInit(t, repoRoot)\n\n\tos.Chdir(repoRoot)\n\n\tf, err := ioutil.TempFile(repoRoot, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\n\tbaseFile := filepath.Base(f.Name())\n\tcontentExpected := []byte(\"foo\\n\")\n\tf.Write(contentExpected)\n\tgitAdd(t, baseFile)\n\n\trepoWrapper := RepoWrapper{}\n\trepo, err := repoWrapper.repo(repoRoot)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get a new repo, err=%v\", err)\n\t}\n\n\tw, err := repo.Worktree()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get a WorkTree for repo %v, err=%v\", repo, err)\n\t}\n\n\tname := \"TestIt\"\n\temail := \"[email protected]\"\n\tmessage := \"testit\"\n\th, err := repoWrapper.Commit(w, name, email, message, false)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to commit to repo %v, err=%v\", repo, err)\n\t}\n\n\tcIter, err := repo.Log(&git.LogOptions{From: *h})\n\terr = cIter.ForEach(func(c *object.Commit) error {\n\t\tif c.Author.Name != name {\n\t\t\tt.Fatalf(\"Expected name %v not found for %v\", name, h)\n\t\t}\n\t\tif c.Author.Email != email {\n\t\t\tt.Fatalf(\"Expected email %v not found for %v\", email, h)\n\t\t}\n\t\tif c.Message != message {\n\t\t\tt.Fatalf(\"Expected message %v not found for %v\", message, h)\n\t\t}\n\t\treturn nil\n\t})\n}",
"func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}",
"func (t *Tracker) Commit() error { return nil }",
"func TestFileEntry(t *testing.T) {\n\tstores := []struct {\n\t\tname string\n\t\tfixture func() (bundle *fileEntryTestBundle, cleanup func())\n\t}{\n\t\t{\"LocalFileEntry\", fileEntryLocalFixture},\n\t}\n\n\ttests := []func(require *require.Assertions, bundle *fileEntryTestBundle){\n\t\ttestCreate,\n\t\ttestCreateExisting,\n\t\ttestCreateFail,\n\t\ttestMoveFrom,\n\t\ttestMoveFromExisting,\n\t\ttestMoveFromWrongState,\n\t\ttestMoveFromWrongSourcePath,\n\t\ttestMove,\n\t\ttestLinkTo,\n\t\ttestDelete,\n\t\ttestDeleteFailsForPersistedFile,\n\t\ttestGetMetadataAndSetMetadata,\n\t\ttestGetMetadataFail,\n\t\ttestSetMetadataAt,\n\t\ttestGetOrSetMetadata,\n\t\ttestDeleteMetadata,\n\t\ttestRangeMetadata,\n\t}\n\n\tfor _, store := range stores {\n\t\tt.Run(store.name, func(t *testing.T) {\n\t\t\tfor _, test := range tests {\n\t\t\t\ttestName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()\n\t\t\t\tparts := strings.Split(testName, \".\")\n\t\t\t\tt.Run(parts[len(parts)-1], func(t *testing.T) {\n\t\t\t\t\trequire := require.New(t)\n\t\t\t\t\ts, cleanup := store.fixture()\n\t\t\t\t\tdefer cleanup()\n\t\t\t\t\ttest(require, s)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}",
"func TestCommitConflictRace4A(t *testing.T) {\n}",
"func TestLeaderTransferToUpToDateNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}",
"func applyEntrySupervisor(rs *RaftServer) {\n\tfor {\n\t\tselect {\n\t\tcase r := <-rs.aerCh:\n\t\t\trs.lock.Lock()\n\t\t\tif r.Term > rs.currentTerm {\n\t\t\t\tbecomeFollower(rs, r.Term)\n\t\t\t}\n\n\t\t\tif r.Success {\n\t\t\t\tentries := r.Entries\n\t\t\t\tfor i, e := range *entries {\n\t\t\t\t\trs.matchIndex[r.PeerId] = rs.nextIndex[r.PeerId]\n\t\t\t\t\trs.nextIndex[r.PeerId]++\n\n\t\t\t\t\tidx := r.LeaderPrevLogIndex + 1 + int64(i)\n\t\t\t\t\tif idx > rs.commitIndex && rs.log[idx].Term == rs.currentTerm {\n\t\t\t\t\t\t// Check if we have a majority to commit:\n\t\t\t\t\t\tc := 0\n\t\t\t\t\t\tfor i, mi := range rs.matchIndex {\n\t\t\t\t\t\t\tif i != rs.serverId && mi >= idx {\n\t\t\t\t\t\t\t\tc++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif c >= len(rs.peerAddrs)/2+1 {\n\t\t\t\t\t\t\tlog.Printf(\"====Commited LogEntryIndex %v across %v nodes====\", idx, c)\n\t\t\t\t\t\t\trs.commitIndex = idx\n\t\t\t\t\t\t\trs.stateMachine.Apply(e)\n\t\t\t\t\t\t\trs.lastApplied = idx\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if r.Error == ErrLogInconsistent {\n\t\t\t\tlog.Println(\"Sending another AppendEntry() to resolve log inconsistency\")\n\t\t\t\t// retry with more data (eventually they will line up):\n\t\t\t\trs.nextIndex[r.PeerId]--\n\t\t\t\taes := map[string]*AppendEntryReq{\n\t\t\t\t\trs.peerAddrs[r.PeerId]: rs.getAppendEntryReqForPeer(r.PeerId),\n\t\t\t\t}\n\t\t\t\tsendAppendEntries(aes, rs.aerCh)\n\t\t\t}\n\t\t\trs.lock.Unlock()\n\t\tcase <-rs.killCh:\n\t\t\tlog.Println(\"Shutting off applyEntrySupervisor. . .\")\n\t\t\treturn\n\t\t}\n\t}\n}",
"func TestCommitOverwrite4A(t *testing.T) {\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestFollowerCheckMsgApp tests that if the follower does not find an entry in its log with the same index and term as the one in AppendEntries RPC, then it refuses the new entries. Otherwise it replies that it accepts the append entries. Reference: section 5.3
|
func TestFollowerCheckMsgApp(t *testing.T) {
ents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}
tests := []struct {
term uint64
index uint64
windex uint64
wreject bool
wrejectHint uint64
}{
// match with committed entries
{0, 0, 1, false, 0},
{ents[0].Term, ents[0].Index, 1, false, 0},
// match with uncommitted entries
{ents[1].Term, ents[1].Index, 2, false, 0},
// unmatch with existing entry
{ents[0].Term, ents[1].Index, ents[1].Index, true, 2},
// unexisting entry
{ents[1].Term + 1, ents[1].Index + 1, ents[1].Index + 1, true, 2},
}
for i, tt := range tests {
storage := NewMemoryStorage()
storage.Append(ents)
r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)
defer closeAndFreeRaft(r)
r.loadState(pb.HardState{Commit: 1})
r.becomeFollower(2, 2)
r.Step(pb.Message{From: 2, FromGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},
To: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index})
msgs := r.readMessages()
wmsgs := []pb.Message{
{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},
To: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},
Type: pb.MsgAppResp, Term: 2, Index: tt.windex, Reject: tt.wreject, RejectHint: tt.wrejectHint},
}
if !reflect.DeepEqual(msgs, wmsgs) {
t.Errorf("#%d: msgs = %+v, want %+v", i, msgs, wmsgs)
}
}
}
|
[
"func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}",
"func (handler *RuleHandler) FollowerOnAppendEntries(msg iface.MsgAppendEntries, log iface.RaftLog, status iface.Status) []interface{} {\n\tactions := make([]interface{}, 0) // list of actions created\n\t// since we are hearing from the leader, reset timeout\n\tactions = append(actions, iface.ActionResetTimer{\n\t\tHalfTime: false,\n\t})\n\tactions = append(actions, iface.ActionSetLeaderLastHeard{\n\t\tInstant: time.Now(),\n\t})\n\n\t// maybe we are outdated\n\tif msg.Term > status.CurrentTerm() {\n\t\tactions = append(actions, iface.ActionSetCurrentTerm{\n\t\t\tNewCurrentTerm: msg.Term,\n\t\t})\n\t}\n\n\tprevEntry, _ := log.Get(msg.PrevLogIndex)\n\n\t// leader is outdated ?\n\tif msg.Term < status.CurrentTerm() {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I dont have previous log entry (but should)\n\tif prevEntry == nil && msg.PrevLogIndex != -1 {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I have previous log entry, but it does not match\n\tif prevEntry != nil && prevEntry.Term != msg.PrevLogTerm {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// all is ok. accept new entries\n\tactions = append(actions, iface.ReplyAppendEntries{\n\t\tAddress: status.NodeAddress(),\n\t\tSuccess: true,\n\t\tTerm: status.CurrentTerm(),\n\t})\n\n\t// if there is anything to append, do it\n\tif len(msg.Entries) > 0 {\n\t\t// delete all entries in log after PrevLogIndex\n\t\tactions = append(actions, iface.ActionDeleteLog{\n\t\t\tCount: log.LastIndex() - msg.PrevLogIndex,\n\t\t})\n\n\t\t// take care ! 
Maybe we are removing an entry\n\t\t// containing our current cluster configuration.\n\t\t// In this case, revert to previous cluster\n\t\t// configuration\n\t\tcontainsClusterChange := false\n\t\tstabilized := false\n\t\tclusterChangeIndex := status.ClusterChangeIndex()\n\t\tclusterChangeTerm := status.ClusterChangeTerm()\n\t\tcluster := append(status.PeerAddresses(), status.NodeAddress())\n\t\tfor !stabilized {\n\t\t\tstabilized = true\n\t\t\tif clusterChangeIndex > msg.PrevLogIndex {\n\t\t\t\tstabilized = false\n\t\t\t\tcontainsClusterChange = true\n\t\t\t\tentry, _ := log.Get(clusterChangeIndex)\n\t\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\t\tjson.Unmarshal(entry.Command, &record)\n\t\t\t\tclusterChangeIndex = record.OldClusterChangeIndex\n\t\t\t\tclusterChangeTerm = record.OldClusterChangeTerm\n\t\t\t\tcluster = record.OldCluster\n\t\t\t}\n\t\t}\n\n\t\t// if deletion detected, rewind to previous configuration\n\t\tif containsClusterChange {\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: clusterChangeIndex,\n\t\t\t\tNewClusterChangeTerm: clusterChangeTerm,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range cluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t}\n\n\t\t// append all entries sent by leader\n\t\tactions = append(actions, iface.ActionAppendLog{\n\t\t\tEntries: msg.Entries,\n\t\t})\n\n\t\t// once again, take care ! Maybe we are adding some entry\n\t\t// describing a cluster change. 
In such a case, we must apply\n\t\t// the new cluster configuration to ourselves (specifically,\n\t\t// the last cluster configuration among the new entries)\n\t\tfor index := len(msg.Entries) - 1; index >= 0; index-- {\n\t\t\tif msg.Entries[index].Kind != iface.EntryAddServer &&\n\t\t\t\tmsg.Entries[index].Kind != iface.EntryRemoveServer {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\tjson.Unmarshal(msg.Entries[index].Command, &record)\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: msg.PrevLogIndex + int64(index+1),\n\t\t\t\tNewClusterChangeTerm: msg.Entries[index].Term,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range record.NewCluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// if leader has committed more than we know, update our index\n\t// and demand state-machine application\n\tif msg.LeaderCommitIndex > status.CommitIndex() {\n\t\tactions = append(actions, iface.ActionSetCommitIndex{\n\t\t\tNewCommitIndex: int64(math.Min(\n\t\t\t\tfloat64(msg.LeaderCommitIndex),\n\t\t\t\tfloat64(msg.PrevLogIndex+int64(len(msg.Entries))),\n\t\t\t)),\n\t\t})\n\t\t// order the state machine to apply the new committed entries\n\t\t// (only if they are state machine commands)\n\t\t// TODO: Treat configuration change\n\t\tfor index := status.CommitIndex() + 1; index < msg.LeaderCommitIndex; index++ {\n\t\t\tvar entry *iface.LogEntry\n\n\t\t\t// get from my log\n\t\t\tif index <= msg.PrevLogIndex {\n\t\t\t\tentry, _ = log.Get(index)\n\n\t\t\t\t// get from leader\n\t\t\t} else {\n\t\t\t\tentry = &msg.Entries[index-msg.PrevLogIndex-1]\n\t\t\t}\n\n\t\t\tswitch entry.Kind {\n\t\t\tcase iface.EntryStateMachineCommand:\n\t\t\t\tactions = append(actions, 
iface.ActionStateMachineApply{\n\t\t\t\t\tEntryIndex: index,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn actions\n}",
"func handleAppendEntries(s *Sailor, state *storage.State, am *appendMessage, leaderId string) (appendReply, error) {\n\t// Check if the node needs to convert to the follower state\n\tif (s.state != follower && am.Term == s.currentTerm) || am.Term > s.currentTerm {\n\t\ts.becomeFollower(am.Term)\n\t}\n\n\trep := appendReply{Term: s.currentTerm, Success: false}\n\t// Reject the RPC if it has the wrong term\n\tif s.currentTerm > am.Term {\n\t\treturn rep, nil\n\t}\n\ts.timer.Reset(new_time()) // The message is from the leader, reset our election-start clock\n\ts.leaderId = leaderId\n\t// Reject the RPC if the position or the term is wrong\n\tif am.PrevLogIndex != 0 && (len(s.log) <= int(am.PrevLogIndex-1) ||\n\t\t(len(s.log) > 0 && s.log[am.PrevLogIndex-1].Term != am.PrevLogTerm)) {\n\t\treturn rep, nil\n\t}\n\n\trep.Success = true // The RPC is valid and the call will succeed\n\n\trep.PrepLower = am.PrevLogIndex + 1\n\trep.ComLower = s.volatile.commitIndex\n\n\t// Loop over the values we're dropping from our log. 
If we were the leader when\n\t// the values were proposed, then we reply to the client with a set failed message.\n\t// We know we were the leader if we were counting votes for that log entry.\n\tfor i := am.PrevLogIndex; i < uint(len(s.log)); i++ {\n\t\tif s.log[i].votes != 0 {\n\t\t\tfail := messages.Message{}\n\t\t\tfail.Source = s.client.NodeName\n\t\t\tfail.Type = \"setResponse\"\n\t\t\tfail.Error = \"SET FAILED\"\n\t\t\tfail.ID = s.log[i].Id\n\t\t\tfail.Key = s.log[i].Trans.Key\n\t\t\ts.client.SendToBroker(fail)\n\t\t}\n\t}\n\t// Drops the extra entries and adds the new ones\n\ts.log = append(s.log[:am.PrevLogIndex], am.Entries...)\n\t// If we have new values to commit\n\tif am.LeaderCommit > s.volatile.commitIndex {\n\t\t// Choose the min of our log size and the leader's commit index\n\t\tif int(am.LeaderCommit) <= len(s.log) {\n\t\t\ts.volatile.commitIndex = am.LeaderCommit\n\t\t} else {\n\t\t\ts.volatile.commitIndex = uint(len(s.log))\n\t\t}\n\t\t// Actually commit and apply the transactions to the state machine\n\t\tfor s.volatile.lastApplied < s.volatile.commitIndex {\n\t\t\ts.volatile.lastApplied += 1\n\t\t\tstate.ApplyTransaction(s.log[s.volatile.lastApplied-1].Trans)\n\t\t}\n\n\t}\n\trep.PrepUpper = uint(len(s.log))\n\trep.ComUpper = s.volatile.commitIndex\n\treturn rep, nil\n}",
"func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, 
leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}",
"func (r *Raft) handleAppendEntries(m pb.Message) {\n\t// Your Code Here (2A).\n\tDPrintf(\"node [%v] handle append, state=[%v], term=[%v], lastIndex=[%v], lastTerm=[%v], preIndex=[%v]\\n\",\n\t\tr.id, r.State, r.Term, r.RaftLog.LastIndex(), r.RaftLog.LastLogTerm(), m.Index)\n\tr.electionElapsed = 0\n\tif r.Term <= m.Term && r.State != StateFollower {\n\t\tr.becomeFollower(m.Term, m.From)\n\t}\n\tlastIndex := r.RaftLog.LastIndex()\n\n\tif m.Term != None && r.Term > m.Term {\n\t\t// stale and reject\n\t\tDPrintf(\"node[%v] reject append, reason: term stale\\n\", r.id)\n\t\tr.sendAppendResponse(m.From, true, 0)\n\t\treturn\n\t}\n\tr.Lead = m.From\n\tif m.Index > lastIndex {\n\t\t// not match certainly\n\t\tDPrintf(\"node[%v] reject append, reason: do not have such index\\n\", r.id)\n\t\tr.sendAppendResponse(m.From,true, r.RaftLog.LastIndex())\n\t\treturn\n\t}\n\t// index match\n\n\tlogTerm, _ := r.RaftLog.Term(m.Index)\n\n\n\tif logTerm != m.LogTerm {\n\t\tDPrintf(\"node[%v] reject append, reason: term not match\\n\", r.id)\n\t\tr.sendAppendResponse(m.From,true, r.RaftLog.LastIndex())\n\t\treturn\n\t}\n\n\n\tfor i, entry := range m.Entries {\n\t\tif entry.Index <= r.RaftLog.LastIndex() {\n\t\t\t// in this case, maybe we need to delete some entry\n\t\t\tlogTerm, _ := r.RaftLog.Term(entry.Index)\n\t\t\tif logTerm != entry.Term {\n\t\t\t\tr.RaftLog.DeleteFromIndex(entry.Index)\n\t\t\t\tr.RaftLog.entries = append(r.RaftLog.entries, *entry)\n\t\t\t\tr.RaftLog.stabled = min(r.RaftLog.stabled, entry.Index - 1)\n\t\t\t}\n\t\t}else {\n\t\t\tfor _, entry := range m.Entries[i:] {\n\t\t\t\tr.RaftLog.entries = append(r.RaftLog.entries, *entry)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tDPrintf(\"append success, now node[%v] raftLog=[%v]\\n\", r.id, r.RaftLog.entries)\n\n\t// update the commit\n\tif m.Commit > r.RaftLog.committed {\n\t\tcommitted := min(m.Commit, m.GetIndex()+uint64(len(m.GetEntries())))\n\t\tr.RaftLog.committed = min(committed, 
r.RaftLog.LastIndex())\n\t\tDPrintf(\"node[%v] commit update to[%v]\\n\", r.id, r.RaftLog.committed)\n\t}\n\n\tDPrintf(\"node[%v] send appendResponse to[%v], reject=[%v], lastIndex=[%v]\\n\", r.id, m.From, false, r.RaftLog.LastIndex())\n\tr.sendAppendResponse(m.From, false, r.RaftLog.LastIndex())\n\n}",
"func (rf *Raft) sendLogEntry(follower int) {\n\tDPrintf(\"[%d] is send AppendEntry to [%d]\", rf.me, follower)\n\trf.mu.Lock()\n\tif rf.state != Leader {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// Send install-snapshot to lagged follower.\n\tif rf.nextIndex[follower] <= rf.lastIncludedIndex {\n\t\tgo rf.sendSnapshot(follower)\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// Prepare the AppendEntries request args\n\tprevLogIndex := rf.nextIndex[follower] - 1\n\tprevLogTerm := rf.getEntry(prevLogIndex).LogTerm\n\targs := AppendEntriesArgs{Term: rf.currentTerm, LeaderId: rf.me, PrevLogIndex: prevLogIndex, PrevLogTerm: prevLogTerm, CommitIndex: rf.commitIndex, Len: 0}\n\tif rf.nextIndex[follower] < rf.logIndex {\n\t\t// Fill the log data gap for this follower\n\t\tentries := rf.getRangeEntry(rf.nextIndex[follower], rf.logIndex)\n\t\targs.Entries = entries\n\t\targs.Len = len(entries)\n\t}\n\trf.mu.Unlock()\n\n\t// Send the RPC AppendEntries\n\tvar reply AppendEntriesReply\n\tif rf.peers[follower].Call(\"Raft.AppendEntries\", &args, &reply) {\n\t\tDPrintf(\"[%d] get AppendEntries reply from [%d]\", rf.me, follower)\n\t\t// Lock after the RPC finished.\n\t\trf.mu.Lock()\n\t\tdefer rf.mu.Unlock()\n\n\t\t// Reply is not successful, early return.\n\t\tif !reply.Success {\n\t\t\tif reply.Term > rf.currentTerm { // the leader is obsolete\n\t\t\t\trf.stepDown(reply.Term)\n\t\t\t} else {\n\t\t\t\t// follower is inconsistent with leader\n\t\t\t\t// force follower's data to be overwritten by resetting index.\n\t\t\t\trf.nextIndex[follower] = Max(1, Min(reply.ConflictIndex, rf.logIndex))\n\t\t\t\tif rf.nextIndex[follower] <= rf.lastIncludedIndex {\n\t\t\t\t\tgo rf.sendSnapshot(follower)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t// Reply is successful\n\n\t\tprevLogIndex, logEntriesLen := args.PrevLogIndex, args.Len\n\t\tif prevLogIndex+logEntriesLen+1 > rf.nextIndex[follower] {\n\t\t\t// Update the our local record (for index) for this 
follower.\n\t\t\trf.nextIndex[follower] = prevLogIndex + logEntriesLen + 1\n\t\t\trf.matchIndex[follower] = prevLogIndex + logEntriesLen\n\t\t}\n\n\t\t// Update Commit Index if logs have been replicated to majority of followers.\n\t\ttoCommitIndex := prevLogIndex + logEntriesLen\n\t\tif rf.canCommit(toCommitIndex) {\n\t\t\trf.commitIndex = toCommitIndex\n\t\t\trf.persist()\n\n\t\t\t// Key: avoid blocking here !!!\n\t\t\trf.notifyApplyCh <- struct{}{}\n\t\t}\n\n\t}\n}",
"func (handler *RuleHandler) FollowerOnAppendEntriesReply(msg iface.MsgAppendEntriesReply, log iface.RaftLog, status iface.Status) []interface{} {\n\t// delayed append entries reply. ignore it\n\treturn []interface{}{}\n}",
"func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}",
"func (rf *Raft) heartbeatAppendEntries() {\n\t// make server -> reply map\n\treplies := make([]*AppendEntriesReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &AppendEntriesReply{}\n\t}\n\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\n\t\t// if we are no longer the leader\n\t\tif rf.state != Leader {\n\t\t\trf.Log(LogDebug, \"Discovered no longer the leader, stopping heartbeat\")\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t\t// send out heartbeats concurrently if leader\n\t\tfor servIdx := range rf.peers {\n\t\t\tif servIdx != rf.me {\n\n\t\t\t\t// successful request - update matchindex and nextindex accordingly\n\t\t\t\tif replies[servIdx].Success {\n\t\t\t\t\tif replies[servIdx].HighestLogIndexAdded > 0 {\n\t\t\t\t\t\trf.matchIndex[servIdx] = replies[servIdx].HighestLogIndexAdded\n\t\t\t\t\t}\n\t\t\t\t\trf.nextIndex[servIdx] = rf.matchIndex[servIdx] + 1\n\n\t\t\t\t\t// failed request - check for better term or decrease nextIndex\n\t\t\t\t} else if !replies[servIdx].Success && replies[servIdx].Returned {\n\n\t\t\t\t\t// we might have found out we shouldn't be the leader!\n\t\t\t\t\tif replies[servIdx].CurrentTerm > rf.currentTerm {\n\t\t\t\t\t\trf.Log(LogDebug, \"Detected server with higher term, stopping heartbeat and changing to follower.\")\n\t\t\t\t\t\trf.state = Follower\n\t\t\t\t\t\trf.currentTerm = replies[servIdx].CurrentTerm\n\n\t\t\t\t\t\t// persist - updated current term\n\t\t\t\t\t\tdata := rf.GetStateBytes(false)\n\t\t\t\t\t\trf.persister.SaveRaftState(data)\n\n\t\t\t\t\t\tgo rf.heartbeatTimeoutCheck()\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// failure - we need to decrease next index\n\t\t\t\t\t// 1. case where follower has no entry at the place we thought\n\t\t\t\t\t// => want to back up to start of follower log\n\t\t\t\t\t// 2. 
case where server has entry with different term NOT seen by leader\n\t\t\t\t\t// => want to back up nextIndex to the start of the 'run' of entries with that term (i.e. IndexFirstConflictingTerm)\n\t\t\t\t\t// 3. case where server has entry with different term that HAS been seen by leader\n\t\t\t\t\t// => want to back up to last entry leader has with that term\n\t\t\t\t\t//\n\t\t\t\t\t// Note for 2 and 3 ... if leader does not have the relevant log\n\t\t\t\t\t// entries, we need to call InstallSnapshot!\n\t\t\t\t\t//\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"\\n - IndexFirstConflictingTerm\", replies[servIdx].IndexFirstConflictingTerm, \"\\n - ConflictingEntryTerm\", replies[servIdx].ConflictingEntryTerm, \"\\n - LastLogIndex\", replies[servIdx].LastLogIndex)\n\t\t\t\t\tif replies[servIdx].ConflictingEntryTerm == -1 {\n\t\t\t\t\t\t// case 1 - follower has no entry at the given location\n\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].LastLogIndex + 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// if not case 1, need to check we have the logs at and beyond\n\t\t\t\t\t\t// IndexFirstConflictingTerm\n\t\t\t\t\t\traftLogIdx := rf.getTrimmedLogIndex(replies[servIdx].IndexFirstConflictingTerm)\n\t\t\t\t\t\tif raftLogIdx == -1 {\n\t\t\t\t\t\t\t// don't have the logs we need - will need to snapshot\n\t\t\t\t\t\t\t// set nextIndex to the lastIncludedIndex to force this\n\t\t\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif rf.log[raftLogIdx].Term != replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t// case 2 - follower has a term not seen by leader\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 2: follower has a term not seen by leader\")\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// case 3 - follower has a term seen by leader\n\t\t\t\t\t\t\t\t// need to go to latest entry that leader has with this 
term\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 3: follower has a term seen by leader, finding leader's latest entry with this term \\n - rf.log[\", rf.log)\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t\tfor rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx])].Term == replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t\trf.nextIndex[servIdx]++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// if we need to install a snapshot, then\n\t\t\t\t// nextIndex becomes the next index after the snapshot we will install\n\t\t\t\t// notice that we will then immediately send an AppendEntries request to the server,\n\t\t\t\t// and it will fail until the snapshot is installed, and we will just keep\n\t\t\t\t// resetting nextIndex\n\t\t\t\tif rf.nextIndex[servIdx] <= rf.lastIncludedIndex {\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"- need to send InstallSnapshot!\")\n\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex + 1\n\n\t\t\t\t\t// actually call the RPC\n\t\t\t\t\targs := &InstallSnapshotArgs{\n\t\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\t\tSnapshot: rf.persister.ReadSnapshot(),\n\t\t\t\t\t}\n\t\t\t\t\treply := &InstallSnapshotReply{}\n\t\t\t\t\tgo rf.sendInstallSnapshot(servIdx, args, reply)\n\t\t\t\t}\n\n\t\t\t\t// send a new append entries request to the server if the last one has finished\n\t\t\t\trf.Log(LogDebug, \"rf.nextIndex for server\", servIdx, \"set to idx\", rf.nextIndex[servIdx], \"\\n - rf.log\", rf.log, \"\\n - rf.lastIncludedIndex\", rf.lastIncludedIndex, \"\\n - rf.lastIncludedTerm\", rf.lastIncludedTerm)\n\t\t\t\tentries := []LogEntry{}\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tentries = rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx]):]\n\t\t\t\t}\n\t\t\t\targs := &AppendEntriesArgs{\n\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\tLeaderCommitIndex: rf.commitIndex,\n\t\t\t\t\tLogEntries: 
entries,\n\t\t\t\t}\n\n\t\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\t\tgo func(servIdx int) {\n\t\t\t\t\trf.Log(LogDebug, \"sendAppendEntries to servIdx\", servIdx)\n\t\t\t\t\treply := &AppendEntriesReply{}\n\t\t\t\t\tok := rf.sendAppendEntries(servIdx, args, reply)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\trf.Log(LogDebug, \"Received AppendEntries reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t\t}\n\t\t\t\t}(servIdx)\n\t\t\t}\n\t\t}\n\n\t\t// walk up through possible new commit indices\n\t\t// update commit index\n\t\torigIndex := rf.commitIndex\n\t\tnewIdx := rf.commitIndex + 1\n\t\tfor len(rf.log) > 0 && newIdx <= rf.log[len(rf.log)-1].Index {\n\t\t\treplicas := 1 // already replicated in our log\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\tif servIdx != rf.me && rf.matchIndex[servIdx] >= newIdx {\n\t\t\t\t\treplicas++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif replicas >= int(math.Ceil(float64(len(rf.peers))/2.0)) &&\n\t\t\t\tnewIdx > rf.lastIncludedIndex &&\n\t\t\t\trf.getTrimmedLogIndex(newIdx) >= 0 &&\n\t\t\t\trf.log[rf.getTrimmedLogIndex(newIdx)].Term == rf.currentTerm {\n\t\t\t\trf.commitIndex = newIdx\n\t\t\t\trf.Log(LogInfo, \"Entry \", rf.log[rf.getTrimmedLogIndex(rf.commitIndex)], \"replicated on a majority of servers. 
Commited to index\", rf.commitIndex)\n\t\t\t}\n\t\t\tnewIdx++\n\t\t}\n\n\t\t// send messages to applyCh for every message that was committed\n\t\tfor origIndex < rf.commitIndex {\n\t\t\torigIndex++\n\t\t\tif rf.getTrimmedLogIndex(origIndex) >= 0 {\n\t\t\t\trf.Log(LogInfo, \"Sending applyCh confirmation for commit of \", rf.log[rf.getTrimmedLogIndex(origIndex)], \"at index\", origIndex)\n\t\t\t\t{\n\t\t\t\t\trf.applyCh <- ApplyMsg{\n\t\t\t\t\t\tCommandValid: true,\n\t\t\t\t\t\tCommandIndex: origIndex,\n\t\t\t\t\t\tCommandTerm: rf.currentTerm,\n\t\t\t\t\t\tCommand: rf.log[rf.getTrimmedLogIndex(origIndex)].Command,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(heartbeatSendInterval)\n\t}\n}",
"func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Entries == nil && args.Term < rf.currentTerm { //heartbeat scenario\n\t\treply.Success = false\n\t\treply.Term = rf.currentTerm //not resetting clock for reelection\n\t\t//fmt.Println(\"Received Invalid AppendEntry for Server: term: from Leader:\", rf.me, args.Term, args.LeaderID)\n\n\t} else if args.Entries == nil && len(rf.log)-1 < args.PrevLogIndex {\n\t\treply.Success = false\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\n\t} else if args.Entries == nil && rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {\n\t\treply.Success = false\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\t} else if args.Entries == nil && rf.log[args.PrevLogIndex].Term == args.PrevLogTerm {\n\t\treply.Success = true\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\t\tif args.LeaderCommit > rf.commitIndex {\n\t\t\trf.commitIndex = min(args.LeaderCommit, len(rf.log)-1)\n\t\t\tif rf.lastApplied < rf.commitIndex {\n\t\t\t\trf.myCh <- ApplyMsg{true, rf.log[rf.commitIndex].Command, rf.commitIndex}\n\t\t\t\tfmt.Println(\"Case 1: CommitIndex updated to: Server:\", rf.commitIndex, rf.me)\n\t\t\t\trf.lastApplied = rf.commitIndex\n\t\t\t}\n\t\t}\n\t\t//fmt.Println(\"Received valid AppendEntry Server: Term: Leader:\", rf.me, args.Term, args.LeaderID)\n\t} else if args.Entries != nil && len(rf.log)-1 < args.PrevLogIndex {\n\t\treply.Success = false\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\n\t} else if args.Entries != nil && rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {\n\t\treply.Success = false\n\t\trf.log = rf.log[0:args.PrevLogIndex] //truncate the log\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\t\tfmt.Println(\"Truncate logs for server:\", 
rf.me)\n\n\t} else if args.Entries != nil && rf.log[args.PrevLogIndex].Term == args.PrevLogTerm {\n\t\treply.Success = true\n\t\trf.log = append(rf.log, args.Entries[:]...) //To Check: Does this work all the time?\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\t\tfmt.Println(\"AppendEntries from: Leader: to Server: log, Entries:\", args.LeaderID, rf.me, rf.log, args.Entries, args.PrevLogIndex)\n\n\t\tif args.LeaderCommit > rf.commitIndex {\n\t\t\trf.commitIndex = min(args.LeaderCommit, len(rf.log)-1)\n\t\t\tif rf.lastApplied < rf.commitIndex {\n\t\t\t\trf.myCh <- ApplyMsg{true, rf.log[rf.commitIndex].Command, rf.commitIndex}\n\t\t\t\trf.lastApplied = rf.commitIndex\n\t\t\t\tfmt.Println(\"Case 3: CommitIndex updated to: Server: Value:\", rf.commitIndex, rf.me, rf.log[rf.commitIndex].Command)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n}",
"func (s *raftServer) handleFollowers(followers []int, nextIndex *utils.SyncIntIntMap, matchIndex *utils.SyncIntIntMap, aeToken *utils.SyncIntIntMap) {\n\tfor s.State() == LEADER {\n\t\tfor _, f := range followers {\n\t\t\tlastIndex := s.localLog.TailIndex()\n\t\t\tn, ok := nextIndex.Get(f)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"nextIndex not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tunlocked, ok := aeToken.Get(f)\n\n\t\t\tif !ok {\n\t\t\t\tpanic(\"aeToken not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tif lastIndex != 0 && lastIndex >= n && unlocked == 1 {\n\t\t\t\taeToken.Set(f, 0)\n\t\t\t\t// send a new AppendEntry\n\t\t\t\tprevIndex := n - 1\n\t\t\t\tvar prevTerm int64 = 0\n\t\t\t\t// n = 0 when we add first entry to the log\n\t\t\t\tif prevIndex > 0 {\n\t\t\t\t\tprevTerm = s.localLog.Get(prevIndex).Term\n\t\t\t\t}\n\t\t\t\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid(), PrevLogIndex: prevIndex, PrevLogTerm: prevTerm}\n\t\t\t\tae.LeaderCommit = s.commitIndex.Get()\n\t\t\t\tae.Entry = *s.localLog.Get(n)\n\t\t\t\ts.writeToLog(\"Replicating entry \" + strconv.FormatInt(n, 10))\n\t\t\t\ts.server.Outbox() <- &cluster.Envelope{Pid: f, Msg: ae}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}",
"func CheckFollower(f models.Follower) (bool, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)\n\tdefer cancel()\n\tdb := MongoCon.Database(\"flash\")\n\tcol := db.Collection(\"follower\")\n\n\treg := bson.M{\n\t\t\"user_id\": f.UserID,\n\t\t\"user_followed\": f.UserFollowed,\n\t}\n\n\tvar result models.Follower\n\terr := col.FindOne(ctx, reg).Decode(&result)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func (r *RaftNode) handleAppendEntries(msg AppendEntriesMsg) (resetTimeout, fallback bool) {\n\tif len(msg.request.GetEntries()) > 0 {\n\t\tr.Debug(\"Got appendEntries with %d entries from %v\", len(msg.request.GetEntries()), msg.request.GetLeader())\n\t} else {\n\t\tr.Verbose(\"Got appendEntries heartbeat from %v\", msg.request.GetLeader().Id)\n\t}\n\n\t// resetTimeout == request successful\n\tif msg.request.GetTerm() < r.GetCurrentTerm() {\n\t\t// if the leader calling us is behind the times the request is unsuccessful, and it should revert\n\t\tmsg.reply <- AppendEntriesReply{r.GetCurrentTerm(), false} // our term is greater the leader's\n\t\treturn false, false\n\n\t} else {\n\t\t// node has higher or equivalent term and so this is an acceptable heartbeat\n\t\t// make sure we have this leader as our leader and the correct term\n\t\tr.updateTermIfNecessary(msg.request.GetTerm())\n\n\t\t// no matter our state, we'll always be reverting to a follower when getting an AppendEntries,\n\t\t// so set our leader to be the cluster leader (who will also be the one who sent the message)\n\t\tr.Leader = msg.request.GetLeader()\n\n\t\tsuccess := r.mergeLogEntries(msg.request)\n\t\tmsg.reply <- AppendEntriesReply{r.GetCurrentTerm(), success}\n\n\t\t// always \"fall back\", but this will only be utilized by leaders and candidates\n\t\treturn true, true\n\t}\n}",
"func (rf *Raft) sendAppendEntriesToMultipleFollowers() {\n for !rf.killed() {\n rf.mu.Lock()\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n\n for i := 0; i < len(rf.peers) && rf.state == \"Leader\"; i++ {\n if i == rf.me {\n continue\n }else{\n if rf.nextIndex[i] <= rf.snapshottedIndex {\n go rf.sendInstallSnapshotToOneFollower(i, rf.log[0].Term)\n }else{\n go rf.sendAppendEntriesToOneFollower(i)\n }\n }\n }\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n rf.commitEntries()\n rf.mu.Unlock()\n\n time.Sleep(100 * time.Millisecond)\n }\n}",
"func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tterm := request.term\n\tcmd := request.entries\n\tindex := request.prevLogIndex + 1\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field\n\n\tif len(r.myLog) == index {\n\t\tr.myLog = append(r.myLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.myLog[index] = logVal //overwriting in case of log repair\n\t\t//fmt.Println(\"Overwiriting!!\")\n\t}\n\t//fmt.Println(r.myId(), \"Append to log\", string(cmd))\n\t//modify metadata after appending\n\t//r.myMetaData.lastLogIndex = r.myMetaData.lastLogIndex + 1\n\t//r.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\t//\tif len(r.myLog) == 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1\n\t//\t} else if len(r.myLog) > 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t//\t}\n\n\t//Changed on 4th april, above is wrong in case of overwriting of log\n\tr.myMetaData.lastLogIndex = index\n\tr.myMetaData.prevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.myMetaData.prevLogTerm = r.myLog[index-1].Term\n\t}\n\n\t//Update commit index\n\tleaderCI := float64(request.leaderCommitIndex)\n\tmyLI := float64(r.myMetaData.lastLogIndex)\n\tif request.leaderCommitIndex > r.myMetaData.commitIndex {\n\t\tif myLI == -1 { //REDUNDANT since Append to log will make sure it is never -1,also must not copy higher CI if self LI is -1\n\t\t\tr.myMetaData.commitIndex = int(leaderCI)\n\t\t} else {\n\t\t\tr.myMetaData.commitIndex = int(math.Min(leaderCI, myLI))\n\t\t}\n\t}\n\t//fmt.Println(r.myId(), \"My CI is:\", r.myMetaData.commitIndex)\n\tr.WriteLogToDisk()\n}",
"func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tTerm := request.LeaderLastLogTerm\n\tcmd := request.Entries\n\tindex := request.PrevLogIndex + 1\n\tlogVal := LogVal{Term, cmd, 0} //make object for log's value field\n\n\tif len(r.MyLog) == index {\n\t\tr.MyLog = append(r.MyLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.MyLog[index] = logVal //overwriting in case of log repair\n\t}\n\n\tr.MyMetaData.LastLogIndex = index\n\tr.MyMetaData.PrevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyMetaData.PrevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyLog[index-1].Term\n\t}\n\tleaderCI := float64(request.LeaderCommitIndex) //Update commit index\n\tmyLI := float64(r.MyMetaData.LastLogIndex)\n\tif request.LeaderCommitIndex > r.MyMetaData.CommitIndex {\n\t\tr.MyMetaData.CommitIndex = int(math.Min(leaderCI, myLI))\n\t}\n\tr.WriteLogToDisk()\n}",
"func (handler *RuleHandler) FollowerOnAddServer(msg iface.MsgAddServer, log iface.RaftLog, status iface.Status) []interface{} {\n\t// leader should be responsible for this\n\treturn []interface{}{iface.ReplyNotLeader{}}\n}",
"func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}",
"func (rf *Raft) AppendEntriesHandler(req *AppendEntriesRequest, resp *AppendEntriesResponse) {\n\n\t/*++++++++++++++++++++CRITICAL SECTION++++++++++++++++++++*/\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.info(\"AppendEntries RPC returns\")\n\n\trf.info(\"AppendEntries RPC receives %+v\", *req)\n\tresp.ResponseTerm = rf.currentTerm\n\n\t// 1. reply false if term < currentTerm (§5.1)\n\tif req.LeaderTerm < rf.currentTerm {\n\t\tresp.Info = TERM_OUTDATED\n\t\treturn\n\t}\n\n\t// reset the election timeout\n\trf.resetTrigger()\n\n\t// if RPC request or response contains term T > currentTerm:\n\t// set currentTerm = T, convert to follower (§5.1)\n\tif req.LeaderTerm > rf.currentTerm {\n\t\trf.currentTerm = req.LeaderTerm\n\t\trf.persist()\n\t\trf.role = FOLLOWER\n\t}\n\n\t// finds the position of the given PrevLogIndex at the log\n\tsliceIdx := req.PrevLogIndex - rf.offset\n\n\tswitch {\n\n\t// PrevLogIndex points beyond the end of the log,\n\t// handle it the same as if the entry exists but the term did not match\n\t// i.e., reply false\n\tcase sliceIdx >= len(rf.logs):\n\t\tresp.Info = LOG_INCONSISTENT\n\t\tresp.ConflictIndex = len(rf.logs) + rf.offset - 1\n\t\tresp.ConflictTerm = -1\n\t\treturn\n\n\t// PrevLogIndex matches the lastIncludedIndex (no log)\n\tcase sliceIdx == -1 && req.PrevLogIndex == 0:\n\n\t// PrevLogIndex matches the lastIncludedIndex in the snapshot\n\tcase sliceIdx == -1 && req.PrevLogIndex == rf.lastIncludedIndex:\n\n\tcase sliceIdx < 0:\n\t\tresp.Info = LOG_INCONSISTENT\n\t\tresp.ConflictIndex = 0\n\t\tresp.ConflictTerm = -1\n\t\tmsg := fmt.Sprintf(\"%s A=%d,C=%d,T=%d,O=%d,{...=>[%d|%d]}\",\n\t\t\ttime.Now().Format(\"15:04:05.000\"), rf.lastAppliedIndex, rf.commitIndex, rf.currentTerm, rf.offset, rf.lastIncludedIndex, rf.lastIncludedTerm)\n\n\t\tif len(rf.logs) == 0 {\n\t\t\tmsg += \"{} \"\n\t\t} else {\n\t\t\tmsg += fmt.Sprintf(\"{%+v->%+v} \", rf.logs[0], rf.logs[len(rf.logs)-1])\n\t\t}\n\t\tmsg += fmt.Sprintf(RAFT_FORMAT, 
rf.me)\n\t\tmsg += fmt.Sprintf(\"##### APPEND_ENTRIES REQ3%+v\", *req)\n\t\tmsg += \"\\n\"\n\n\t\tfmt.Println(msg)\n\t\treturn\n\n\tdefault:\n\t\t// 2. reply false if the log doesn't contain an entry at prevLogIndex\n\t\t// whose term matches prevLogTerm (§5.3)\n\t\tif rf.logs[sliceIdx].Term != req.PrevLogTerm {\n\t\t\tresp.ConflictTerm = rf.logs[sliceIdx].Term\n\t\t\tfor i := 0; i <= sliceIdx; i++ {\n\t\t\t\tif rf.logs[i].Term == resp.ConflictTerm {\n\t\t\t\t\tresp.ConflictIndex = rf.logs[i].Index\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresp.Info = LOG_INCONSISTENT\n\t\t\treturn\n\t\t}\n\t}\n\n\tresp.Info = SUCCESS\n\n\t// 3. if an existing entry conflicts with a new one (same index\n\t// but different terms), delete the existing entry and all that\n\t// follow it (§5.3)\n\t// 4. append any new entries not already in the log\n\ti := sliceIdx + 1\n\tj := 0\n\n\tes := make([]LogEntry, len(req.Entries))\n\tcopy(es, req.Entries)\n\tfor j < len(es) {\n\t\tif i == len(rf.logs) {\n\t\t\trf.logs = append(rf.logs, es[j])\n\t\t} else if rf.logs[i].Term != es[j].Term {\n\t\t\trf.logs = rf.logs[:i]\n\t\t\trf.logs = append(rf.logs, es[j])\n\t\t}\n\t\ti++\n\t\tj++\n\t}\n\trf.persist()\n\n\t// 5. If leaderCommit > commitIndex, set commitIndex =\n\t// min(leaderCommit, index of last new entry)\n\trf.receiverTryUpdateCommitIndex(req)\n\t/*--------------------CRITICAL SECTION--------------------*/\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestFollowerAppendEntries tests that when an AppendEntries RPC is valid, the follower deletes the existing conflicting entry and all entries that follow it, and appends any new entries not already in its log. It also writes the new entries into stable storage. Reference: section 5.3
|
// TestFollowerAppendEntries tests that when an AppendEntries RPC is valid,
// the follower deletes the existing conflicting entry and all that follow
// it, and appends any new entries not already in the log. It also checks
// that the appended entries show up as unstable (pending stable storage).
// Reference: raft paper, section 5.3.
func TestFollowerAppendEntries(t *testing.T) {
	tests := []struct {
		index, term uint64     // PrevLogIndex / PrevLogTerm carried by the message
		ents        []pb.Entry // entries sent by the leader
		wents       []pb.Entry // expected full log after the RPC
		wunstable   []pb.Entry // expected unstable (not yet persisted) entries
	}{
		{
			2, 2,
			[]pb.Entry{{Term: 3, Index: 3}},
			[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},
			[]pb.Entry{{Term: 3, Index: 3}},
		},
		{
			1, 1,
			[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},
			[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},
			[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},
		},
		{
			0, 0,
			[]pb.Entry{{Term: 1, Index: 1}},
			[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},
			nil,
		},
		{
			0, 0,
			[]pb.Entry{{Term: 3, Index: 1}},
			[]pb.Entry{{Term: 3, Index: 1}},
			[]pb.Entry{{Term: 3, Index: 1}},
		},
	}
	for i, tt := range tests {
		// Run each case inside a closure so the deferred cleanup fires at
		// the end of the iteration instead of accumulating until the whole
		// test function returns (defer-in-a-loop pitfall).
		func() {
			storage := NewMemoryStorage()
			storage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})
			r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)
			defer closeAndFreeRaft(r)
			r.becomeFollower(2, 2)
			r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})
			if g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {
				t.Errorf("#%d: ents = %+v, want %+v", i, g, tt.wents)
			}
			if g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {
				t.Errorf("#%d: unstableEnts = %+v, want %+v", i, g, tt.wunstable)
			}
		}()
	}
}
|
[
"func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Entries == nil && args.Term < rf.currentTerm { //heartbeat scenario\n\t\treply.Success = false\n\t\treply.Term = rf.currentTerm //not resetting clock for reelection\n\t\t//fmt.Println(\"Received Invalid AppendEntry for Server: term: from Leader:\", rf.me, args.Term, args.LeaderID)\n\n\t} else if args.Entries == nil && len(rf.log)-1 < args.PrevLogIndex {\n\t\treply.Success = false\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\n\t} else if args.Entries == nil && rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {\n\t\treply.Success = false\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\t} else if args.Entries == nil && rf.log[args.PrevLogIndex].Term == args.PrevLogTerm {\n\t\treply.Success = true\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\t\tif args.LeaderCommit > rf.commitIndex {\n\t\t\trf.commitIndex = min(args.LeaderCommit, len(rf.log)-1)\n\t\t\tif rf.lastApplied < rf.commitIndex {\n\t\t\t\trf.myCh <- ApplyMsg{true, rf.log[rf.commitIndex].Command, rf.commitIndex}\n\t\t\t\tfmt.Println(\"Case 1: CommitIndex updated to: Server:\", rf.commitIndex, rf.me)\n\t\t\t\trf.lastApplied = rf.commitIndex\n\t\t\t}\n\t\t}\n\t\t//fmt.Println(\"Received valid AppendEntry Server: Term: Leader:\", rf.me, args.Term, args.LeaderID)\n\t} else if args.Entries != nil && len(rf.log)-1 < args.PrevLogIndex {\n\t\treply.Success = false\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\n\t} else if args.Entries != nil && rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {\n\t\treply.Success = false\n\t\trf.log = rf.log[0:args.PrevLogIndex] //truncate the log\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\t\tfmt.Println(\"Truncate logs for server:\", 
rf.me)\n\n\t} else if args.Entries != nil && rf.log[args.PrevLogIndex].Term == args.PrevLogTerm {\n\t\treply.Success = true\n\t\trf.log = append(rf.log, args.Entries[:]...) //To Check: Does this work all the time?\n\t\trf.currentTerm = args.Term\n\t\trf.peerStatus = Follower\n\t\trf.validBeat = true\n\t\tfmt.Println(\"AppendEntries from: Leader: to Server: log, Entries:\", args.LeaderID, rf.me, rf.log, args.Entries, args.PrevLogIndex)\n\n\t\tif args.LeaderCommit > rf.commitIndex {\n\t\t\trf.commitIndex = min(args.LeaderCommit, len(rf.log)-1)\n\t\t\tif rf.lastApplied < rf.commitIndex {\n\t\t\t\trf.myCh <- ApplyMsg{true, rf.log[rf.commitIndex].Command, rf.commitIndex}\n\t\t\t\trf.lastApplied = rf.commitIndex\n\t\t\t\tfmt.Println(\"Case 3: CommitIndex updated to: Server: Value:\", rf.commitIndex, rf.me, rf.log[rf.commitIndex].Command)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n}",
"func (rf *Raft) sendLogEntry(follower int) {\n\tDPrintf(\"[%d] is send AppendEntry to [%d]\", rf.me, follower)\n\trf.mu.Lock()\n\tif rf.state != Leader {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// Send install-snapshot to lagged follower.\n\tif rf.nextIndex[follower] <= rf.lastIncludedIndex {\n\t\tgo rf.sendSnapshot(follower)\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// Prepare the AppendEntries request args\n\tprevLogIndex := rf.nextIndex[follower] - 1\n\tprevLogTerm := rf.getEntry(prevLogIndex).LogTerm\n\targs := AppendEntriesArgs{Term: rf.currentTerm, LeaderId: rf.me, PrevLogIndex: prevLogIndex, PrevLogTerm: prevLogTerm, CommitIndex: rf.commitIndex, Len: 0}\n\tif rf.nextIndex[follower] < rf.logIndex {\n\t\t// Fill the log data gap for this follower\n\t\tentries := rf.getRangeEntry(rf.nextIndex[follower], rf.logIndex)\n\t\targs.Entries = entries\n\t\targs.Len = len(entries)\n\t}\n\trf.mu.Unlock()\n\n\t// Send the RPC AppendEntries\n\tvar reply AppendEntriesReply\n\tif rf.peers[follower].Call(\"Raft.AppendEntries\", &args, &reply) {\n\t\tDPrintf(\"[%d] get AppendEntries reply from [%d]\", rf.me, follower)\n\t\t// Lock after the RPC finished.\n\t\trf.mu.Lock()\n\t\tdefer rf.mu.Unlock()\n\n\t\t// Reply is not successful, early return.\n\t\tif !reply.Success {\n\t\t\tif reply.Term > rf.currentTerm { // the leader is obsolete\n\t\t\t\trf.stepDown(reply.Term)\n\t\t\t} else {\n\t\t\t\t// follower is inconsistent with leader\n\t\t\t\t// force follower's data to be overwritten by resetting index.\n\t\t\t\trf.nextIndex[follower] = Max(1, Min(reply.ConflictIndex, rf.logIndex))\n\t\t\t\tif rf.nextIndex[follower] <= rf.lastIncludedIndex {\n\t\t\t\t\tgo rf.sendSnapshot(follower)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t// Reply is successful\n\n\t\tprevLogIndex, logEntriesLen := args.PrevLogIndex, args.Len\n\t\tif prevLogIndex+logEntriesLen+1 > rf.nextIndex[follower] {\n\t\t\t// Update the our local record (for index) for this 
follower.\n\t\t\trf.nextIndex[follower] = prevLogIndex + logEntriesLen + 1\n\t\t\trf.matchIndex[follower] = prevLogIndex + logEntriesLen\n\t\t}\n\n\t\t// Update Commit Index if logs have been replicated to majority of followers.\n\t\ttoCommitIndex := prevLogIndex + logEntriesLen\n\t\tif rf.canCommit(toCommitIndex) {\n\t\t\trf.commitIndex = toCommitIndex\n\t\t\trf.persist()\n\n\t\t\t// Key: avoid blocking here !!!\n\t\t\trf.notifyApplyCh <- struct{}{}\n\t\t}\n\n\t}\n}",
"func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}",
"func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tterm := request.term\n\tcmd := request.entries\n\tindex := request.prevLogIndex + 1\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field\n\n\tif len(r.myLog) == index {\n\t\tr.myLog = append(r.myLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.myLog[index] = logVal //overwriting in case of log repair\n\t\t//fmt.Println(\"Overwiriting!!\")\n\t}\n\t//fmt.Println(r.myId(), \"Append to log\", string(cmd))\n\t//modify metadata after appending\n\t//r.myMetaData.lastLogIndex = r.myMetaData.lastLogIndex + 1\n\t//r.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\t//\tif len(r.myLog) == 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1\n\t//\t} else if len(r.myLog) > 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t//\t}\n\n\t//Changed on 4th april, above is wrong in case of overwriting of log\n\tr.myMetaData.lastLogIndex = index\n\tr.myMetaData.prevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.myMetaData.prevLogTerm = r.myLog[index-1].Term\n\t}\n\n\t//Update commit index\n\tleaderCI := float64(request.leaderCommitIndex)\n\tmyLI := float64(r.myMetaData.lastLogIndex)\n\tif request.leaderCommitIndex > r.myMetaData.commitIndex {\n\t\tif myLI == -1 { //REDUNDANT since Append to log will make sure it is never -1,also must not copy higher CI if self LI is -1\n\t\t\tr.myMetaData.commitIndex = int(leaderCI)\n\t\t} else {\n\t\t\tr.myMetaData.commitIndex = int(math.Min(leaderCI, myLI))\n\t\t}\n\t}\n\t//fmt.Println(r.myId(), \"My CI is:\", r.myMetaData.commitIndex)\n\tr.WriteLogToDisk()\n}",
"func (handler *RuleHandler) FollowerOnAppendEntries(msg iface.MsgAppendEntries, log iface.RaftLog, status iface.Status) []interface{} {\n\tactions := make([]interface{}, 0) // list of actions created\n\t// since we are hearing from the leader, reset timeout\n\tactions = append(actions, iface.ActionResetTimer{\n\t\tHalfTime: false,\n\t})\n\tactions = append(actions, iface.ActionSetLeaderLastHeard{\n\t\tInstant: time.Now(),\n\t})\n\n\t// maybe we are outdated\n\tif msg.Term > status.CurrentTerm() {\n\t\tactions = append(actions, iface.ActionSetCurrentTerm{\n\t\t\tNewCurrentTerm: msg.Term,\n\t\t})\n\t}\n\n\tprevEntry, _ := log.Get(msg.PrevLogIndex)\n\n\t// leader is outdated ?\n\tif msg.Term < status.CurrentTerm() {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I dont have previous log entry (but should)\n\tif prevEntry == nil && msg.PrevLogIndex != -1 {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I have previous log entry, but it does not match\n\tif prevEntry != nil && prevEntry.Term != msg.PrevLogTerm {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// all is ok. accept new entries\n\tactions = append(actions, iface.ReplyAppendEntries{\n\t\tAddress: status.NodeAddress(),\n\t\tSuccess: true,\n\t\tTerm: status.CurrentTerm(),\n\t})\n\n\t// if there is anything to append, do it\n\tif len(msg.Entries) > 0 {\n\t\t// delete all entries in log after PrevLogIndex\n\t\tactions = append(actions, iface.ActionDeleteLog{\n\t\t\tCount: log.LastIndex() - msg.PrevLogIndex,\n\t\t})\n\n\t\t// take care ! 
Maybe we are removing an entry\n\t\t// containing our current cluster configuration.\n\t\t// In this case, revert to previous cluster\n\t\t// configuration\n\t\tcontainsClusterChange := false\n\t\tstabilized := false\n\t\tclusterChangeIndex := status.ClusterChangeIndex()\n\t\tclusterChangeTerm := status.ClusterChangeTerm()\n\t\tcluster := append(status.PeerAddresses(), status.NodeAddress())\n\t\tfor !stabilized {\n\t\t\tstabilized = true\n\t\t\tif clusterChangeIndex > msg.PrevLogIndex {\n\t\t\t\tstabilized = false\n\t\t\t\tcontainsClusterChange = true\n\t\t\t\tentry, _ := log.Get(clusterChangeIndex)\n\t\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\t\tjson.Unmarshal(entry.Command, &record)\n\t\t\t\tclusterChangeIndex = record.OldClusterChangeIndex\n\t\t\t\tclusterChangeTerm = record.OldClusterChangeTerm\n\t\t\t\tcluster = record.OldCluster\n\t\t\t}\n\t\t}\n\n\t\t// if deletion detected, rewind to previous configuration\n\t\tif containsClusterChange {\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: clusterChangeIndex,\n\t\t\t\tNewClusterChangeTerm: clusterChangeTerm,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range cluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t}\n\n\t\t// append all entries sent by leader\n\t\tactions = append(actions, iface.ActionAppendLog{\n\t\t\tEntries: msg.Entries,\n\t\t})\n\n\t\t// once again, take care ! Maybe we are adding some entry\n\t\t// describing a cluster change. 
In such a case, we must apply\n\t\t// the new cluster configuration to ourselves (specifically,\n\t\t// the last cluster configuration among the new entries)\n\t\tfor index := len(msg.Entries) - 1; index >= 0; index-- {\n\t\t\tif msg.Entries[index].Kind != iface.EntryAddServer &&\n\t\t\t\tmsg.Entries[index].Kind != iface.EntryRemoveServer {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\tjson.Unmarshal(msg.Entries[index].Command, &record)\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: msg.PrevLogIndex + int64(index+1),\n\t\t\t\tNewClusterChangeTerm: msg.Entries[index].Term,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range record.NewCluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// if leader has committed more than we know, update our index\n\t// and demand state-machine application\n\tif msg.LeaderCommitIndex > status.CommitIndex() {\n\t\tactions = append(actions, iface.ActionSetCommitIndex{\n\t\t\tNewCommitIndex: int64(math.Min(\n\t\t\t\tfloat64(msg.LeaderCommitIndex),\n\t\t\t\tfloat64(msg.PrevLogIndex+int64(len(msg.Entries))),\n\t\t\t)),\n\t\t})\n\t\t// order the state machine to apply the new committed entries\n\t\t// (only if they are state machine commands)\n\t\t// TODO: Treat configuration change\n\t\tfor index := status.CommitIndex() + 1; index < msg.LeaderCommitIndex; index++ {\n\t\t\tvar entry *iface.LogEntry\n\n\t\t\t// get from my log\n\t\t\tif index <= msg.PrevLogIndex {\n\t\t\t\tentry, _ = log.Get(index)\n\n\t\t\t\t// get from leader\n\t\t\t} else {\n\t\t\t\tentry = &msg.Entries[index-msg.PrevLogIndex-1]\n\t\t\t}\n\n\t\t\tswitch entry.Kind {\n\t\t\tcase iface.EntryStateMachineCommand:\n\t\t\t\tactions = append(actions, 
iface.ActionStateMachineApply{\n\t\t\t\t\tEntryIndex: index,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn actions\n}",
"func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tTerm := request.LeaderLastLogTerm\n\tcmd := request.Entries\n\tindex := request.PrevLogIndex + 1\n\tlogVal := LogVal{Term, cmd, 0} //make object for log's value field\n\n\tif len(r.MyLog) == index {\n\t\tr.MyLog = append(r.MyLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.MyLog[index] = logVal //overwriting in case of log repair\n\t}\n\n\tr.MyMetaData.LastLogIndex = index\n\tr.MyMetaData.PrevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyMetaData.PrevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyLog[index-1].Term\n\t}\n\tleaderCI := float64(request.LeaderCommitIndex) //Update commit index\n\tmyLI := float64(r.MyMetaData.LastLogIndex)\n\tif request.LeaderCommitIndex > r.MyMetaData.CommitIndex {\n\t\tr.MyMetaData.CommitIndex = int(math.Min(leaderCI, myLI))\n\t}\n\tr.WriteLogToDisk()\n}",
"func handleAppendEntries(s *Sailor, state *storage.State, am *appendMessage, leaderId string) (appendReply, error) {\n\t// Check if the node needs to convert to the follower state\n\tif (s.state != follower && am.Term == s.currentTerm) || am.Term > s.currentTerm {\n\t\ts.becomeFollower(am.Term)\n\t}\n\n\trep := appendReply{Term: s.currentTerm, Success: false}\n\t// Reject the RPC if it has the wrong term\n\tif s.currentTerm > am.Term {\n\t\treturn rep, nil\n\t}\n\ts.timer.Reset(new_time()) // The message is from the leader, reset our election-start clock\n\ts.leaderId = leaderId\n\t// Reject the RPC if the position or the term is wrong\n\tif am.PrevLogIndex != 0 && (len(s.log) <= int(am.PrevLogIndex-1) ||\n\t\t(len(s.log) > 0 && s.log[am.PrevLogIndex-1].Term != am.PrevLogTerm)) {\n\t\treturn rep, nil\n\t}\n\n\trep.Success = true // The RPC is valid and the call will succeed\n\n\trep.PrepLower = am.PrevLogIndex + 1\n\trep.ComLower = s.volatile.commitIndex\n\n\t// Loop over the values we're dropping from our log. 
If we were the leader when\n\t// the values were proposed, then we reply to the client with a set failed message.\n\t// We know we were the leader if we were counting votes for that log entry.\n\tfor i := am.PrevLogIndex; i < uint(len(s.log)); i++ {\n\t\tif s.log[i].votes != 0 {\n\t\t\tfail := messages.Message{}\n\t\t\tfail.Source = s.client.NodeName\n\t\t\tfail.Type = \"setResponse\"\n\t\t\tfail.Error = \"SET FAILED\"\n\t\t\tfail.ID = s.log[i].Id\n\t\t\tfail.Key = s.log[i].Trans.Key\n\t\t\ts.client.SendToBroker(fail)\n\t\t}\n\t}\n\t// Drops the extra entries and adds the new ones\n\ts.log = append(s.log[:am.PrevLogIndex], am.Entries...)\n\t// If we have new values to commit\n\tif am.LeaderCommit > s.volatile.commitIndex {\n\t\t// Choose the min of our log size and the leader's commit index\n\t\tif int(am.LeaderCommit) <= len(s.log) {\n\t\t\ts.volatile.commitIndex = am.LeaderCommit\n\t\t} else {\n\t\t\ts.volatile.commitIndex = uint(len(s.log))\n\t\t}\n\t\t// Actually commit and apply the transactions to the state machine\n\t\tfor s.volatile.lastApplied < s.volatile.commitIndex {\n\t\t\ts.volatile.lastApplied += 1\n\t\t\tstate.ApplyTransaction(s.log[s.volatile.lastApplied-1].Trans)\n\t\t}\n\n\t}\n\trep.PrepUpper = uint(len(s.log))\n\trep.ComUpper = s.volatile.commitIndex\n\treturn rep, nil\n}",
"func (rf *Raft) sendAppendEntriesToMultipleFollowers() {\n for !rf.killed() {\n rf.mu.Lock()\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n\n for i := 0; i < len(rf.peers) && rf.state == \"Leader\"; i++ {\n if i == rf.me {\n continue\n }else{\n if rf.nextIndex[i] <= rf.snapshottedIndex {\n go rf.sendInstallSnapshotToOneFollower(i, rf.log[0].Term)\n }else{\n go rf.sendAppendEntriesToOneFollower(i)\n }\n }\n }\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n rf.commitEntries()\n rf.mu.Unlock()\n\n time.Sleep(100 * time.Millisecond)\n }\n}",
"func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, 
leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}",
"func (r *Raft) setupAppendEntries(s *followerReplication, req *pb.AppendEntriesRequest, nextIndex, lastIndex uint64) error {\n\treq.Term = s.currentTerm\n\treq.Leader = r.transport.EncodePeer(r.localID, r.localAddr)\n\treq.LeaderCommitIndex = r.getCommitIndex()\n\tif err := r.setPreviousLog(req, nextIndex); err != nil {\n\t\treturn err\n\t}\n\tif err := r.setNewLogs(req, nextIndex, lastIndex); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (r *Raft) handleAppendEntries(m pb.Message) {\n\t// Your Code Here (2A).\n\tDPrintf(\"node [%v] handle append, state=[%v], term=[%v], lastIndex=[%v], lastTerm=[%v], preIndex=[%v]\\n\",\n\t\tr.id, r.State, r.Term, r.RaftLog.LastIndex(), r.RaftLog.LastLogTerm(), m.Index)\n\tr.electionElapsed = 0\n\tif r.Term <= m.Term && r.State != StateFollower {\n\t\tr.becomeFollower(m.Term, m.From)\n\t}\n\tlastIndex := r.RaftLog.LastIndex()\n\n\tif m.Term != None && r.Term > m.Term {\n\t\t// stale and reject\n\t\tDPrintf(\"node[%v] reject append, reason: term stale\\n\", r.id)\n\t\tr.sendAppendResponse(m.From, true, 0)\n\t\treturn\n\t}\n\tr.Lead = m.From\n\tif m.Index > lastIndex {\n\t\t// not match certainly\n\t\tDPrintf(\"node[%v] reject append, reason: do not have such index\\n\", r.id)\n\t\tr.sendAppendResponse(m.From,true, r.RaftLog.LastIndex())\n\t\treturn\n\t}\n\t// index match\n\n\tlogTerm, _ := r.RaftLog.Term(m.Index)\n\n\n\tif logTerm != m.LogTerm {\n\t\tDPrintf(\"node[%v] reject append, reason: term not match\\n\", r.id)\n\t\tr.sendAppendResponse(m.From,true, r.RaftLog.LastIndex())\n\t\treturn\n\t}\n\n\n\tfor i, entry := range m.Entries {\n\t\tif entry.Index <= r.RaftLog.LastIndex() {\n\t\t\t// in this case, maybe we need to delete some entry\n\t\t\tlogTerm, _ := r.RaftLog.Term(entry.Index)\n\t\t\tif logTerm != entry.Term {\n\t\t\t\tr.RaftLog.DeleteFromIndex(entry.Index)\n\t\t\t\tr.RaftLog.entries = append(r.RaftLog.entries, *entry)\n\t\t\t\tr.RaftLog.stabled = min(r.RaftLog.stabled, entry.Index - 1)\n\t\t\t}\n\t\t}else {\n\t\t\tfor _, entry := range m.Entries[i:] {\n\t\t\t\tr.RaftLog.entries = append(r.RaftLog.entries, *entry)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tDPrintf(\"append success, now node[%v] raftLog=[%v]\\n\", r.id, r.RaftLog.entries)\n\n\t// update the commit\n\tif m.Commit > r.RaftLog.committed {\n\t\tcommitted := min(m.Commit, m.GetIndex()+uint64(len(m.GetEntries())))\n\t\tr.RaftLog.committed = min(committed, 
r.RaftLog.LastIndex())\n\t\tDPrintf(\"node[%v] commit update to[%v]\\n\", r.id, r.RaftLog.committed)\n\t}\n\n\tDPrintf(\"node[%v] send appendResponse to[%v], reject=[%v], lastIndex=[%v]\\n\", r.id, m.From, false, r.RaftLog.LastIndex())\n\tr.sendAppendResponse(m.From, false, r.RaftLog.LastIndex())\n\n}",
"func (s *raftServer) handleFollowers(followers []int, nextIndex *utils.SyncIntIntMap, matchIndex *utils.SyncIntIntMap, aeToken *utils.SyncIntIntMap) {\n\tfor s.State() == LEADER {\n\t\tfor _, f := range followers {\n\t\t\tlastIndex := s.localLog.TailIndex()\n\t\t\tn, ok := nextIndex.Get(f)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"nextIndex not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tunlocked, ok := aeToken.Get(f)\n\n\t\t\tif !ok {\n\t\t\t\tpanic(\"aeToken not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tif lastIndex != 0 && lastIndex >= n && unlocked == 1 {\n\t\t\t\taeToken.Set(f, 0)\n\t\t\t\t// send a new AppendEntry\n\t\t\t\tprevIndex := n - 1\n\t\t\t\tvar prevTerm int64 = 0\n\t\t\t\t// n = 0 when we add first entry to the log\n\t\t\t\tif prevIndex > 0 {\n\t\t\t\t\tprevTerm = s.localLog.Get(prevIndex).Term\n\t\t\t\t}\n\t\t\t\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid(), PrevLogIndex: prevIndex, PrevLogTerm: prevTerm}\n\t\t\t\tae.LeaderCommit = s.commitIndex.Get()\n\t\t\t\tae.Entry = *s.localLog.Get(n)\n\t\t\t\ts.writeToLog(\"Replicating entry \" + strconv.FormatInt(n, 10))\n\t\t\t\ts.server.Outbox() <- &cluster.Envelope{Pid: f, Msg: ae}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}",
"func (rf *Raft) AppendEntriesHandler(req *AppendEntriesRequest, resp *AppendEntriesResponse) {\n\n\t/*++++++++++++++++++++CRITICAL SECTION++++++++++++++++++++*/\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.info(\"AppendEntries RPC returns\")\n\n\trf.info(\"AppendEntries RPC receives %+v\", *req)\n\tresp.ResponseTerm = rf.currentTerm\n\n\t// 1. reply false if term < currentTerm (§5.1)\n\tif req.LeaderTerm < rf.currentTerm {\n\t\tresp.Info = TERM_OUTDATED\n\t\treturn\n\t}\n\n\t// reset the election timeout\n\trf.resetTrigger()\n\n\t// if RPC request or response contains term T > currentTerm:\n\t// set currentTerm = T, convert to follower (§5.1)\n\tif req.LeaderTerm > rf.currentTerm {\n\t\trf.currentTerm = req.LeaderTerm\n\t\trf.persist()\n\t\trf.role = FOLLOWER\n\t}\n\n\t// finds the position of the given PrevLogIndex at the log\n\tsliceIdx := req.PrevLogIndex - rf.offset\n\n\tswitch {\n\n\t// PrevLogIndex points beyond the end of the log,\n\t// handle it the same as if the entry exists but the term did not match\n\t// i.e., reply false\n\tcase sliceIdx >= len(rf.logs):\n\t\tresp.Info = LOG_INCONSISTENT\n\t\tresp.ConflictIndex = len(rf.logs) + rf.offset - 1\n\t\tresp.ConflictTerm = -1\n\t\treturn\n\n\t// PrevLogIndex matches the lastIncludedIndex (no log)\n\tcase sliceIdx == -1 && req.PrevLogIndex == 0:\n\n\t// PrevLogIndex matches the lastIncludedIndex in the snapshot\n\tcase sliceIdx == -1 && req.PrevLogIndex == rf.lastIncludedIndex:\n\n\tcase sliceIdx < 0:\n\t\tresp.Info = LOG_INCONSISTENT\n\t\tresp.ConflictIndex = 0\n\t\tresp.ConflictTerm = -1\n\t\tmsg := fmt.Sprintf(\"%s A=%d,C=%d,T=%d,O=%d,{...=>[%d|%d]}\",\n\t\t\ttime.Now().Format(\"15:04:05.000\"), rf.lastAppliedIndex, rf.commitIndex, rf.currentTerm, rf.offset, rf.lastIncludedIndex, rf.lastIncludedTerm)\n\n\t\tif len(rf.logs) == 0 {\n\t\t\tmsg += \"{} \"\n\t\t} else {\n\t\t\tmsg += fmt.Sprintf(\"{%+v->%+v} \", rf.logs[0], rf.logs[len(rf.logs)-1])\n\t\t}\n\t\tmsg += fmt.Sprintf(RAFT_FORMAT, 
rf.me)\n\t\tmsg += fmt.Sprintf(\"##### APPEND_ENTRIES REQ3%+v\", *req)\n\t\tmsg += \"\\n\"\n\n\t\tfmt.Println(msg)\n\t\treturn\n\n\tdefault:\n\t\t// 2. reply false if the log doesn't contain an entry at prevLogIndex\n\t\t// whose term matches prevLogTerm (§5.3)\n\t\tif rf.logs[sliceIdx].Term != req.PrevLogTerm {\n\t\t\tresp.ConflictTerm = rf.logs[sliceIdx].Term\n\t\t\tfor i := 0; i <= sliceIdx; i++ {\n\t\t\t\tif rf.logs[i].Term == resp.ConflictTerm {\n\t\t\t\t\tresp.ConflictIndex = rf.logs[i].Index\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresp.Info = LOG_INCONSISTENT\n\t\t\treturn\n\t\t}\n\t}\n\n\tresp.Info = SUCCESS\n\n\t// 3. if an existing entry conflicts with a new one (same index\n\t// but different terms), delete the existing entry and all that\n\t// follow it (§5.3)\n\t// 4. append any new entries not already in the log\n\ti := sliceIdx + 1\n\tj := 0\n\n\tes := make([]LogEntry, len(req.Entries))\n\tcopy(es, req.Entries)\n\tfor j < len(es) {\n\t\tif i == len(rf.logs) {\n\t\t\trf.logs = append(rf.logs, es[j])\n\t\t} else if rf.logs[i].Term != es[j].Term {\n\t\t\trf.logs = rf.logs[:i]\n\t\t\trf.logs = append(rf.logs, es[j])\n\t\t}\n\t\ti++\n\t\tj++\n\t}\n\trf.persist()\n\n\t// 5. If leaderCommit > commitIndex, set commitIndex =\n\t// min(leaderCommit, index of last new entry)\n\trf.receiverTryUpdateCommitIndex(req)\n\t/*--------------------CRITICAL SECTION--------------------*/\n}",
"func TestAppend(t *testing.T) {\n\tif *treeID == 0 {\n\t\tt.Skip(\"--tree_id flag unset, skipping test\")\n\t}\n\n\tname := \"testAppend\"\n\tt.Run(name, func(t *testing.T) {\n\t\tctx := context.Background()\n\t\tpersonality, err := p.NewPersonality(*trillianAddr, *treeID, mustGetSigner(t))\n\t\tif err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t\tchkptOldRaw, err := personality.GetChkpt(ctx)\n\t\tif err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t\tchkptOld := mustOpenCheckpoint(t, chkptOldRaw)\n\t\t// Add a random entry so we can be sure it's new.\n\t\tentry := make([]byte, 10)\n\t\tif _, err := rand.Read(entry); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tchkptNewRaw, err := personality.Append(ctx, entry)\n\t\tif err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t\tchkptNew := mustOpenCheckpoint(t, chkptNewRaw)\n\t\tif chkptNew.Size <= chkptOld.Size {\n\t\t\tt.Errorf(\"the log didn't grow properly in %v\", name)\n\t\t}\n\t\tfmt.Printf(\"success in %v, new log size is %v\\n\", name, chkptNew.Size)\n\t})\n}",
"func sendAppendEntries(s *Sailor, peer string) error {\n\tam := appendMessage{}\n\tam.Term = s.currentTerm\n\tam.LeaderId = s.client.NodeName\n\tam.PrevLogIndex = s.leader.nextIndex[peer] - 1\n\t// This is just some fancy logic to check for the bounds on the log\n\t// e.g. our log has 0 entries, so the prevEntryTerm cannot be pulled from the log\n\tif len(s.log) == 0 {\n\t\tam.PrevLogTerm = 0\n\t\tam.Entries = nil\n\t} else {\n\t\t// If our log is too short to have prevTerm, use 0\n\t\tif int(s.leader.nextIndex[peer])-2 < 0 {\n\t\t\tam.PrevLogTerm = 0\n\t\t} else {\n\t\t\tam.PrevLogTerm = s.log[s.leader.nextIndex[peer]-2].Term\n\t\t}\n\t\t// If our nextIndex is a value we don't have yet, send nothing\n\t\tif s.leader.nextIndex[peer] > uint(len(s.log)) {\n\t\t\tam.Entries = []entry{}\n\t\t} else {\n\t\t\tam.Entries = s.log[s.leader.nextIndex[peer]-1:]\n\t\t}\n\t}\n\n\tam.LeaderCommit = s.volatile.commitIndex\n\tap := messages.Message{}\n\tap.Type = \"appendEntries\"\n\tap.ID = 0\n\tap.Source = s.client.NodeName\n\tap.Value = makePayload(am)\n\treturn s.client.SendToPeer(ap, peer)\n}",
"func (rf *Raft) heartbeatAppendEntries() {\n\t// make server -> reply map\n\treplies := make([]*AppendEntriesReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &AppendEntriesReply{}\n\t}\n\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\n\t\t// if we are no longer the leader\n\t\tif rf.state != Leader {\n\t\t\trf.Log(LogDebug, \"Discovered no longer the leader, stopping heartbeat\")\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t\t// send out heartbeats concurrently if leader\n\t\tfor servIdx := range rf.peers {\n\t\t\tif servIdx != rf.me {\n\n\t\t\t\t// successful request - update matchindex and nextindex accordingly\n\t\t\t\tif replies[servIdx].Success {\n\t\t\t\t\tif replies[servIdx].HighestLogIndexAdded > 0 {\n\t\t\t\t\t\trf.matchIndex[servIdx] = replies[servIdx].HighestLogIndexAdded\n\t\t\t\t\t}\n\t\t\t\t\trf.nextIndex[servIdx] = rf.matchIndex[servIdx] + 1\n\n\t\t\t\t\t// failed request - check for better term or decrease nextIndex\n\t\t\t\t} else if !replies[servIdx].Success && replies[servIdx].Returned {\n\n\t\t\t\t\t// we might have found out we shouldn't be the leader!\n\t\t\t\t\tif replies[servIdx].CurrentTerm > rf.currentTerm {\n\t\t\t\t\t\trf.Log(LogDebug, \"Detected server with higher term, stopping heartbeat and changing to follower.\")\n\t\t\t\t\t\trf.state = Follower\n\t\t\t\t\t\trf.currentTerm = replies[servIdx].CurrentTerm\n\n\t\t\t\t\t\t// persist - updated current term\n\t\t\t\t\t\tdata := rf.GetStateBytes(false)\n\t\t\t\t\t\trf.persister.SaveRaftState(data)\n\n\t\t\t\t\t\tgo rf.heartbeatTimeoutCheck()\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// failure - we need to decrease next index\n\t\t\t\t\t// 1. case where follower has no entry at the place we thought\n\t\t\t\t\t// => want to back up to start of follower log\n\t\t\t\t\t// 2. 
case where server has entry with different term NOT seen by leader\n\t\t\t\t\t// => want to back up nextIndex to the start of the 'run' of entries with that term (i.e. IndexFirstConflictingTerm)\n\t\t\t\t\t// 3. case where server has entry with different term that HAS been seen by leader\n\t\t\t\t\t// => want to back up to last entry leader has with that term\n\t\t\t\t\t//\n\t\t\t\t\t// Note for 2 and 3 ... if leader does not have the relevant log\n\t\t\t\t\t// entries, we need to call InstallSnapshot!\n\t\t\t\t\t//\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"\\n - IndexFirstConflictingTerm\", replies[servIdx].IndexFirstConflictingTerm, \"\\n - ConflictingEntryTerm\", replies[servIdx].ConflictingEntryTerm, \"\\n - LastLogIndex\", replies[servIdx].LastLogIndex)\n\t\t\t\t\tif replies[servIdx].ConflictingEntryTerm == -1 {\n\t\t\t\t\t\t// case 1 - follower has no entry at the given location\n\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].LastLogIndex + 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// if not case 1, need to check we have the logs at and beyond\n\t\t\t\t\t\t// IndexFirstConflictingTerm\n\t\t\t\t\t\traftLogIdx := rf.getTrimmedLogIndex(replies[servIdx].IndexFirstConflictingTerm)\n\t\t\t\t\t\tif raftLogIdx == -1 {\n\t\t\t\t\t\t\t// don't have the logs we need - will need to snapshot\n\t\t\t\t\t\t\t// set nextIndex to the lastIncludedIndex to force this\n\t\t\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif rf.log[raftLogIdx].Term != replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t// case 2 - follower has a term not seen by leader\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 2: follower has a term not seen by leader\")\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// case 3 - follower has a term seen by leader\n\t\t\t\t\t\t\t\t// need to go to latest entry that leader has with this 
term\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 3: follower has a term seen by leader, finding leader's latest entry with this term \\n - rf.log[\", rf.log)\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t\tfor rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx])].Term == replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t\trf.nextIndex[servIdx]++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// if we need to install a snapshot, then\n\t\t\t\t// nextIndex becomes the next index after the snapshot we will install\n\t\t\t\t// notice that we will then immediately send an AppendEntries request to the server,\n\t\t\t\t// and it will fail until the snapshot is installed, and we will just keep\n\t\t\t\t// resetting nextIndex\n\t\t\t\tif rf.nextIndex[servIdx] <= rf.lastIncludedIndex {\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"- need to send InstallSnapshot!\")\n\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex + 1\n\n\t\t\t\t\t// actually call the RPC\n\t\t\t\t\targs := &InstallSnapshotArgs{\n\t\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\t\tSnapshot: rf.persister.ReadSnapshot(),\n\t\t\t\t\t}\n\t\t\t\t\treply := &InstallSnapshotReply{}\n\t\t\t\t\tgo rf.sendInstallSnapshot(servIdx, args, reply)\n\t\t\t\t}\n\n\t\t\t\t// send a new append entries request to the server if the last one has finished\n\t\t\t\trf.Log(LogDebug, \"rf.nextIndex for server\", servIdx, \"set to idx\", rf.nextIndex[servIdx], \"\\n - rf.log\", rf.log, \"\\n - rf.lastIncludedIndex\", rf.lastIncludedIndex, \"\\n - rf.lastIncludedTerm\", rf.lastIncludedTerm)\n\t\t\t\tentries := []LogEntry{}\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tentries = rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx]):]\n\t\t\t\t}\n\t\t\t\targs := &AppendEntriesArgs{\n\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\tLeaderCommitIndex: rf.commitIndex,\n\t\t\t\t\tLogEntries: 
entries,\n\t\t\t\t}\n\n\t\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\t\tgo func(servIdx int) {\n\t\t\t\t\trf.Log(LogDebug, \"sendAppendEntries to servIdx\", servIdx)\n\t\t\t\t\treply := &AppendEntriesReply{}\n\t\t\t\t\tok := rf.sendAppendEntries(servIdx, args, reply)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\trf.Log(LogDebug, \"Received AppendEntries reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t\t}\n\t\t\t\t}(servIdx)\n\t\t\t}\n\t\t}\n\n\t\t// walk up through possible new commit indices\n\t\t// update commit index\n\t\torigIndex := rf.commitIndex\n\t\tnewIdx := rf.commitIndex + 1\n\t\tfor len(rf.log) > 0 && newIdx <= rf.log[len(rf.log)-1].Index {\n\t\t\treplicas := 1 // already replicated in our log\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\tif servIdx != rf.me && rf.matchIndex[servIdx] >= newIdx {\n\t\t\t\t\treplicas++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif replicas >= int(math.Ceil(float64(len(rf.peers))/2.0)) &&\n\t\t\t\tnewIdx > rf.lastIncludedIndex &&\n\t\t\t\trf.getTrimmedLogIndex(newIdx) >= 0 &&\n\t\t\t\trf.log[rf.getTrimmedLogIndex(newIdx)].Term == rf.currentTerm {\n\t\t\t\trf.commitIndex = newIdx\n\t\t\t\trf.Log(LogInfo, \"Entry \", rf.log[rf.getTrimmedLogIndex(rf.commitIndex)], \"replicated on a majority of servers. 
Commited to index\", rf.commitIndex)\n\t\t\t}\n\t\t\tnewIdx++\n\t\t}\n\n\t\t// send messages to applyCh for every message that was committed\n\t\tfor origIndex < rf.commitIndex {\n\t\t\torigIndex++\n\t\t\tif rf.getTrimmedLogIndex(origIndex) >= 0 {\n\t\t\t\trf.Log(LogInfo, \"Sending applyCh confirmation for commit of \", rf.log[rf.getTrimmedLogIndex(origIndex)], \"at index\", origIndex)\n\t\t\t\t{\n\t\t\t\t\trf.applyCh <- ApplyMsg{\n\t\t\t\t\t\tCommandValid: true,\n\t\t\t\t\t\tCommandIndex: origIndex,\n\t\t\t\t\t\tCommandTerm: rf.currentTerm,\n\t\t\t\t\t\tCommand: rf.log[rf.getTrimmedLogIndex(origIndex)].Command,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(heartbeatSendInterval)\n\t}\n}",
"func (rf *Raft) StartAppendLog() {\n\tvar count int32 = 1\n\tfor i, _ := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\tgo func(i int) {\n\t\t\tfor{\n\t\t\t\trf.mu.Lock()\n\t\t\t\t//fmt.Printf(\"follower %d lastlogindex: %v, nextIndex: %v\\n\",i, rf.GetPrevLogIndex(i), rf.nextIndex[i])\n\t\t\t\t//fmt.Print(\"sending log entries from leader %d to peer %d for term %d\\n\", rf.me, i, rf.currentTerm)\n\t\t\t\t//fmt.Print(\"nextIndex:%d\\n\", rf.nextIndex[i])\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\targs := AppendEntriesArgs{\n\t\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\t\tLeaderId: rf.me,\n\t\t\t\t\tPrevLogIndex: rf.GetPrevLogIndex(i),\n\t\t\t\t\tPrevLogTerm: rf.GetPrevLogTerm(i),\n\t\t\t\t\tEntries: append(make([]LogEntry, 0), rf.logEntries[rf.nextIndex[i]:]...),\n\t\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t\t}\n\t\t\t\treply := AppendEntriesReply{}\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tok := rf.sendAppendEntries(i, &args, &reply)\n\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trf.mu.Lock()\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Term > rf.currentTerm {\n\t\t\t\t\trf.BeFollower(reply.Term)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tsend(rf.appendEntry)\n\t\t\t\t\t}()\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Success {\n\t\t\t\t\trf.matchIndex[i] = args.PrevLogIndex + len(args.Entries)\n\t\t\t\t\trf.nextIndex[i] = rf.matchIndex[i] + 1\n\t\t\t\t\t//fmt.Print(\"leader: %v, for peer %v, match index: %d, next index: %d, peers: %d\\n\", rf.me, i, rf.matchIndex[i], rf.nextIndex[i], len(rf.peers))\n\t\t\t\t\tatomic.AddInt32(&count, 1)\n\t\t\t\t\tif atomic.LoadInt32(&count) > int32(len(rf.peers)/2) {\n\t\t\t\t\t\t//fmt.Print(\"leader %d reach agreement\\n, args.prevlogindex:%d, len:%d\\n\", rf.me, args.PrevLogIndex, 
len(args.Entries))\n\t\t\t\t\t\trf.UpdateCommitIndex()\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t//fmt.Printf(\"peer %d reset the next index from %d to %d\\n\", i, rf.nextIndex[i], rf.nextIndex[i]-1)\n\t\t\t\t\tif rf.nextIndex[i] > 0 {\n\t\t\t\t\t\trf.nextIndex[i]--\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t} else {\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}(i)\n\t}\n\n}",
"func (rf *Raft) sendEntries() {\n\trf.mu.Lock()\n\tlastLog := rf.getLastLog()\n\trf.mu.Unlock()\n\tfor i := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\trf.mu.Lock()\n\t\tmatchIndex := rf.LeaderStatus.matchIndex[i]\n\t\tnextIndex := rf.LeaderStatus.nextIndex[i]\n\t\t//DPrintf(\"send entry peer=%v matchIndex=%v lastIndex=%v nextIndex=%v\", i, matchIndex, lastLog.Index, nextIndex)\n\t\tvar req *AppendEntriesArgs\n\t\t// TODO: whether delete ???\n\t\tif matchIndex >= lastLog.Index {\n\t\t\treq = &AppendEntriesArgs{\n\t\t\t\tType: HeartBeat,\n\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\tLeaderId: rf.peerId,\n\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t}\n\t\t\tDPrintf(\"peer=%v send heartbeat to peer=%v\", rf.me, i)\n\t\t} else {\n\t\t\t// TODO: if the logEntries be cutoff after make snapshot, we should shift the start index\n\t\t\tlogEntries := rf.logEntries[matchIndex+1 : min(nextIndex+1, len(rf.logEntries))]\n\t\t\tprevLog := rf.logEntries[matchIndex]\n\t\t\treq = &AppendEntriesArgs{\n\t\t\t\tType: Entries,\n\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\tLeaderId: rf.peerId,\n\t\t\t\tPrevLogIndex: prevLog.Index,\n\t\t\t\tPrevLogTerm: prevLog.Term,\n\t\t\t\tLogEntries: logEntries, // TODO: refine to control each time send message count (case 2B)\n\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t}\n\t\t\t//DPrintf(\"peer=%v send entry=%v to=%v next=%v logEntrySize=%d\", rf.me, rf.logEntries[matchIndex+1 : nextIndex+1], i, nextIndex, len(logEntries))\n\t\t}\n\t\trf.mu.Unlock()\n\t\tgo rf.sendAppendEntries(i, req, &AppendEntriesReply{})\n\t}\n}",
"func (s *server) processAppendEntriesResponse(resp *AppendEntriesResponse) {\n\t// If we find a higher term then change to a follower and exit.\n\tif resp.Term() > s.Term() {\n\t\ts.updateCurrentTerm(resp.Term(), \"\")\n\t\treturn\n\t}\n\n\t// panic response if it's not successful.\n\tif !resp.Success() {\n\t\treturn\n\t}\n\n\t// if one peer successfully append a log from the leader term,\n\t// we add it to the synced list\n\tif resp.append == true {\n\t\tfmt.Println(s.syncedPeer)\n\t\tfmt.Println(resp.peer, \"->\", s.syncedPeer[resp.peer])\n\t\ts.syncedPeer[resp.peer] = true\n\t\tfmt.Println(resp.peer, \"->\", s.syncedPeer[resp.peer])\n\t}\n\n\t// Increment the commit count to make sure we have a quorum before committing.\n\tif len(s.syncedPeer) < s.QuorumSize() {\n\t\treturn\n\t}\n\n\t// Determine the committed index that a majority has.\n\tvar indices []uint64\n\tindices = append(indices, s.log.currentIndex())\n\tfor _, peer := range s.peers {\n\t\tindices = append(indices, peer.getPrevLogIndex())\n\t}\n\tsort.Sort(sort.Reverse(uint64Slice(indices)))\n\n\t// We can commit up to the index which the majority of the members have appended.\n\tcommitIndex := indices[s.QuorumSize()-1]\n\tcommittedIndex := s.log.commitIndex\n\n\tif commitIndex > committedIndex {\n\t\t// leader needs to do a fsync before committing log entries\n\t\ts.log.sync()\n\t\ts.log.setCommitIndex(commitIndex)\n\t\ts.debugln(\"commit index \", commitIndex)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestLeaderSyncFollowerLog tests that the leader could bring a follower's log into consistency with its own. Reference: section 5.3, figure 7
|
func TestLeaderSyncFollowerLog(t *testing.T) {
ents := []pb.Entry{
{},
{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
{Term: 4, Index: 4}, {Term: 4, Index: 5},
{Term: 5, Index: 6}, {Term: 5, Index: 7},
{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},
}
term := uint64(8)
tests := [][]pb.Entry{
{
{},
{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
{Term: 4, Index: 4}, {Term: 4, Index: 5},
{Term: 5, Index: 6}, {Term: 5, Index: 7},
{Term: 6, Index: 8}, {Term: 6, Index: 9},
},
{
{},
{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
{Term: 4, Index: 4},
},
{
{},
{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
{Term: 4, Index: 4}, {Term: 4, Index: 5},
{Term: 5, Index: 6}, {Term: 5, Index: 7},
{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},
},
{
{},
{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
{Term: 4, Index: 4}, {Term: 4, Index: 5},
{Term: 5, Index: 6}, {Term: 5, Index: 7},
{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},
{Term: 7, Index: 11}, {Term: 7, Index: 12},
},
{
{},
{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},
},
{
{},
{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},
{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},
},
}
for i, tt := range tests {
leadStorage := NewMemoryStorage()
defer leadStorage.Close()
leadStorage.Append(ents)
lead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)
lead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})
followerStorage := NewMemoryStorage()
defer followerStorage.Close()
followerStorage.Append(tt)
follower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)
follower.loadState(pb.HardState{Term: term - 1})
// It is necessary to have a three-node cluster.
// The second may have more up-to-date log than the first one, so the
// first node needs the vote from the third node to become the leader.
n := newNetwork(lead, follower, nopStepper)
n.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
// The election occurs in the term after the one we loaded with
// lead.loadState above.
n.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})
n.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
if g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != "" {
t.Errorf("#%d: log diff:\n%s", i, g)
}
}
}
|
[
"func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}",
"func Test_LeaderChanges(t *testing.T) {\n\t//giving time to elect a new leader\n\ttime.Sleep(time.Second * 1)\n\t//Start a timer here and verify that Append call doesn't succeed and timer times out which means S1 is partitioned--PENDING\n\tconst n int = 4\n\tset1 := \"set abc 20 8\\r\\nabcdefjg\\r\\n\"\n\texpected := []bool{false, true, false, false}\n\tchann := make([]chan LogEntry, n)\n\tr := [n]*Raft{r0, r2, r3, r4}\n\tfor k := 0; k < n; k++ {\n\t\tchann[k] = make(chan LogEntry)\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tgo r[i].Client(chann[i], set1)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tresponse := <-chann[i]\n\t\tcommitStatus := response.Committed()\n\t\tif expected[i] != commitStatus {\n\t\t\tt.Error(\"Mismatch!\", expected, string(response.Data()))\n\t\t}\n\t}\n}",
"func TestStartAsFollower(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateFollower)\n\t}\n}",
"func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}",
"func TestLogReplication1(t *testing.T) {\n\n\tack := make(chan bool)\n\n\t//Get leader\n\tleaderId := raft.GetLeaderId()\n\n\t//Append a log entry to leader as client\n\traft.InsertFakeLogEntry(leaderId)\n\n\tleaderLog := raft.GetLogAsString(leaderId)\n\t// log.Println(leaderLog)\n\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack //Wait for 1 second for log replication to happen\n\n\t//Get logs of all others and compare with each\n\tfor i := 0; i < 5; i++ {\n\t\tcheckIfExpected(t, raft.GetLogAsString(i), leaderLog)\n\t}\n\n}",
"func TestLearnerLogReplication(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tnt := newNetwork(n1, n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t// n1 is leader and n2 is learner\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif !n2.isLearner {\n\t\tt.Error(\"peer 2 state: not learner, want yes\")\n\t}\n\n\tnextCommitted := n1.raftLog.committed + 1\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"somedata\")}}})\n\tif n1.raftLog.committed != nextCommitted {\n\t\tt.Errorf(\"peer 1 wants committed to %d, but still %d\", nextCommitted, n1.raftLog.committed)\n\t}\n\n\tif n1.raftLog.committed != n2.raftLog.committed {\n\t\tt.Errorf(\"peer 2 wants committed to %d, but still %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n\n\tmatch := n1.getProgress(2).Match\n\tif match != n2.raftLog.committed {\n\t\tt.Errorf(\"progress 2 of leader 1 wants match %d, but got %d\", n2.raftLog.committed, match)\n\t}\n}",
"func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}",
"func TestLogReplication2(t *testing.T) {\n\tack := make(chan bool)\n\n\t//Kill one server\n\traft.KillServer(1)\n\n\t//Append log to server\n\ttime.AfterFunc(1*time.Second, func() {\n\t\t//Get leader\n\t\tleaderId := raft.GetLeaderId()\n\n\t\t//Append a log entry to leader as client\n\t\traft.InsertFakeLogEntry(leaderId)\n\t})\n\n\t//Resurrect old server after enough time for other to move on\n\ttime.AfterFunc(2*time.Second, func() {\n\t\t//Resurrect old server\n\t\traft.ResurrectServer(1)\n\t})\n\n\t//Check log after some time to see if it matches with current leader\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tleaderLog := raft.GetLogAsString(leaderId)\n\t\tserverLog := raft.GetLogAsString(1)\n\n\t\tcheckIfExpected(t, serverLog, leaderLog)\n\n\t\tack <- true\n\t})\n\n\t<-ack\n\n}",
"func Test_LogRepair(t *testing.T) {\n\t//Crash one of the follower say 0 for sometime, while leader is sending AEs to other followers\n\t//Wake up f0, and now leader should repair the log!\n\t//append more entries to make log stale! 1 entry doesn't make log stale since leader is always ahead of followers by 1 entry,\n\tconst n int = 4\n\tset1 := \"set abc 20 8\\r\\nabcdefjg\\r\\n\"\n\tset3 := \"set abc 3 8\\r\\nabcdefjg\\r\\n\"\n\tset4 := \"set abc 6 7\\r\\nmonikas\\r\\n\"\n\tgetm3 := \"getm abc\\r\\n\"\n\tcmd := []string{set1, set3, set4, getm3}\n\texpected := true\n\tchann := make([]chan LogEntry, n)\n\tfor k := 0; k < n; k++ {\n\t\tchann[k] = make(chan LogEntry)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tgo r2.Client(chann[i], cmd[i])\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tresponse := <-chann[i]\n\t\tcommitStatus := response.Committed()\n\t\tif expected != commitStatus {\n\t\t\tt.Error(\"Mismatch!\", expected, response.Committed())\n\t\t}\n\t}\n\t//fmt.Println(\"\\n=========Server 1 resuming now!============\\n\")\n\tsetCrash(false)\n\tsetServerToCrash(-1)\n\t//now Server1's log gets repaired when it starts receiving Heartbeats during this time period--HOW TO TEST?\n\ttime.Sleep(time.Second * 1)\n}",
"func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}",
"func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}",
"func TestVote_Follower(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\n\tt.Run(\"Handle RequestVote with Stale Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tleader.setCurrentTerm(3)\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// make sure the client get the correct response while registering itself with a candidate\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle RequestVote with Higher Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader.leaderMutex.Lock()\n\t\tlogEntry := &rpc.LogEntry{\n\t\t\tIndex: leader.LastLogIndex() + 1,\n\t\t\tTermId: leader.GetCurrentTerm(),\n\t\t\tType: rpc.CommandType_NOOP,\n\t\t\tData: []byte{1, 2, 3, 4},\n\t\t}\n\t\tleader.StoreLog(logEntry)\n\t\tleader.leaderMutex.Unlock()\n\t\ttime.Sleep(1 * time.Second)\n\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\n\t\treply, _ = followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}",
"func TestClusteringFollowerDeleteChannelNotInSnapshot(t *testing.T) {\n\tcleanupDatastore(t)\n\tdefer cleanupDatastore(t)\n\tcleanupRaftLog(t)\n\tdefer cleanupRaftLog(t)\n\n\t// For this test, use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\tmaxInactivity := 250 * time.Millisecond\n\n\t// Configure first server\n\ts1sOpts := getTestDefaultOptsForClustering(\"a\", true)\n\ts1sOpts.Clustering.TrailingLogs = 0\n\ts1sOpts.MaxInactivity = maxInactivity\n\ts1 := runServerWithOpts(t, s1sOpts, nil)\n\tdefer s1.Shutdown()\n\n\t// Configure second server.\n\ts2sOpts := getTestDefaultOptsForClustering(\"b\", false)\n\ts2sOpts.Clustering.TrailingLogs = 0\n\ts2sOpts.MaxInactivity = maxInactivity\n\ts2 := runServerWithOpts(t, s2sOpts, nil)\n\tdefer s2.Shutdown()\n\n\t// Configure third server.\n\ts3sOpts := getTestDefaultOptsForClustering(\"c\", false)\n\ts3sOpts.Clustering.TrailingLogs = 0\n\ts3sOpts.MaxInactivity = maxInactivity\n\ts3 := runServerWithOpts(t, s3sOpts, nil)\n\tdefer s3.Shutdown()\n\n\tservers := []*StanServer{s1, s2, s3}\n\tfor _, s := range servers {\n\t\tcheckState(t, s, Clustered)\n\t}\n\n\t// Wait for leader to be elected.\n\tleader := getLeader(t, 10*time.Second, servers...)\n\n\t// Create a client connection.\n\tsc, err := stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// Send a message, which will create the channel\n\tchannel := \"foo\"\n\texpectedMsg := make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"first\")}\n\tif err := sc.Publish(channel, expectedMsg[1].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tsc.Close()\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n\n\t// Kill a follower.\n\tvar follower *StanServer\n\tfor _, s := range servers {\n\t\tif leader != s 
{\n\t\t\tfollower = s\n\t\t\tbreak\n\t\t}\n\t}\n\tservers = removeServer(servers, follower)\n\tfollower.Shutdown()\n\n\t// Wait for more than the MaxInactivity\n\ttime.Sleep(2 * maxInactivity)\n\t// Check channel is no longer in leader\n\tverifyChannelExist(t, leader, channel, false, 5*time.Second)\n\t// Perform a snapshot after the channel has been deleted\n\tif err := leader.raft.Snapshot().Error(); err != nil {\n\t\tt.Fatalf(\"Error on snapshot: %v\", err)\n\t}\n\n\t// Restart the follower\n\tfollower = runServerWithOpts(t, follower.opts, nil)\n\tdefer follower.Shutdown()\n\tservers = append(servers, follower)\n\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// The follower will have recovered foo (from streaming store), but then from\n\t// the snapshot should realize that the channel no longer exits and should delete it.\n\tverifyChannelExist(t, follower, channel, false, 5*time.Second)\n}",
"func TestFollower(t *testing.T) {\n\tf := newFixture(t)\n\n\tprevUpdateTime := time.Now().Add(-10 * time.Second)\n\tprevUpdateTimeKube := metav1.NewTime(prevUpdateTime)\n\tmetric0, metric0Typed := newFakeDatadogMetric(\"default\", \"dd-metric-0\", \"metric query0\", datadoghq.DatadogMetricStatus{\n\t\tValue: \"10\",\n\t\tConditions: []datadoghq.DatadogMetricCondition{\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeActive,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeValid,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeUpdated,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeError,\n\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t},\n\t})\n\tmetric1, metric1Typed := newFakeDatadogMetric(\"default\", \"autogen-1\", \"metric query1\", datadoghq.DatadogMetricStatus{\n\t\tValue: \"10\",\n\t\tConditions: []datadoghq.DatadogMetricCondition{\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeActive,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeValid,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeUpdated,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t\tLastTransitionTime: 
prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: datadoghq.DatadogMetricConditionTypeError,\n\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\tLastTransitionTime: prevUpdateTimeKube,\n\t\t\t\tLastUpdateTime: prevUpdateTimeKube,\n\t\t\t},\n\t\t},\n\t})\n\tunstructured.SetNestedField(metric1.Object, \"dd-metric-1\", \"spec\", \"externalMetricName\")\n\tmetric1Typed.Spec.ExternalMetricName = \"dd-metric-1\"\n\n\tupdateTime := time.Now()\n\tf.datadogMetricLister = append(f.datadogMetricLister, metric0, metric1)\n\tf.objects = append(f.objects, metric0Typed, metric1Typed)\n\t// We have new updates locally (maybe leader changed or something. Followers should still overwrite local cache)\n\tddm := model.DatadogMetricInternal{\n\t\tID: \"default/dd-metric-0\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tValue: 20.0,\n\t\tUpdateTime: kubernetes.TimeWithoutWall(updateTime),\n\t\tDataTime: kubernetes.TimeWithoutWall(updateTime),\n\t\tError: fmt.Errorf(\"Error from backend while fetching metric\"),\n\t}\n\tddm.SetQueries(\"metric query0\")\n\tf.store.Set(\"default/dd-metric-0\", ddm, \"utest\")\n\n\tf.runControllerSync(false, \"default/dd-metric-0\", nil)\n\n\t// Check internal store content\n\tassert.Equal(t, 1, f.store.Count())\n\tddm = model.DatadogMetricInternal{\n\t\tID: \"default/dd-metric-0\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tValue: 10.0,\n\t\tUpdateTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tDataTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tError: nil,\n\t}\n\tddm.SetQueries(\"metric query0\")\n\tassert.Equal(t, &ddm, f.store.Get(\"default/dd-metric-0\"))\n\n\tf.runControllerSync(false, \"default/autogen-1\", nil)\n\tassert.Equal(t, 2, f.store.Count())\n\n\tddm = model.DatadogMetricInternal{\n\t\tID: \"default/autogen-1\",\n\t\tValid: true,\n\t\tActive: true,\n\t\tAutogen: true,\n\t\tExternalMetricName: \"dd-metric-1\",\n\t\tValue: 10.0,\n\t\tUpdateTime: 
kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tDataTime: kubernetes.TimeWithoutWall(prevUpdateTime.UTC()),\n\t\tError: nil,\n\t}\n\tddm.SetQueries(\"metric query1\")\n\tassert.Equal(t, &ddm, f.store.Get(\"default/autogen-1\"))\n}",
"func TestLeaderTransferToUpToDateNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}",
"func TestClusteringFollowerDeleteOldChannelPriorToSnapshotRestore(t *testing.T) {\n\tcleanupDatastore(t)\n\tdefer cleanupDatastore(t)\n\tcleanupRaftLog(t)\n\tdefer cleanupRaftLog(t)\n\n\trestoreMsgsAttempts = 2\n\trestoreMsgsRcvTimeout = 50 * time.Millisecond\n\trestoreMsgsSleepBetweenAttempts = 0\n\tdefer func() {\n\t\trestoreMsgsAttempts = defaultRestoreMsgsAttempts\n\t\trestoreMsgsRcvTimeout = defaultRestoreMsgsRcvTimeout\n\t\trestoreMsgsSleepBetweenAttempts = defaultRestoreMsgsSleepBetweenAttempts\n\t}()\n\n\t// For this test, use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\tmaxInactivity := 250 * time.Millisecond\n\n\t// Configure first server\n\ts1sOpts := getTestDefaultOptsForClustering(\"a\", true)\n\ts1sOpts.Clustering.TrailingLogs = 0\n\ts1sOpts.MaxInactivity = maxInactivity\n\ts1 := runServerWithOpts(t, s1sOpts, nil)\n\tdefer s1.Shutdown()\n\n\t// Configure second server.\n\ts2sOpts := getTestDefaultOptsForClustering(\"b\", false)\n\ts2sOpts.Clustering.TrailingLogs = 0\n\ts2sOpts.MaxInactivity = maxInactivity\n\ts2 := runServerWithOpts(t, s2sOpts, nil)\n\tdefer s2.Shutdown()\n\n\t// Configure third server.\n\ts3sOpts := getTestDefaultOptsForClustering(\"c\", false)\n\ts3sOpts.Clustering.TrailingLogs = 0\n\ts3sOpts.MaxInactivity = maxInactivity\n\ts3 := runServerWithOpts(t, s3sOpts, nil)\n\tdefer s3.Shutdown()\n\n\tservers := []*StanServer{s1, s2, s3}\n\n\t// Wait for leader to be elected.\n\tleader := getLeader(t, 10*time.Second, servers...)\n\n\t// Create a client connection.\n\tsc, err := stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// Send a message, which will create the channel\n\tchannel := \"foo\"\n\texpectedMsg := make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"1\")}\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"2\")}\n\texpectedMsg[3] = msg{sequence: 3, data: 
[]byte(\"3\")}\n\tfor i := 1; i < 4; i++ {\n\t\tif err := sc.Publish(channel, expectedMsg[uint64(i)].data); err != nil {\n\t\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t\t}\n\t}\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 3, expectedMsg, servers...)\n\n\t// Shutdown a follower\n\tvar follower *StanServer\n\tfor _, s := range servers {\n\t\tif leader != s {\n\t\t\tfollower = s\n\t\t\tbreak\n\t\t}\n\t}\n\tservers = removeServer(servers, follower)\n\tfollower.Shutdown()\n\n\t// Let the channel be deleted\n\ttime.Sleep(2 * maxInactivity)\n\n\t// Now send a message that causes the channel to be recreated\n\texpectedMsg = make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"4\")}\n\tif err := sc.Publish(channel, expectedMsg[1].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n\n\t// Perform snapshot on the leader.\n\tif err := leader.raft.Snapshot().Error(); err != nil {\n\t\tt.Fatalf(\"Error during snapshot: %v\", err)\n\t}\n\n\t// Now send another message then a sub to prevent deletion\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"5\")}\n\tif err := sc.Publish(channel, expectedMsg[2].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 2, expectedMsg, servers...)\n\tsc.Subscribe(channel, func(_ *stan.Msg) {}, stan.DeliverAllAvailable())\n\n\t// Now restart the follower...\n\tfollower = runServerWithOpts(t, follower.opts, nil)\n\tdefer follower.Shutdown()\n\tservers = append(servers, follower)\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// Now check content of channel on the follower.\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 2, expectedMsg, follower)\n}",
"func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}",
"func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestVoter tests that the voter denies its vote if its own log is more up-to-date than that of the candidate. Reference: section 5.4.1
|
// TestVoter verifies that a node refuses to grant its vote when its own
// log is more up-to-date than the candidate's log (Raft section 5.4.1).
// Each case appends ents to the voter's log, then delivers a MsgVote
// carrying the candidate's last log term/index and checks the reject flag.
func TestVoter(t *testing.T) {
	cases := []struct {
		ents    []pb.Entry // entries pre-loaded into the voter's storage
		logterm uint64     // candidate's last log term
		index   uint64     // candidate's last log index
		wreject bool       // expected Reject flag on the vote response
	}{
		// same logterm
		{ents: []pb.Entry{{Term: 1, Index: 1}}, logterm: 1, index: 1, wreject: false},
		{ents: []pb.Entry{{Term: 1, Index: 1}}, logterm: 1, index: 2, wreject: false},
		{ents: []pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, logterm: 1, index: 1, wreject: true},
		// candidate higher logterm
		{ents: []pb.Entry{{Term: 1, Index: 1}}, logterm: 2, index: 1, wreject: false},
		{ents: []pb.Entry{{Term: 1, Index: 1}}, logterm: 2, index: 2, wreject: false},
		{ents: []pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, logterm: 2, index: 1, wreject: false},
		// voter higher logterm
		{ents: []pb.Entry{{Term: 2, Index: 1}}, logterm: 1, index: 1, wreject: true},
		{ents: []pb.Entry{{Term: 2, Index: 1}}, logterm: 1, index: 2, wreject: true},
		{ents: []pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, logterm: 1, index: 1, wreject: true},
	}
	for i, tc := range cases {
		st := NewMemoryStorage()
		st.Append(tc.ents)
		sm := newTestRaft(1, []uint64{1, 2}, 10, 1, st)
		defer closeAndFreeRaft(sm)

		sm.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tc.logterm, Index: tc.index})

		replies := sm.readMessages()
		if len(replies) != 1 {
			t.Fatalf("#%d: len(msg) = %d, want %d", i, len(replies), 1)
		}
		resp := replies[0]
		if resp.Type != pb.MsgVoteResp {
			t.Errorf("#%d: msgType = %d, want %d", i, resp.Type, pb.MsgVoteResp)
		}
		if resp.Reject != tc.wreject {
			t.Errorf("#%d: reject = %t, want %t", i, resp.Reject, tc.wreject)
		}
	}
}
|
[
"func TestVote_Candidate(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\tconfig.ClusterSize = 5\n\n\tcluster, err := CreateLocalCluster(config)\n\tdefer cleanupCluster(cluster)\n\n\ttime.Sleep(2 * time.Second)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tleader, err := findLeader(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tleader.setCurrentTerm(3)\n\tleader.leaderMutex.Lock()\n\tlogEntry := &rpc.LogEntry{\n\t\tIndex: leader.LastLogIndex() + 1,\n\t\tTermId: leader.GetCurrentTerm(),\n\t\tType: rpc.CommandType_NOOP,\n\t\tData: []byte{1, 2, 3, 4},\n\t}\n\tleader.StoreLog(logEntry)\n\tleader.leaderMutex.Unlock()\n\ttime.Sleep(1 * time.Second)\n\n\tfollowers, err := findAllFollowers(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif followers[0].GetCurrentTerm() != 3 {\n\t\tt.Fatalf(\"Term should've changed to %d but is %d\", 3, followers[0].GetCurrentTerm())\n\t}\n\n\tfollowers[1].setCurrentTerm(3)\n\tfollowers[1].config.ElectionTimeout = 1 * time.Second\n\tfollowers[3].NetworkPolicy.PauseWorld(true)\n\tfollowers[2].NetworkPolicy.PauseWorld(true)\n\tleader.NetworkPolicy.PauseWorld(true)\n\n\tt.Run(\"Handle competing RequestVote with Stale Term\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Out-of-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(100),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle competing RequestVote with Higher Term and Up-to-date log\", func(t *testing.T) {\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(200),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(3),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func TestElection_HasVoted(t *testing.T) {\n\ttestDatabase := database.StormDB{\n\t\tFile: \"election_testdb.db\",\n\t}\n\terr := testDatabase.Connect()\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't connect to database. Error: %s\", err.Error())\n\t}\n\ttestVoter := models.Voter{\n\t\tStudentID: 1,\n\t\tCohort: 1,\n\t\tName: \"Prof Sturman\",\n\t}\n\ttestVoterWontVote := models.Voter{\n\t\tStudentID: 2,\n\t\tCohort: 1,\n\t\tName: \"Prof Goldschmidt\",\n\t}\n\ttestCandidate := models.Candidate{\n\t\tID: 1,\n\t\tCohort: 1,\n\t\tName: \"Joey Lyon\",\n\t}\n\n\terr = testDatabase.StoreVoter(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreVoter(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreCandidate(testCandidate)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test candidate to database\")\n\t}\n\n\te := New(&testDatabase, false, []string{})\n\t// Begin testing HasVoted function\n\tret, err := e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter hasn't voted\")\n\t}\n\n\tvote := &models.Vote{\n\t\tCandidate: 1,\n\t\tStudentID: 1,\n\t}\n\tvote.HashVote(&testVoter)\n\te.CastVotes(&testVoter, []models.Vote{*vote})\n\tret, err = e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret == false {\n\t\tt.Errorf(\"HasVoted returned false when a voter has voted\")\n\t}\n\n\tret, err = e.HasVoted(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter has not voted\")\n\t}\n\terr = os.Remove(\"election_testdb.db\")\n\tif err != nil {\n\t\tt.Log(\"Cleanup failed\")\n\t}\n}",
"func TestVoting(t *testing.T) {\n\t// Define the various voting scenarios to test\n\ttests := []struct {\n\t\tepoch uint64\n\t\tvalidators []string\n\t\tvotes []testerVote\n\t\tresults []string\n\t}{\n\t\t{\n\t\t\t// Single validator, no votes cast\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{{validator: \"A\"}},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Single validator, voting to add two others (only accept first, second needs 2 votes)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, voting to add three others (only accept first two, third needs 3 votes already)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"E\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"E\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Single validator, dropping itself (weird, but one less cornercase by explicitly allowing this)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"A\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them (not fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them 
(fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Three validators, two of them deciding to drop the third\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of two not being enough to drop anyone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of three already being enough to drop someone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Authorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Authorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", 
auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Deauthorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Deauthorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (deauth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (auth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: 
[]testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Cascading changes are not allowed, only the the account being voted on may change\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) execute on touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"C\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) may go out of consensus on first touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: 
false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Ensure that pending votes don't survive authorization status changes. This\n\t\t\t// corner case can only appear if a validator is quickly added, remove and then\n\t\t\t// readded (or the inverse), while one of the original voters dropped. If a\n\t\t\t// past vote is left cached in the system somewhere, this will interfere with\n\t\t\t// the final validator outcome.\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\", \"E\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"F\", auth: true}, // Authorize F, 3 votes needed\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: false}, // Deauthorize F, 4 votes needed (leave A's previous vote \"unchanged\")\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: true}, // Almost authorize F, 2/3 votes needed\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"A\", auth: false}, // Deauthorize A, 3 votes needed\n\t\t\t\t{validator: \"C\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true}, // Finish authorizing F, 3/3 votes needed\n\t\t\t},\n\t\t\tresults: []string{\"B\", \"C\", \"D\", \"E\", \"F\"},\n\t\t}, {\n\t\t\t// Epoch transitions reset all votes to allow chain checkpointing\n\t\t\tepoch: 3,\n\t\t\tvalidators: 
[]string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"}, // Checkpoint block, (don't vote here, it's validated outside of snapshots)\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t},\n\t}\n\n\t// Run through the scenarios and test them\n\tfor i, tt := range tests {\n\t\t// Create the account pool and generate the initial set of validators\n\t\taccounts := newTesterAccountPool()\n\n\t\tvalidators := make([]common.Address, len(tt.validators))\n\t\tfor j, validator := range tt.validators {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tgenesis := testutils.Genesis(validators, true)\n\t\tconfig := new(istanbul.Config)\n\t\t*config = *istanbul.DefaultConfig\n\t\tconfig.TestQBFTBlock = big.NewInt(0)\n\t\tif tt.epoch != 0 {\n\t\t\tconfig.Epoch = tt.epoch\n\t\t}\n\n\t\tchain, backend := newBlockchainFromConfig(\n\t\t\tgenesis,\n\t\t\t[]*ecdsa.PrivateKey{accounts.accounts[tt.validators[0]]},\n\t\t\tconfig,\n\t\t)\n\n\t\t// Assemble a chain of headers from the cast votes\n\t\theaders := make([]*types.Header, len(tt.votes))\n\t\tfor j, vote := range tt.votes {\n\t\t\tblockNumber := big.NewInt(int64(j) + 1)\n\t\t\theaders[j] = &types.Header{\n\t\t\t\tNumber: blockNumber,\n\t\t\t\tTime: uint64(int64(j) * int64(config.GetConfig(blockNumber).BlockPeriod)),\n\t\t\t\tCoinbase: accounts.address(vote.validator),\n\t\t\t\tDifficulty: istanbulcommon.DefaultDifficulty,\n\t\t\t\tMixDigest: types.IstanbulDigest,\n\t\t\t}\n\t\t\t_ = 
qbftengine.ApplyHeaderQBFTExtra(\n\t\t\t\theaders[j],\n\t\t\t\tqbftengine.WriteValidators(validators),\n\t\t\t)\n\n\t\t\tif j > 0 {\n\t\t\t\theaders[j].ParentHash = headers[j-1].Hash()\n\t\t\t}\n\n\t\t\tcopy(headers[j].Extra, genesis.ExtraData)\n\n\t\t\tif len(vote.voted) > 0 {\n\t\t\t\tif err := accounts.writeValidatorVote(headers[j], vote.validator, vote.voted, vote.auth); err != nil {\n\t\t\t\t\tt.Errorf(\"Error writeValidatorVote test: %d, validator: %s, voteType: %v (err=%v)\", j, vote.voted, vote.auth, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Pass all the headers through clique and ensure tallying succeeds\n\t\thead := headers[len(headers)-1]\n\n\t\tsnap, err := backend.snapshot(chain, head.Number.Uint64(), head.Hash(), headers)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"test %d: failed to create voting snapshot: %v\", i, err)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\t// Verify the final list of validators against the expected ones\n\t\tvalidators = make([]common.Address, len(tt.results))\n\t\tfor j, validator := range tt.results {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresult := snap.validators()\n\t\tif len(result) != len(validators) {\n\t\t\tt.Errorf(\"test %d: validators mismatch: have %x, want %x\", i, result, validators)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\tfor j := 0; j < len(result); j++ {\n\t\t\tif !bytes.Equal(result[j][:], validators[j][:]) {\n\t\t\t\tt.Errorf(\"test %d, validator %d: validator mismatch: have %x, want %x\", i, j, result[j], validators[j])\n\t\t\t}\n\t\t}\n\t\tbackend.Stop()\n\t}\n}",
"func TestVote_Follower(t *testing.T) {\n\tutil.SuppressLoggers()\n\tconfig := DefaultConfig()\n\n\tt.Run(\"Handle RequestVote with Stale Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tleader.setCurrentTerm(3)\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// make sure the client get the correct response while registering itself with a candidate\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(1),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(3),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\t})\n\n\tt.Run(\"Handle RequestVote with Higher Term\", func(t *testing.T) {\n\t\tcluster, err := CreateLocalCluster(config)\n\t\tdefer cleanupCluster(cluster)\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader, err := findLeader(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfollowers, err := findAllFollowers(cluster)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tleader.leaderMutex.Lock()\n\t\tlogEntry := &rpc.LogEntry{\n\t\t\tIndex: leader.LastLogIndex() + 1,\n\t\t\tTermId: leader.GetCurrentTerm(),\n\t\t\tType: rpc.CommandType_NOOP,\n\t\t\tData: []byte{1, 2, 3, 4},\n\t\t}\n\t\tleader.StoreLog(logEntry)\n\t\tleader.leaderMutex.Unlock()\n\t\ttime.Sleep(1 * time.Second)\n\n\t\treply, _ := followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(1),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif reply.VoteGranted 
{\n\t\t\tt.Fatal(\"Should've denied vote\")\n\t\t}\n\n\t\treply, _ = followers[0].RequestVoteCaller(context.Background(), &rpc.RequestVoteRequest{\n\t\t\tTerm: uint64(3),\n\t\t\tCandidate: followers[1].Self,\n\t\t\tLastLogIndex: uint64(2),\n\t\t\tLastLogTerm: uint64(1),\n\t\t})\n\t\tif !reply.VoteGranted {\n\t\t\tt.Fatal(\"Should've granted vote\")\n\t\t}\n\t})\n}",
"func CheckVoter(userID, safeword string, dbc *mongo.Client) (VoterVote, error) {\r\n\tu, err := primitive.ObjectIDFromHex(userID)\r\n\tif err != nil {\r\n\t\treturn VoterVote{true, false}, err\r\n\t}\r\n\r\n\tr := struct {\r\n\t\tHasVoted bool `bson:\"hasVoted\"`\r\n\t\tSafeword string `bson:\"safeword\"`\r\n\t\tHash string `bson:\"hash\"`\r\n\t}{}\r\n\terr = dbc.Database(\"aye-go\").Collection(\"voter\").FindOne(context.Background(), bson.M{\"_id\": u}).Decode(&r)\r\n\r\n\t// cause for rejection\r\n\tif r.HasVoted || err == mongo.ErrNoDocuments {\r\n\t\treturn VoterVote{r.HasVoted, false}, nil\r\n\t}\r\n\r\n\t// unintended error\r\n\tif err != nil {\r\n\t\treturn VoterVote{r.HasVoted, false}, err\r\n\t}\r\n\r\n\thsw := md5.Sum([]byte(safeword + r.Hash))\r\n\r\n\t// all clear\r\n\treturn VoterVote{r.HasVoted, fmt.Sprintf(\"%x\", hsw) != r.Safeword}, nil\r\n}",
"func TestCommitment_singleVoter(t *testing.T) {\n\tcommitCh := make(chan struct{}, 1)\n\tc := newCommitment(commitCh, voters(1), 4)\n\tc.match(\"s1\", 10)\n\tif c.getCommitIndex() != 10 {\n\t\tt.Fatalf(\"expected 10 entries committed, found %d\",\n\t\t\tc.getCommitIndex())\n\t}\n\tif !drainNotifyCh(commitCh) {\n\t\tt.Fatalf(\"expected commit notify\")\n\t}\n\tc.setConfiguration(voters(1))\n\tif drainNotifyCh(commitCh) {\n\t\tt.Fatalf(\"unexpected commit notify\")\n\t}\n\tc.match(\"s1\", 12)\n\tif c.getCommitIndex() != 12 {\n\t\tt.Fatalf(\"expected 12 entries committed, found %d\",\n\t\t\tc.getCommitIndex())\n\t}\n\tif !drainNotifyCh(commitCh) {\n\t\tt.Fatalf(\"expected commit notify\")\n\t}\n}",
"func TestCommitment_noVoterSanity(t *testing.T) {\n\tcommitCh := make(chan struct{}, 1)\n\tc := newCommitment(commitCh, makeConfiguration([]string{}), 4)\n\tc.match(\"s1\", 10)\n\tc.setConfiguration(makeConfiguration([]string{}))\n\tc.match(\"s1\", 10)\n\tif c.getCommitIndex() != 0 {\n\t\tt.Fatalf(\"no voting servers: shouldn't be able to commit\")\n\t}\n\tif drainNotifyCh(commitCh) {\n\t\tt.Fatalf(\"unexpected commit notify\")\n\t}\n\n\t// add a voter so we can commit something and then remove it\n\tc.setConfiguration(voters(1))\n\tc.match(\"s1\", 10)\n\tif c.getCommitIndex() != 10 {\n\t\tt.Fatalf(\"expected 10 entries committed, found %d\",\n\t\t\tc.getCommitIndex())\n\t}\n\tif !drainNotifyCh(commitCh) {\n\t\tt.Fatalf(\"expected commit notify\")\n\t}\n\n\tc.setConfiguration(makeConfiguration([]string{}))\n\tc.match(\"s1\", 20)\n\tif c.getCommitIndex() != 10 {\n\t\tt.Fatalf(\"expected 10 entries committed, found %d\",\n\t\t\tc.getCommitIndex())\n\t}\n\tif drainNotifyCh(commitCh) {\n\t\tt.Fatalf(\"unexpected commit notify\")\n\t}\n\n}",
"func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}",
"func (th *TestHarness) TestSignVote() error {\n\tth.logger.Info(\"TEST: Signing of votes\")\n\tfor _, voteType := range voteTypes {\n\t\tth.logger.Info(\"Testing vote type\", \"type\", voteType)\n\t\thash := tmhash.Sum([]byte(\"hash\"))\n\t\tvote := &types.Vote{\n\t\t\tType: voteType,\n\t\t\tHeight: 101,\n\t\t\tRound: 0,\n\t\t\tBlockID: types.BlockID{\n\t\t\t\tHash: hash,\n\t\t\t\tPartSetHeader: types.PartSetHeader{\n\t\t\t\t\tHash: hash,\n\t\t\t\t\tTotal: 1000000,\n\t\t\t\t},\n\t\t\t},\n\t\t\tValidatorIndex: 0,\n\t\t\tValidatorAddress: tmhash.SumTruncated([]byte(\"addr\")),\n\t\t\tTimestamp: time.Now(),\n\t\t}\n\t\tv := vote.ToProto()\n\t\tvoteBytes := types.VoteSignBytes(th.chainID, v)\n\t\t// sign the vote\n\t\tif err := th.signerClient.SignVote(th.chainID, v); err != nil {\n\t\t\tth.logger.Error(\"FAILED: Signing of vote\", \"err\", err)\n\t\t\treturn newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf(\"voteType=%d\", voteType))\n\t\t}\n\t\tvote.Signature = v.Signature\n\t\tth.logger.Debug(\"Signed vote\", \"vote\", vote)\n\t\t// validate the contents of the vote\n\t\tif err := vote.ValidateBasic(); err != nil {\n\t\t\tth.logger.Error(\"FAILED: Signed vote is invalid\", \"err\", err)\n\t\t\treturn newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf(\"voteType=%d\", voteType))\n\t\t}\n\t\tsck, err := th.signerClient.GetPubKey()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// now validate the signature on the proposal\n\t\tif sck.VerifySignature(voteBytes, vote.Signature) {\n\t\t\tth.logger.Info(\"Successfully validated vote signature\", \"type\", voteType)\n\t\t} else {\n\t\t\tth.logger.Error(\"FAILED: Vote signature validation failed\", \"type\", voteType)\n\t\t\treturn newTestHarnessError(ErrTestSignVoteFailed, nil, \"signature validation failed\")\n\t\t}\n\t}\n\treturn nil\n}",
"func vote() {\n\tsetupProfiler()\n\tdefer teardownProfiler()\n\n\tvar joiners = make([]chan int, 0)\n\tfor i := 0; i < voterGoroutines; i++ {\n\t\tvar joinchan = make(chan int)\n\t\tjoiners = append(joiners, joinchan)\n\t\tgo placeVotes(joinchan)\n\t}\n\tvar totalVotes = 0\n\tfor v, join := range joiners {\n\t\tvotes := <-join\n\t\ttotalVotes += votes\n\t\tfmt.Printf(\"Voter %v finished and placed %v votes.\\n\", v, votes)\n\t}\n\tfmt.Printf(\"Generated %v votes in %v seconds (%0.0f votes/second)\\n\",\n\t\ttotalVotes, votingDuration.Seconds(),\n\t\tfloat64(totalVotes)/votingDuration.Seconds())\n\treturn\n}",
"func TestCannotTransferLeaseToVoterOutgoing(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tknobs, ltk := makeReplicationTestKnobs()\n\t// Add a testing knob to allow us to block the change replicas command\n\t// while it is being proposed. When we detect that the change replicas\n\t// command to move n3 to VOTER_OUTGOING has been evaluated, we'll send\n\t// the request to transfer the lease to n3. The hope is that it will\n\t// get past the sanity above latch acquisition prior to change replicas\n\t// command committing.\n\tvar scratchRangeID atomic.Value\n\tscratchRangeID.Store(roachpb.RangeID(0))\n\tchangeReplicasChan := make(chan chan struct{}, 1)\n\tshouldBlock := func(args kvserverbase.ProposalFilterArgs) bool {\n\t\t// Block if a ChangeReplicas command is removing a node from our range.\n\t\treturn args.Req.RangeID == scratchRangeID.Load().(roachpb.RangeID) &&\n\t\t\targs.Cmd.ReplicatedEvalResult.ChangeReplicas != nil &&\n\t\t\tlen(args.Cmd.ReplicatedEvalResult.ChangeReplicas.Removed()) > 0\n\t}\n\tblockIfShould := func(args kvserverbase.ProposalFilterArgs) {\n\t\tif shouldBlock(args) {\n\t\t\tch := make(chan struct{})\n\t\t\tchangeReplicasChan <- ch\n\t\t\t<-ch\n\t\t}\n\t}\n\tknobs.Store.(*kvserver.StoreTestingKnobs).TestingProposalFilter = func(args kvserverbase.ProposalFilterArgs) *roachpb.Error {\n\t\tblockIfShould(args)\n\t\treturn nil\n\t}\n\ttc := testcluster.StartTestCluster(t, 4, base.TestClusterArgs{\n\t\tServerArgs: base.TestServerArgs{Knobs: knobs},\n\t\tReplicationMode: base.ReplicationManual,\n\t})\n\tdefer tc.Stopper().Stop(ctx)\n\n\tscratchStartKey := tc.ScratchRange(t)\n\tdesc := tc.AddVotersOrFatal(t, scratchStartKey, tc.Targets(1, 2)...)\n\tscratchRangeID.Store(desc.RangeID)\n\t// Make sure n1 has the lease to start with.\n\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\tscratchStartKey, tc.Target(0).StoreID)\n\trequire.NoError(t, err)\n\n\t// The 
test proceeds as follows:\n\t//\n\t// - Send an AdminChangeReplicasRequest to remove n3 and add n4\n\t// - Block the step that moves n3 to VOTER_OUTGOING on changeReplicasChan\n\t// - Send an AdminLeaseTransfer to make n3 the leaseholder\n\t// - Try really hard to make sure that the lease transfer at least gets to\n\t// latch acquisition before unblocking the ChangeReplicas.\n\t// - Unblock the ChangeReplicas.\n\t// - Make sure the lease transfer fails.\n\n\tltk.withStopAfterJointConfig(func() {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, err = tc.Server(0).DB().AdminChangeReplicas(ctx,\n\t\t\t\tscratchStartKey, desc, []roachpb.ReplicationChange{\n\t\t\t\t\t{ChangeType: roachpb.REMOVE_VOTER, Target: tc.Target(2)},\n\t\t\t\t\t{ChangeType: roachpb.ADD_VOTER, Target: tc.Target(3)},\n\t\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t}()\n\t\tch := <-changeReplicasChan\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\t\t\tscratchStartKey, tc.Target(2).StoreID)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Regexp(t,\n\t\t\t\t// The error generated during evaluation.\n\t\t\t\t\"replica cannot hold lease|\"+\n\t\t\t\t\t// If the lease transfer request has not yet made it to the latching\n\t\t\t\t\t// phase by the time we close(ch) below, we can receive the following\n\t\t\t\t\t// error due to the sanity checking which happens in\n\t\t\t\t\t// AdminTransferLease before attempting to evaluate the lease\n\t\t\t\t\t// transfer.\n\t\t\t\t\t// We have a sleep loop below to try to encourage the lease transfer\n\t\t\t\t\t// to make it past that sanity check prior to letting the change\n\t\t\t\t\t// of replicas proceed.\n\t\t\t\t\t\"cannot transfer lease to replica of type VOTER_DEMOTING_LEARNER\", err.Error())\n\t\t}()\n\t\t// Try really hard to make sure that our request makes it past the\n\t\t// sanity check error to the evaluation error.\n\t\tfor i := 0; 
i < 100; i++ {\n\t\t\truntime.Gosched()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t}\n\t\tclose(ch)\n\t\twg.Wait()\n\t})\n\n}",
"func (_Contracts *ContractsTransactorSession) VerifyVoter(_proposal *big.Int, _positionId *big.Int, _voterId *big.Int, _voterAddr common.Address) (*types.Transaction, error) {\n\treturn _Contracts.Contract.VerifyVoter(&_Contracts.TransactOpts, _proposal, _positionId, _voterId, _voterAddr)\n}",
"func (alg *Algorand) committeeVote(round uint64, step uint64, expectedNum int, hash common.Hash) error {\n\tif alg.maliciousType == EvilVoteNothing {\n\t\t// vote nothing\n\t\treturn nil\n\t}\n\n\t// check if user is in committee using Sortition(Alg 1) for j > 0\n\tvrf, proof, j := alg.sortition(alg.sortitionSeed(round), role(committee, round, step), expectedNum, alg.tokenOwn())\n\n\t//log.Trace(\"[algorand:committeeVote] committeeVote\", \"ID\", alg.id, \"sub-user\", j)\n\t// only committee members originate a message\n\tif j > 0 {\n\t\t// Gossip vote message\n\t\tvoteMsg := &VoteMessage{\n\t\t\tBlockNumber: round,\n\t\t\tStep: step,\n\t\t\tSub: uint64(j),\n\t\t\tVRF: vrf,\n\t\t\tProof: proof,\n\t\t\tParentHash: alg.chain.last.Hash(),\n\t\t\tBlockHash: hash,\n\t\t}\n\t\t_, err := voteMsg.Sign(alg.privkey)\n\t\tif err != nil {\n\t\t\tlog.Info(\"[algorand:committeeVote] sign err\", \"ID\", alg.id, \"sub-user\", j, \"err\", err)\n\t\t\treturn err\n\t\t}\n\t\t//log.Info(\"[algorand:committeeVote] signed \", \"ID\", alg.id, \"sub-user\", j, \"votemsg\", voteMsg)\n\t\t//gossip vote\n\t\talg.chain.protocolManager.vote_Ch <- VotePreEvent{Vote: voteMsg}\n\t\t//go alg.chain.protocolManager.BroadcastVote(voteMsg.Hash(), voteMsg)\n\t}\n\treturn nil\n}",
"func (t *subtransaction) updateVoterState(voter *Voter, vote *voting.Vote) error {\n\tswitch voter.result {\n\tcase VoteUndecided:\n\t\t// Happy case, we can still cast a vote.\n\t\tbreak\n\tcase VoteCanceled:\n\t\treturn ErrTransactionCanceled\n\tcase VoteStopped:\n\t\treturn ErrTransactionStopped\n\tcase VoteCommitted:\n\t\treturn fmt.Errorf(\"cannot change committed vote\")\n\tdefault:\n\t\t// Because we didn't vote yet, we know that the node cannot be\n\t\t// either in VoteCommitted or VoteFailed state.\n\t\treturn fmt.Errorf(\"voter is in invalid state %d\", voter.result)\n\t}\n\n\tswitch {\n\tcase vote != nil:\n\t\tif voter.vote != nil {\n\t\t\treturn errors.New(\"changing current vote is not allowed\")\n\t\t}\n\n\t\tt.voteCounts[*vote] += voter.Votes\n\t\tvoter.vote = vote\n\tcase vote == nil:\n\t\tif t.isDone() {\n\t\t\t// If the transaction is already done, it's too late to cancel our vote.\n\t\t\t// Other nodes may have committed their changes already.\n\t\t\treturn errors.New(\"subtransaction was already finished\")\n\t\t}\n\n\t\t// Remove the voter's support for the vote so it's not counted towards the\n\t\t// majority. 
The node is not going to commit the subtransaction anyway.\n\t\tt.voteCounts[*voter.vote] -= voter.Votes\n\t\tvoter.result = VoteCanceled\n\t}\n\n\tdefer func() {\n\t\tif t.mustSignalVoters() {\n\t\t\tclose(t.doneCh)\n\t\t}\n\t}()\n\n\tvar majorityVote *voting.Vote\n\tvar majorityVoteCount uint\n\tfor v, voteCount := range t.voteCounts {\n\t\tif majorityVoteCount < voteCount {\n\t\t\tv := v\n\t\t\tmajorityVoteCount = voteCount\n\t\t\tmajorityVote = &v\n\t\t}\n\t}\n\n\tvar outstandingVotes uint\n\tfor _, voter := range t.votersByNode {\n\t\tif voter.vote == nil {\n\t\t\toutstandingVotes += voter.Votes\n\t\t}\n\t}\n\n\t// When the majority vote didn't yet cross the threshold and the number of outstanding votes\n\t// may still get us across that threshold, then we need to wait for more votes to come in.\n\tif majorityVoteCount < t.threshold && majorityVoteCount+outstandingVotes >= t.threshold {\n\t\treturn nil\n\t}\n\n\t// Update all voters which have cast a vote and which are not undecided. We mustn't change\n\t// any voters which did decide on an outcome already as they may have already committed or\n\t// aborted their action.\n\tfor _, voter := range t.votersByNode {\n\t\t// We cannot change the mind of nodes which have already settled on any outcome\n\t\t// after the fact.\n\t\tif voter.result != VoteUndecided {\n\t\t\tcontinue\n\t\t}\n\n\t\t// We do not change the mind of any voter which didn't yet cast its vote. While it\n\t\t// may be true that it can only fail anyway, it is easier to handle if we just wait\n\t\t// for its incoming vote and set it to failed at that point in time.\n\t\tif voter.vote == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the majority vote count is smaller than the threshold at this point, then we\n\t\t// know that we cannot ever reach it anymore even with the votes which are still\n\t\t// outstanding. 
We can thus mark this node as failed.\n\t\tif majorityVoteCount < t.threshold {\n\t\t\tvoter.result = VoteFailed\n\t\t\tcontinue\n\t\t}\n\n\t\t// Otherwise, the result depends on whether the voter agrees on the quorum or not.\n\t\tif *voter.vote == *majorityVote {\n\t\t\tvoter.result = VoteCommitted\n\t\t} else {\n\t\t\tvoter.result = VoteFailed\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (a *RPC) VoteForLeader(args *RequestVoteRPCArgs,reply *bool) error{\n\t//r.ResetTimer()\n \t//fmt.Println(\"received Vote request parameter \",(*args).CandidateId,\" \",(*args).Term,\" \",(*args).LastLogTerm,\" \",(*args).LastLogIndex)\n \t//if len(r.Log)>1{\n \t//\tfmt.Println(\"Vote Request folloer parameter \",r.Id,\" \", r.CurrentTerm,\" \",r.Log[len(r.Log)-1].Term ,\" \",len(r.Log)-1)\n \t//}\n\tif r.IsLeader==2 { // if this server is follower\n\t\t//r.ResetTimer() //TODO\n\t\tif r.CurrentTerm > args.Term || r.VotedFor >-1 { // if follower has updated Term or already voted for other candidate in same term , reply nagative\n\t\t\t*reply = false\n\t\t} else if r.VotedFor== -1{ // if follower has not voted for anyone in current Term \n\t\t\tlastIndex:= len(r.Log) \n\t\t\tif lastIndex > 0 && args.LastLogIndex >0{ // if Candiate log and this server log is not empty. \n\t\t\t\tif r.Log[lastIndex-1].Term > args.LastLogTerm { // and Term of last log in follower is updated than Candidate, reject vote\n *reply=false\n }else if r.Log[lastIndex-1].Term == args.LastLogTerm{ // else if Terms of Follower and candidate is same\n \tif (lastIndex-1) >args.LastLogIndex { // but follower log is more updated, reject vote\n \t\t*reply = false\n \t} else {\n \t\t\t*reply = true // If last log terms is match and followe log is sync with candiate, vote for candidate\n \t\t}\n }else{ // if last log term is not updated and Term does not match, \n \t \t\t*reply=true//means follower is lagging behind candiate in log entries, vote for candidate\n \t\t}\n \t\n\t\t\t} else if lastIndex >args.LastLogIndex { // either of them is Zero\n\t\t\t\t*reply = false // if Follower has entries in Log, its more updated, reject vote\n\t\t\t}else{\n\t\t\t\t\t*reply = true // else Vote for candiate\n\t\t\t\t}\n\t\t}else{\n\t\t\t*reply=false\n\t\t}\n\t}else{\n\t\t*reply = false // This server is already a leader or candiate, reject vote\n\t}\n\n\tif(*reply) {\n r.VotedFor=args.CandidateId // Set Voted 
for to candiate Id if this server has voted positive\n }\n\t/*if(*reply) {\n\t\tfmt.Println(\"Follower \",r.Id,\" Voted for \",r.VotedFor)\n\t}else{\n\t\tfmt.Println(\"Follower \",r.Id,\" rejected vote for \",args.CandidateId)\n\t}*/\n\treturn nil\n}",
"func (_Contracts *ContractsSession) VerifyVoter(_proposal *big.Int, _positionId *big.Int, _voterId *big.Int, _voterAddr common.Address) (*types.Transaction, error) {\n\treturn _Contracts.Contract.VerifyVoter(&_Contracts.TransactOpts, _proposal, _positionId, _voterId, _voterAddr)\n}",
"func (v *verifyFuture) vote(leader bool) {\n\tv.voteLock.Lock()\n\tdefer v.voteLock.Unlock()\n\n\t// Guard against having notified already\n\tif v.notifyCh == nil {\n\t\treturn\n\t}\n\n\tif leader {\n\t\tv.votes++\n\t\tif v.votes >= v.quorumSize {\n\t\t\tv.notifyCh <- v\n\t\t\tv.notifyCh = nil\n\t\t}\n\t} else {\n\t\tv.notifyCh <- v\n\t\tv.notifyCh = nil\n\t}\n}",
"func (s *VotingChaincode) vote(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) != 3 {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_01\", []string{\"vote\", \"3\"}))\n\t}\n\n\ttodayDate := string(time.Now().UTC().Format(\"2006/01/02\"))\n\n\tvoterSSN := args[0]\n\telectionType := args[1]\n\tcandidatePubKey := args[2]\n\n\telection, err := u.FindCompositeKey(stub, c.ELECTION, []string{electionType})\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tif election == \"\" {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_15\", []string{electionType}))\n\t}\n\n\t_, keyParts, err := stub.SplitCompositeKey(election)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_07\", []string{election}))\n\t}\n\n\tisElectionPeriod := u.IsWithinRange(todayDate, keyParts[1], keyParts[2], \"2006/01/02\")\n\tif isElectionPeriod != true {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_13\", []string{todayDate, electionType, fmt.Sprint(keyParts[1] + \"-\" + keyParts[2])}))\n\t}\n\n\tfound, voterPubKey := u.FindUserBySSN(stub, voterSSN)\n\tif !found {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_14\", []string{voterSSN}))\n\t}\n\n\tvoterAsBytes, err := stub.GetState(voterPubKey)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_10\", []string{voterSSN, err.Error()}))\n\t}\n\n\tvoter := User{}\n\terr = json.Unmarshal(voterAsBytes, &voter)\n\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to unmarshal voter\")\n\t}\n\n\thasVoted := strings.Contains(voter.Election, c.VOTED)\n\tif hasVoted == true {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_14\", []string{voterSSN}))\n\t}\n\n\tisRegistered := strings.Contains(voter.Election, c.REGISTERED)\n\tif isRegistered != true {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_11\", []string{fmt.Sprint(\"Voter\" + voterSSN + \" Not Registered\")}))\n\t}\n\n\tisEligibleToVote := strings.Split(voter.Election, c.SEPARATOR)\n\tif isEligibleToVote[6] != \"true\" {\n\t\treturn 
shim.Error(msg.GetErrMsg(\"VOT_ERR_11\", []string{fmt.Sprint(isEligibleToVote[5] + \" Voter Min Age \" + strconv.Itoa(c.VOTER_MIN_AGE))}))\n\t}\n\n\tvoterAge := isEligibleToVote[5]\n\n\tcandidateAsBytes, err := stub.GetState(candidatePubKey)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_10\", []string{voterSSN, err.Error()}))\n\t}\n\n\tcandidate := User{}\n\tjson.Unmarshal(candidateAsBytes, &candidate)\n\n\tisCandidate := strings.Split(candidate.Election, c.SEPARATOR)\n\tif isCandidate[4] != \"true\" {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_12\", []string{candidatePubKey, \"Not Registered\"}))\n\t}\n\n\tif candidate.SSN == voter.SSN {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_12\", []string{candidatePubKey, fmt.Sprint(\"Same Voter \" + voterSSN + \" and Candidate \" + candidate.SSN)}))\n\t}\n\n\t_, err = s.callOtherCC(stub, c.CCNAME, c.CHANNELID, []string{\"giveVote\", voter.SSN, candidatePubKey, electionType, todayDate})\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_17\", []string{c.CCNAME, err.Error()}))\n\t}\n\n\tvoter.Election = strings.Replace(voter.Election, c.REGISTERED, c.VOTED, -1)\n\n\tvoterAsBytes, _ = json.Marshal(voter)\n\n\terr = stub.PutState(voterPubKey, voterAsBytes)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_09\", []string{voterPubKey, err.Error()}))\n\t}\n\n\tvote := Vote{\n\t\tvoterSSN,\n\t\tvoter.FirstName,\n\t\tvoter.LastName,\n\t\tvoterAge,\n\t\tcandidatePubKey,\n\t\ttodayDate,\n\t\telectionType,\n\t\tstub.GetTxID()}\n\n\tvoteJSON, _ := json.Marshal(vote)\n\n\treturn shim.Success(voteJSON)\n}",
"func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\t//DPrintf(\"server %d RequestVote lock\", rf.me)\n\tdefer rf.mu.Unlock()\n\tDPrintf(\"#%d server receive RequestVote rpc from #%d at term %d\\n\", rf.me, args.CandidateId, args.Term)\n\t// Your code here (2A, 2B).\n\treply.Term = rf.currentTerm\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.state = 0\n\t\trf.persist()\n\t\tif rf.leaderTimer != nil {\n\t\t\trf.leaderTimer.Stop()\n\t\t}\n\t}\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\tif args.LastLogTerm > rf.log[rf.lastIndex - rf.offset].Term {\n\t\t\treply.VoteGranted = true\n\t\t} else if args.LastLogTerm == rf.log[rf.lastIndex - rf.offset].Term && args.LastLogIndex >= rf.lastIndex {\n\t\t\treply.VoteGranted = true\n\t\t} else {\n\t\t\tDPrintf(\"#%d server is more update than requestvoter\\n\", rf.me)\n\t\t\treply.VoteGranted = false\n\t\t}\n\t} else {\n\t\tDPrintf(\"#%d server has voted other\\n\", rf.me)\n\t\treply.VoteGranted = false\n\t}\n\tif (reply.VoteGranted) {\n\t\trf.votedFor = args.CandidateId\n\t\trf.state = 1\n\t\tif rf.leaderTimer != nil {\n\t\t\trf.leaderTimer.Stop()\n\t\t}\n\t\trf.persist()\n\t\tvar d time.Duration\n\t\td = time.Duration(333 * rand.Float64() + 533)\n\t\tif rf.timer != nil {\n\t\t\trf.timer.Reset(d * time.Millisecond)\n\t\t}\n\t}\n\tDPrintf(\"#%d server receive RequestVote rpc from #%d at term %d, result: %v\\n\", rf.me, args.CandidateId, rf.currentTerm, reply.VoteGranted)\n\t//DPrintf(\"server %d RequestVote unlock\", rf.me)\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Free decrements the reference count on a message, and releases its resources if no further references remain. While this is not strictly necessary thanks to GC, doing so allows for the resources to be recycled without engaging GC. This can have rather substantial benefits for performance.
|
// Free drops one reference to the message. When the count reaches zero
// the message is offered back to the size-matched cache bucket so its
// buffers can be recycled without involving the garbage collector.
func (m *Message) Free() {
	if remaining := atomic.AddInt32(&m.refcnt, -1); remaining > 0 {
		return
	}
	// Locate the cache channel whose bucket size matches this message.
	var pool chan *Message
	for _, entry := range messageCache {
		if entry.maxbody == m.bsize {
			pool = entry.cache
			break
		}
	}
	m.Port = nil
	// Non-blocking send: if the cache is full (or no bucket matched,
	// leaving pool nil), simply let the GC reclaim the message.
	select {
	case pool <- m:
	default:
	}
}
|
[
"func (m *Message) Free() {\n\tC.nlmsg_free(m.nlm)\n\tm.nlm = nil\n}",
"func (m *Message) Release() {\n\tif m != nil {\n\t\tm.Text = nil\n\t\tfor i := len(m.List) - 1; i >= 0; i-- {\n\t\t\tm.List[i] = nil\n\t\t}\n\t\tm.List = m.List[:0]\n\t\tif m.used > 0 {\n\t\t\tcopy(m.buf[:m.used], blankBuf[:m.used])\n\t\t\tm.used = 0\n\t\t}\n\t\tmessagePool.Put(m)\n\t}\n}",
"func (m *Message) Free() {\n\tfor i := range messageCache {\n\t\tif m.bsize == messageCache[i].maxbody {\n\t\t\tmessageCache[i].pool.Put(m)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (pkt *Packet) Free() {\n\tpkt.mtx.Lock()\n\tif *pkt.refCount <= 0 {\n\t\tpanic(\"reference count underflow\")\n\t}\n\t*pkt.refCount--\n\tif *pkt.refCount == 0 {\n\t\tpkt.reset()\n\t\tpkt.mtx.Unlock()\n\t\tpacketPool.Put(pkt)\n\t} else {\n\t\tpkt.mtx.Unlock()\n\t}\n}",
"func (room *RoomMessages) messagesFree() {\n\troom.info.messagesM.Lock()\n\troom.info._messages = nil\n\troom.info.messagesM.Unlock()\n}",
"func (self *LdapMessage) MsgFree() int{\n if self.msg != nil {\n rv := C.ldap_msgfree(self.msg)\n self.msg = nil\n return int(rv)\n }\n return -1\n}",
"func (conn *Connection) Free() {\n\n\tif conn.done() {\n\t\treturn\n\t}\n\tconn.setDone()\n\n\tconn.wGroup.Wait()\n\n\t// dont delete. conn = nil make pointer nil, but other pointers\n\t// arent nil. If conn.disconnected = true it is mean that all\n\t// resources are cleared, but pointer alive, so we only make pointer = nil\n\tif conn.lobby == nil {\n\t\treturn\n\t}\n\n\tconn.setDisconnected()\n\n\tconn.ws.Close()\n\tclose(conn.send)\n\tclose(conn.actionSem)\n\t// dont delete. conn = nil make pointer nil, but other pointers\n\t// arent nil and we make 'conn.disconnected = true' for them\n\n\tconn.lobby = nil\n\tconn.setRoom(nil)\n\n\t//fmt.Println(\"conn free memory\")\n}",
"func (recv *Sequence) Free() {\n\tC.g_sequence_free((*C.GSequence)(recv.native))\n\n\treturn\n}",
"func (pool *Pool) Free(object IPoolable) {\n\tobject.Deallocate()\n\tpool.r++\n}",
"func (b *Buffer) Free() {\n\tb.Reset()\n\tbufPool.Put(b)\n}",
"func (b *Buffer) Free() {\n\tfor _, s := range b.chunks {\n\t\tb.pool.Put(s)\n\t}\n\tb.chunks = b.chunks[0:0]\n}",
"func (recv *Checksum) Free() {\n\tC.g_checksum_free((*C.GChecksum)(recv.native))\n\n\treturn\n}",
"func (recv *StringChunk) Free() {\n\tC.g_string_chunk_free((*C.GStringChunk)(recv.native))\n\n\treturn\n}",
"func (c *Cache) Free(\n\tctx context.Context, owner client.Object,\n) error {\n\tc.informerReferencesMux.Lock()\n\tdefer c.informerReferencesMux.Unlock()\n\tdefer c.sampleMetrics(ctx)\n\n\tlog := logr.FromContextOrDiscard(ctx)\n\n\townerRef, err := c.ownerRef(owner)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor gvk, refs := range c.informerReferences {\n\t\tif _, ok := refs[ownerRef]; ok {\n\t\t\tdelete(refs, ownerRef)\n\n\t\t\tif len(refs) == 0 {\n\t\t\t\tlog.Info(\"releasing watcher\",\n\t\t\t\t\t\"kind\", gvk.Kind, \"group\", gvk.Group,\n\t\t\t\t\t\"ownerNamespace\", owner.GetNamespace())\n\n\t\t\t\tif err := c.informerMap.Delete(ctx, gvk); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"releasing informer for %v: %w\", gvk, err)\n\t\t\t\t}\n\n\t\t\t\tdelete(c.informerReferences, gvk)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func Free() {\n\tflags = nil // Any future call to Get() will panic on a nil dereference.\n}",
"func (p *Packet) Free() {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tp.body.Free()\n\n\tp.header = Header{}\n\tp.body = nil\n\tpktPool.Put(p)\n}",
"func (x *FzIcclink) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}",
"func (recv *Error) Free() {\n\tC.g_error_free((*C.GError)(recv.native))\n\n\treturn\n}",
"func (acker *acker) Free() {\n\tfor k, _ := range acker.fmap {\n\t\tacker.fmap[k] = nil\n\t}\n\tacker.fmap = nil\n\tacker.mutex = nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Dup creates a "duplicate" message. What it really does is simply increment the reference count on the message. Note that since the underlying message is actually shared, consumers must take care not to modify the message. (We might revise this API in the future to add a copy-on-write facility, but for now modification is neither needed nor supported.) Applications should NOT make use of this function; it is intended for Protocol, Transport, and internal use only.
|
// Dup acquires an additional reference to the message and returns the
// very same message. The underlying storage is shared, so callers must
// treat a duplicated message as read-only.
func (m *Message) Dup() *Message {
	atomic.AddInt32(&m.refcnt, 1)
	return m
}
|
[
"func (pkt *Packet) Dup() {\n\tpkt.mtx.Lock()\n\tif *pkt.refCount <= 0 {\n\t\tpanic(\"cannot reference freed packet\")\n\t}\n\t*pkt.refCount++\n\tpkt.mtx.Unlock()\n}",
"func (m *Message) Dup() *Message {\n\tdup := NewMessage(len(m.Body))\n\tdup.Body = append(dup.Body, m.Body...)\n\tdup.Header = append(dup.Header, m.Header...)\n\treturn dup\n}",
"func (g *GossipCollector) DuplicateMessage(msg *pubsub.Message) {\n\tif msg.Topic == nil {\n\t\treturn\n\t}\n\treceivedMessagesBytes.WithLabelValues(*msg.Topic).Add(float64(len(msg.Data)))\n\treceivedMessagesCount.WithLabelValues(*msg.Topic).Inc()\n}",
"func (b *WriteBuffer) Dup() *WriteBuffer {\n\tdup := &WriteBuffer{}\n\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\tdup.alloc = b.alloc\n\tdup.inner = FromSlice(b.inner.ToByteSlice())\n\n\treturn dup\n}",
"func (c *Clac) DupN() error {\n\tnum, err := c.popCount()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.dup(0, num)\n}",
"func (r *Relationship) Dup(newSrc, newDest *Element) *Relationship {\n\tdup := &Relationship{\n\t\tSource: newSrc,\n\t\tInteractionStyle: r.InteractionStyle,\n\t\tTags: r.Tags,\n\t\tURL: r.URL,\n\t\tDestination: newDest,\n\t\tDescription: r.Description,\n\t\tTechnology: r.Technology,\n\t}\n\tIdentify(dup)\n\treturn dup\n}",
"func Duplicate(h handle.Handle, pid uint32, access handle.DuplicateAccess) (handle.Handle, error) {\n\ttargetPs, err := process.Open(process.DupHandle, false, pid)\n\tif err != nil {\n\t\treturn ^handle.Handle(0), err\n\t}\n\tdefer targetPs.Close()\n\tcurrentPs, err := process.Open(process.DupHandle, false, uint32(os.Getpid()))\n\tif err != nil {\n\t\treturn ^handle.Handle(0), err\n\t}\n\tdefer currentPs.Close()\n\t// duplicate the remote handle in the current process's address space.\n\t// Note that for certain handle types this operation might fail\n\t// as they don't permit duplicate operations\n\tdup, err := h.Duplicate(targetPs, currentPs, access)\n\tif err != nil {\n\t\treturn ^handle.Handle(0), fmt.Errorf(\"couldn't duplicate handle: %v\", err)\n\t}\n\treturn dup, nil\n}",
"func (s *UDPSession) SetDUP(dup int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.dup = dup\n}",
"func (handle Handle) Duplicate(src, dest Handle, access DuplicateAccess) (Handle, error) {\n\tvar destHandle Handle\n\terrno, _, err := duplicateHandle.Call(\n\t\tuintptr(src),\n\t\tuintptr(handle),\n\t\tuintptr(dest),\n\t\tuintptr(unsafe.Pointer(&destHandle)),\n\t\tuintptr(access),\n\t\t0,\n\t\t0,\n\t)\n\tif winerrno.Errno(errno) != winerrno.Success {\n\t\treturn destHandle, nil\n\t}\n\treturn Handle(0), os.NewSyscallError(\"DuplicateHandle\", err)\n}",
"func (item *Item) Dup() Item {\n\tdup := *item\n\tdup.id = make([]byte, len(item.id))\n\tcopy(dup.id, item.id)\n\treturn dup\n}",
"func (jbobject *JavaNioCharBuffer) Duplicate() *JavaNioCharBuffer {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"duplicate\", \"java/nio/CharBuffer\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tretconv := javabind.NewJavaToGoCallable()\n\tdst := &javabind.Callable{}\n\tretconv.Dest(dst)\n\tif err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {\n\t\tpanic(err)\n\t}\n\tretconv.CleanUp()\n\tunique_x := &JavaNioCharBuffer{}\n\tunique_x.Callable = dst\n\treturn unique_x\n}",
"func (n *Node) DeepDup() *Node {\n\t// Start with a shallow copy.\n\tn = n.Dup()\n\t// And then recursively copy children.\n\tfor i, ch := range n.Children {\n\t\tn.Children[i] = ch.DeepDup()\n\t}\n\treturn n\n}",
"func (s DataShape) Duplicate() DataShape {\n\treturn DataShape{dims: s.dims, shape: append([]uint8{}, s.shape...)}\n}",
"func Clone(m Message) (Message, error) {\n\tb := m.CachedBinary()\n\n\tif m.Payload() == nil {\n\t\treturn nil, fmt.Errorf(\"could not clone message, topic: %s\", m.Category())\n\t}\n\n\treturn simple{\n\t\tcategory: m.Category(),\n\t\tmarshaled: &b,\n\t\tpayload: m.Payload().Copy(),\n\t\tmetadata: m.Metadata(),\n\t}, nil\n}",
"func (c *Clac) DupR() error {\n\tnum, err := c.popCount()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpos, err := c.popIndex()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.dup(pos, num)\n}",
"func Dup(fd int) (int, string, error) {\n\treturn dupCloseOnExec(fd)\n}",
"func DupFd() (int, error) {\n\treturn server.DupFd()\n}",
"func (msg *Message) Clone(message *Message) *Message {\n\tmsgID := uuid.New().String()\n\treturn NewRawMessage().BuildHeader(msgID, message.GetParentID(), message.GetTimestamp()).\n\t\tBuildRouter(message.GetSource(), message.GetGroup(), message.GetResource(), message.GetOperation()).\n\t\tFillBody(message.GetContent())\n}",
"func (duplfs *DuplFs) Duplicate(gridfs *mgo.GridFS, oid string) (string, error) {\n\treturn duplfs.DuplicateWithId(gridfs, oid, \"\", time.Now())\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Expired returns true if the message has "expired". This is used by transport implementations to discard messages that have been stuck in the write queue for too long, and should be discarded rather than delivered across the transport. This is only used on the TX path, there is no sense of "expiration" on the RX path.
|
// Expired reports whether the message's TX deadline has passed. A zero
// deadline means the message never expires.
func (m *Message) Expired() bool {
	if m.expire.IsZero() {
		return false
	}
	// Expired exactly when the deadline is no longer in the future.
	return !m.expire.After(time.Now())
}
|
[
"func (m *Attachment) HasExpired() bool {\n\tvar validTime = m.SigningTime.Add(time.Duration(m.SigningMinutes) * time.Minute)\n\treturn validTime.Unix() < time.Now().Unix()\n}",
"func (r *Record) IsExpired() bool {\n\treturn IsExpired(r.H.Meta.TTL, r.H.Meta.Timestamp)\n}",
"func (c *CodeExchangeSession) HasExpired(expiryTime time.Duration) bool {\r\n\treturn time.Since(c.CreatedAt) > expiryTime\r\n}",
"func (w *writer) isExpired(now time.Time) bool {\n\tif w.count == 0 {\n\t\treturn false\n\t}\n\tif w.expiryTime == nil {\n\t\treturn false\n\t}\n\treturn w.expiryTime.Before(now)\n}",
"func (s *subscription) IsExpired() bool {\n\treturn s.ExpiresAt.Before(time.Now())\n}",
"func (upload *Upload) IsExpired() bool {\n\tif upload.ExpireAt != nil {\n\t\tif time.Now().After(*upload.ExpireAt) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (l *memoryLease) IsExpired(_ context.Context) bool {\n\treturn !l.leaser.store.isLeased(l.PartitionID)\n}",
"func (c *CIDOffer) HasExpired() bool {\n\texpiryTime := time.Unix(c.expiry, 0)\n\tnow := time.Now()\n\treturn expiryTime.Before(now)\n}",
"func (b *Object) expired() bool {\n\tif b.expire <= 0 {\n\t\treturn false\n\t}\n\n\treturn time.Now().Unix() >= b.expire\n}",
"func (s *Session) IsExpired() bool {\n\treturn s.ExpiredAt.Before(time.Now())\n}",
"func (item *item) expired() bool {\n\tif item.ttl <= 0 {\n\t\treturn false\n\t}\n\treturn item.expireAt.Before(time.Now())\n}",
"func (c CachedObject) IsExpired() bool {\r\n\r\n\telapsed := time.Now().Sub(c.CreatedAt.Add(time.Hour * getExpiryTimeInHrs()))\r\n\r\n\tif elapsed > 0.0 {\r\n\t\treturn true\r\n\t}\r\n\r\n\treturn false\r\n}",
"func (r IABResponse) IsExpired() bool {\n\tswitch {\n\tcase !r.IsValidSubscription():\n\t\treturn false\n\tdefault:\n\t\tnow := time.Now().UnixNano() / int64(time.Millisecond)\n\t\treturn r.SubscriptionPurchase.ExpiryTimeMillis < now\n\t}\n}",
"func (err *ValidationError) IsExpired() bool { return err.exp }",
"func (item Item) expired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > item.Expiration\n}",
"func (d *AccessData) IsExpired() bool {\n\treturn d.IsExpiredAt(time.Now())\n}",
"func (a *Account) IsExpired() bool {\n\ta.mu.RLock()\n\texp := a.expired\n\ta.mu.RUnlock()\n\treturn exp\n}",
"func (i *item) expired() bool {\n\treturn i.expiration.Before(time.Now())\n}",
"func (o *OAuthState) IsExpired() bool {\n\tif o.ExpiresAt <= 0 {\n\t\treturn false\n\t}\n\n\tif GetMillis() > o.ExpiresAt {\n\t\treturn true\n\t}\n\n\treturn false\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewMessage is the supported way to obtain a new Message. This makes use of a "cache" which greatly reduces the load on the garbage collector.
|
// NewMessage returns a message whose body can hold at least sz bytes,
// preferring a recycled message from the size-bucketed cache over a
// fresh allocation.
func NewMessage(sz int) *Message {
	// Select the smallest bucket strictly larger than the request, and
	// round sz up to that bucket's capacity.
	var pool chan *Message
	for _, entry := range messageCache {
		if sz < entry.maxbody {
			pool = entry.cache
			sz = entry.maxbody
			break
		}
	}

	var m *Message
	select {
	case m = <-pool:
		// Reused a cached message; its buffers are already sized.
	default:
		// Cache empty (or no bucket matched, pool nil): allocate anew.
		m = &Message{}
		m.bbuf = make([]byte, 0, sz)
		m.hbuf = make([]byte, 0, 32)
		m.bsize = sz
	}

	m.refcnt = 1
	m.Body = m.bbuf
	m.Header = m.hbuf
	return m
}
|
[
"func newMessage(msg string) *Message {\n\treturn &Message{\n\t\tId: ID,\n\t\tRts: time.Now(),\n\t\tContent: msg,\n\t}\n}",
"func New() *Message {\n\tmsg := &Message{\n\t\tStatus: STATUS_NEW,\n\t\tFilter: false,\n\t\tContent: Content{\n\t\t\tHead: Head{},\n\t\t\tData: nil,\n\t\t},\n\t}\n\treturn msg\n}",
"func NewMessage(message *types.Message) {\n\tif db.NewRecord(message) {\n\t\tdb.Create(message)\n\t}\n}",
"func messageAddNew(msg string) (Message, error) {\n\t//Initialize temp structure to be able to use append function\n\ttmpMessage := Message{}\n\n\tlastId += 1\n\ttmpMessage.Id = lastId\n\ttmpMessage.Message = msg\n\n\tmessages = append(messages, tmpMessage)\n\n\treturn tmpMessage, nil\n}",
"func NewMessage() *Message {\n\tvar p Message\n\tp.Buffer = make([]uint8, 2)\n\tp.Cursor = 2\n\treturn &p\n}",
"func (message *Message) NewMessage(action string, version Version, key metadata.Key, data []byte) *Message {\n\tif key == nil {\n\t\tkey = message.Key\n\t}\n\n\tif version == NullVersion {\n\t\tversion = message.Version\n\t}\n\n\tchild := NewMessage(action, int8(version), key, data)\n\tchild.ctx = metadata.NewParentIDContext(child.ctx, metadata.ParentID(message.ID))\n\tchild.ctx = metadata.NewParentTimestampContext(child.ctx, metadata.ParentTimestamp(message.Timestamp))\n\n\treturn child\n}",
"func NewMessage(text string) Message {\n\treturn Message{Text: text}\n}",
"func NewMessage(mtype string, message string) *Message {\n\treturn &Message{\n\t\tTime: time.Now(),\n\t\tType: mtype,\n\t\tMessage: message,\n\t}\n}",
"func (pool *MessagePool) New() (msg *Message) {\n\tselect {\n\tcase msg = <-pool.Messages:\n\tdefault:\n\t\tmsg = &Message{}\n\t}\n\treturn\n}",
"func (p *Pipe) newMessage() message {\n\tm := message{sourceID: p.ID()}\n\tif len(p.params) > 0 {\n\t\tm.params = p.params\n\t\tp.params = make(map[string][]phono.ParamFunc)\n\t}\n\tif len(p.feedback) > 0 {\n\t\tm.feedback = p.feedback\n\t\tp.feedback = make(map[string][]phono.ParamFunc)\n\t}\n\treturn m\n}",
"func NewMessage(command, family, flags int) (*Message, error) {\n\tnlm := C.nlmsg_alloc()\n\tif nlm == nil {\n\t\treturn nil, errors.New(\"failed to create netlink message\")\n\t}\n\tC.genlmsg_put(nlm, C.NL_AUTO_PID, C.NL_AUTO_SEQ, C.int(family), 0, C.int(flags), C.uint8_t(command), genlVersion)\n\treturn &Message{nlm: nlm}, nil\n}",
"func NewMessage(req *http.Request) (IMessage, error) {\n\tvar msg Message\n\terr := json.NewDecoder(req.Body).Decode(&msg)\n\treturn &msg, err\n}",
"func (a *BotAdapter) newMessage(channel *models.Channel, text string) *models.Message {\n\treturn &models.Message{\n\t\tID: a.idgen.ID(),\n\t\tRoomID: channel.ID,\n\t\tMsg: text,\n\t\tUser: a.user,\n\t}\n}",
"func (this MessageType) New() (Message, error) {\n\tswitch this {\n\tcase CONNECT:\n\t\treturn NewConnectMessage(), nil\n\tcase CONNACK:\n\t\treturn NewConnackMessage(), nil\n\tcase PUBLISH:\n\t\treturn NewPublishMessage(), nil\n\tcase PUBACK:\n\t\treturn NewPubackMessage(), nil\n\tcase PUBREC:\n\t\treturn NewPubrecMessage(), nil\n\tcase PUBREL:\n\t\treturn NewPubrelMessage(), nil\n\tcase PUBCOMP:\n\t\treturn NewPubcompMessage(), nil\n\tcase SUBSCRIBE:\n\t\treturn NewSubscribeMessage(), nil\n\tcase SUBACK:\n\t\treturn NewSubackMessage(), nil\n\tcase UNSUBSCRIBE:\n\t\treturn NewUnsubscribeMessage(), nil\n\tcase UNSUBACK:\n\t\treturn NewUnsubackMessage(), nil\n\tcase PINGREQ:\n\t\treturn NewPingreqMessage(), nil\n\tcase PINGRESP:\n\t\treturn NewPingrespMessage(), nil\n\tcase DISCONNECT:\n\t\treturn NewDisconnectMessage(), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"MessageType/NewMessage: Invalid message type %d.\", this)\n}",
"func NewMessage(mcf MessageContentFrame) *Message {\n\tmsg := &Message{\n\t\tID: NextCnt(),\n\t\tPayload: make([]byte, 0),\n\t}\n\tswitch m := mcf.(type) {\n\tcase *BasicPublish:\n\t\tmsg.Method = m\n\t\tmsg.Exchange = m.Exchange\n\t\tmsg.RoutingKey = m.RoutingKey\n\tcase *BasicDeliver:\n\t\tmsg.Method = m\n\t\tmsg.Exchange = m.Exchange\n\t\tmsg.RoutingKey = m.RoutingKey\n\t}\n\treturn msg\n}",
"func NewMessage(format string, args []interface{}) Message {\n\treturn Message{format: format, args: args}\n}",
"func NewMessage(metrics ...Metric) Message {\n\treturn Message{metrics: metrics}\n}",
"func NewMessage(from, text, boxId string, withDbUpdate bool) (*Message, *MessageBox, error) {\n\tmsgBox, err := Get(boxId)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tmsg := &Message{From: from, Text: text, ID: commons.GenerateUniqueId()}\n\tif withDbUpdate {\n\t\tmsgBox.Messages.Add(msg.ID)\n\t\tif err := db.Update(from, msg, msgBox); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\treturn msg, msgBox, nil\n}",
"func NewMessage(data []byte, offset int64) *Message {\n\tinternal := &mInternal{\n\t\tdata: data,\n\t\tmarked: false,\n\t\tcommitted: false,\n\t\toffset: offset,\n\t\tupstreamDoneChan: make(chan struct{}),\n\t\tmu: sync.Mutex{},\n\t}\n\treturn &Message{\n\t\tinternal,\n\t\t&MessageMock{\n\t\t\tGetDataFunc: internal.getDataFunc,\n\t\t\tMarkFunc: internal.markFunc,\n\t\t\tCommitFunc: internal.commitFunc,\n\t\t\tReleaseFunc: internal.releaseFunc,\n\t\t\tCommitAndReleaseFunc: internal.commitAndReleaseFunc,\n\t\t\tOffsetFunc: internal.offsetFunc,\n\t\t\tUpstreamDoneFunc: internal.upstreamDoneFunc,\n\t\t},\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Your KthLargest object will be instantiated and called as such: obj := Constructor(k, nums); param_1 := obj.Add(val);
|
func main() {
k := 3
arr := []int{4,5,8,2}
obj := Constructor(k, arr)
fmt.Println(obj.Add(3))
fmt.Println(obj.Add(5))
fmt.Println(obj.Add(10))
fmt.Println(obj.Add(9))
fmt.Println(obj.Add(4))
}
|
[
"func main() {\r\n\r\n\ttree := Node{\r\n\t\t32,\r\n\t\t&Node{\r\n\t\t\t16,\r\n\t\t\t&Node{\r\n\t\t\t\t8,\r\n\t\t\t\t&Node{4, nil, nil},\r\n\t\t\t\tnil,\r\n\t\t\t},\r\n\t\t\t&Node{\r\n\t\t\t\t24,\r\n\t\t\t\t&Node{22, nil, nil},\r\n\t\t\t\t&Node{26, nil, nil},\r\n\t\t\t},\r\n\t\t},\r\n\t\t&Node{\r\n\t\t\t64,\r\n\t\t\t&Node{48, nil, nil},\r\n\t\t\t&Node{72, nil, nil},\r\n\t\t},\r\n\t}\r\n\r\n\tsecondLargest := FindSecondLargest(&tree, -1)\r\n\r\n\tfmt.Println(secondLargest)\r\n}",
"func main() {\n\tnums := []int{1, 2, 3, 3, 3}\n\tobj := Constructor(nums)\n\tfmt.Println(obj.Pick(3))\n\tfmt.Println(obj.Pick(3))\n\tfmt.Println(obj.Pick(3))\n\tfmt.Println(obj.Pick(1))\n\tfmt.Println(obj.Pick(1))\n\tfmt.Println(obj.Pick(1))\n}",
"func main() {\n\tgreatest := max(10, 20, 15, 8, 9, 6)\n\tfmt.Println(greatest)\n}",
"func FindKthMax(nums []int, k int) (int, error) {\n\tindex := len(nums) - k\n\treturn kthNumber(nums, index)\n}",
"func main() {\n\tobj := Constructor()\n\tobj.AddNum(1)\n\tobj.AddNum(2)\n\tfmt.Println(obj.FindMedian())\n\tobj.AddNum(3)\n\tfmt.Println(obj.FindMedian())\n}",
"func main(){\n\t maxQueue := Constructor()\n\t maxQueue.Push_back(94)\n\t maxQueue.Push_back(16)\n\t maxQueue.Push_back(89)\n\t fmt.Println(maxQueue.Pop_front())\n\t maxQueue.Push_back(22)\n\t maxQueue.Push_back(33)\n\t maxQueue.Push_back(44)\n\t maxQueue.Push_back(111)\n\t maxQueue.Pop_front()\n\t maxQueue.Pop_front()\n\t maxQueue.Pop_front()\n\t fmt.Println(maxQueue.Max_value())\n }",
"func main() {\n\tmichael := CreatePerson(\"Michael\", 23)\n\tleah := CreatePerson(\"Leah\", 22)\n\tjake := CreatePerson(\"jake\", 19)\n\ttim := CreatePerson(\"tim\", 12)\n\tlarry := CreatePerson(\"larry\", 20)\n\tlenny := CreatePerson(\"lenny\", 21)\n\tjunior := CreatePerson(\"junior\", 10)\n\n\tpersonList := []Comparable{michael, leah, jake, tim, larry, lenny, junior}\n\n\t// HEAPSORT\n\tfmt.Println(\"### Testing HeapSort Implementation ###\")\n\tfmt.Println(\"Before Sorting:\")\n\tfor _, value := range personList {\n\t\tfmt.Println(value)\n\t}\n\n\tHeapSort(personList)\n\n\tfmt.Println(\"\\nAfter Sorting:\")\n\tfor _, value := range personList {\n\t\tfmt.Println(value)\n\t}\n\n\tfmt.Printf(\"\\n### Constructing Max Heap ###\\n\")\n\tpersonHeap := CreateMaxHeap(10)\n\tpersonHeap.Add(michael)\n\tpersonHeap.Add(leah)\n\tpersonHeap.Add(jake)\n\tpersonHeap.Add(tim)\n\tpersonHeap.Add(larry)\n\tpersonHeap.Add(lenny)\n\tpersonHeap.Add(junior)\n\n\tfmt.Println(\"Popping values from top of Max Heap\")\n\tvalue, ok := personHeap.Pop()\n\tfor ok {\n\t\tfmt.Printf(\"Top Value: %v\\n\", value)\n\t\tvalue, ok = personHeap.Pop()\n\t}\n}",
"func GetMaxKth(arr []int, k int) []int {\n\tif arr == nil || len(arr) < 1 || k < 0 || k >= len(arr) {\n\t\treturn nil\n\t}\n\n\tintHeap := datastructure.IntHeap{\n\t\tData: []int{},\n\t}\n\tintHeap.Cmp = func(i, j int) bool {\n\t\treturn intHeap.Data[i] < intHeap.Data[j]\n\t}\n\theap.Init(&intHeap)\n\n\tfor _, e := range arr {\n\t\tif intHeap.Len() < k {\n\t\t\theap.Push(&intHeap, e)\n\t\t} else {\n\t\t\titem := heap.Pop(&intHeap).(int)\n\t\t\tif e > item {\n\t\t\t\theap.Push(&intHeap, e)\n\t\t\t} else {\n\t\t\t\theap.Push(&intHeap, item)\n\t\t\t}\n\t\t}\n\t}\n\treturn intHeap.Data\n}",
"func (fn *formulaFuncs) kth(name string, argsList *list.List) formulaArg {\n\tif argsList.Len() != 2 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, fmt.Sprintf(\"%s requires 2 arguments\", name))\n\t}\n\tarray := argsList.Front().Value.(formulaArg).ToList()\n\targK := argsList.Back().Value.(formulaArg).ToNumber()\n\tif argK.Type != ArgNumber {\n\t\treturn argK\n\t}\n\tk := int(argK.Number)\n\tif k < 1 {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, \"k should be > 0\")\n\t}\n\tvar data []float64\n\tfor _, arg := range array {\n\t\tif arg.Type == ArgNumber {\n\t\t\tdata = append(data, arg.Number)\n\t\t}\n\t}\n\tif len(data) < k {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, \"k should be <= length of array\")\n\t}\n\tsort.Float64s(data)\n\tif name == \"LARGE\" {\n\t\treturn newNumberFormulaArg(data[len(data)-k])\n\t}\n\treturn newNumberFormulaArg(data[k-1])\n}",
"func kthLargestNumber(nums []string, k int) string {\n\tsort.Slice(nums, func(i, j int) bool {\n\t\tif len(nums[i]) != len(nums[j]) {\n\t\t\treturn len(nums[i]) < len(nums[j])\n\t\t}\n\t\tfor x := range nums[i] {\n\t\t\tif nums[i][x] != nums[j][x] {\n\t\t\t\treturn nums[i][x] < nums[j][x]\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n\treturn nums[len(nums)-k]\n}",
"func New(k int, l Less) Interface {\n\treturn &topK{k, &comparables{make([]interface{}, 0, k), l}}\n}",
"func main() {\n\tobj := Constructor()\n\tobj.Push(10)\n\tobj.Pop()\n\tfmt.Println(obj.Top())\n\tfmt.Println(obj.GetMin())\n}",
"func main() {\n\tobj := Constructor()\n\tfmt.Println(obj.Book(10, 20))\n\tfmt.Println(obj.Book(50, 60))\n\tfmt.Println(obj.Book(10, 40))\n\tfmt.Println(obj.Book(5, 15))\n\tfmt.Println(obj.Book(5, 10))\n\tfmt.Println(obj.Book(25, 55))\n}",
"func KthSmallestElement(array []int, k int) interface{} {\n\th := &MaxHeap{}\n\theap.Init(h)\n\tfor i := 0; i < len(array); i++ {\n\t\theap.Push(h, array[i])\n\t\tif h.Len() > k {\n\t\t\theap.Pop(h)\n\t\t}\n\t}\n\n\tans := heap.Pop(h)\n\treturn ans\n}",
"func Max(key []byte, nodes []*memberlist.Node) (max *memberlist.Node) {\n\tmaxValue := big.NewInt(0)\n\n\tCompute(key, nodes, func(node *memberlist.Node, bi *big.Int) {\n\t\tif bi.Cmp(maxValue) == 1 {\n\t\t\tmaxValue = bi\n\t\t\tmax = node\n\t\t}\n\t})\n\n\treturn max\n}",
"func TestLargest(t *testing.T) {\n\tp, s := beam.NewPipelineWithRoot()\n\tcol := beam.Create(s, 1, 11, 7, 5, 10)\n\ttopTwo := Largest(s, col, 2, lessInt)\n\tpassert.Equals(s, topTwo, []int{11, 10})\n\tif err := ptest.Run(p); err != nil {\n\t\tt.Errorf(\"pipeline failed but should have succeeded, got %v\", err)\n\t}\n}",
"func (t *Table) Biggest() y.Key { return t.biggest }",
"func main123000() {\r\n\th := &IntHeap{2, 1, 5}\r\n\theap.Init(h)\r\n\theap.Push(h, 3)\r\n\tfmt.Printf(\"minimum: %d\\n\", (*h)[0])\r\n\tfor h.Len() > 0 {\r\n\t\tfmt.Printf(\"%d \", heap.Pop(h))\r\n\t}\r\n}",
"func main() {\n\tobj := Constructor()\n\tvals := []int{100, 80, 60, 70, 60, 75, 85}\n\tfor _, v := range vals {\n\t\tfmt.Println(\"Input:\", v, \"Output \", obj.Next(v))\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetResourceRecordSets will return all resource record sets for a managed zone
|
func (d *DNS) GetResourceRecordSets(projectID string, managedZone string) ([]*v1.ResourceRecordSet, error) {
ctx := context.Background()
rrsService := v1.NewResourceRecordSetsService(d.V1)
rrsListCall := rrsService.List(projectID, managedZone).Context(ctx)
rrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)
if err != nil {
return nil, err
}
return rrsList.Rrsets, nil
}
|
[
"func (s *cloudDNSService) getResourceRecordSets(dnsZone string) ([]*dns.ResourceRecordSet, error) {\n\ttimer := pkg.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\trequestRecordsTimeSummary.WithLabelValues(dnsZone).Observe(v)\n\t}))\n\tdefer timer.ObserveDuration()\n\n\tpageToken := \"\"\n\n\tresourceRecordSets := make([]*dns.ResourceRecordSet, 0, 16)\n\tfor {\n\t\treq := s.service.ResourceRecordSets.List(s.project, dnsZone)\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken(pageToken)\n\t\t}\n\t\tresp, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[Cloud DNS] Error getting DNS resourceRecordSets from zone %s: %v\", dnsZone, err)\n\t\t}\n\t\tfor _, r := range resp.Rrsets {\n\t\t\tresourceRecordSets = append(resourceRecordSets, r)\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = resp.NextPageToken\n\t}\n\treturn resourceRecordSets, nil\n}",
"func (c *Client) ListRecordSets(zoneID string) ([]*route53.ResourceRecordSet, error) {\n\trecords := make([]*route53.ResourceRecordSet, 0)\n\n\tclient, err := c.initRoute53Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(zoneID),\n\t}\n\n\terr = client.ListResourceRecordSetsPages(params, func(resp *route53.ListResourceRecordSetsOutput, lastPage bool) bool {\n\t\tlog.Debugf(\"Getting a list of AWS RRS of length: %d\", len(resp.ResourceRecordSets))\n\t\trecords = append(records, resp.ResourceRecordSets...)\n\t\treturn !lastPage\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}",
"func (s *FastDNSv2Service) GetZoneRecordSets(ctx context.Context, zone string, opt *ListZoneRecordSetOptions) (*ListZoneRecordSets, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/recordsets\", zone)\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar z *ListZoneRecordSets\n\tresp, err := s.client.Do(ctx, req, &z)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn z, resp, nil\n}",
"func (a *AmazonDNS) getZoneRecordSet() error {\n\tif acache.CacheTime != (time.Time{}) {\n\t\tnow := time.Now()\n\t\tif now.Sub(acache.CacheTime).Seconds() <= 30 {\n\t\t\treturn nil\n\t\t}\n\t\tacache = amazonRrsetCache{}\n\t}\n\n\tservice, err := a.getDNSService()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(a.ZoneID),\n\t}\n\n\terr = service.ListResourceRecordSetsPages(params,\n\t\tfunc(page *route53.ListResourceRecordSetsOutput, lastPage bool) bool {\n\t\t\tacache.Rrsets = append(acache.Rrsets, page.ResourceRecordSets...)\n\t\t\treturn lastPage\n\t\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tacache.CacheTime = time.Now()\n\treturn nil\n}",
"func route53ResourceRecordSetsForZone(opts Options, logger logrus.FieldLogger, svc *route53.Route53, zone *route53.HostedZone, zoneTags Tags, set *Set) ([]*route53ResourceRecordSet, error) {\n\tvar toDelete []*route53ResourceRecordSet\n\n\trecordsPageFunc := func(records *route53.ListResourceRecordSetsOutput, _ bool) bool {\n\t\tfor _, rrs := range records.ResourceRecordSets {\n\t\t\tif !opts.SkipRoute53ManagementCheck && !resourceRecordSetIsManaged(rrs) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\to := &route53ResourceRecordSet{zone: zone, obj: rrs}\n\t\t\t// no tags for ResourceRecordSets, so use zone tags instead\n\t\t\tif !set.Mark(opts, o, nil, zoneTags) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := opts.SkipResourceRecordSetTypes[*rrs.Type]; !ok {\n\t\t\t\tlogger.Warningf(\"%s: deleting %T: %s\", o.ARN(), rrs, *rrs.Name)\n\t\t\t\tif !opts.DryRun {\n\t\t\t\t\ttoDelete = append(toDelete, o)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\terr := svc.ListResourceRecordSetsPages(&route53.ListResourceRecordSetsInput{HostedZoneId: zone.Id}, recordsPageFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn toDelete, nil\n}",
"func (d *DNS) GetResourceRecordSet(projectID string, managedZone string, name string) (*v1.ResourceRecordSet, error) {\n\tctx := context.Background()\n\trrsService := v1.NewResourceRecordSetsService(d.V1)\n\trrsListCall := rrsService.List(projectID, managedZone).Context(ctx).Name(name)\n\trrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(rrsList.Rrsets) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn rrsList.Rrsets[0], nil\n}",
"func (o *dnsOp) listRecords(zone dnsprovider.Zone) ([]dnsprovider.ResourceRecordSet, error) {\n\tkey := zone.Name() + \"::\" + zone.ID()\n\n\trrs := o.recordsCache[key]\n\tif rrs == nil {\n\t\trrsProvider, ok := zone.ResourceRecordSets()\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"zone does not support resource records %q\", zone.Name())\n\t\t}\n\n\t\tklog.V(2).Infof(\"Querying all dnsprovider records for zone %q\", zone.Name())\n\t\tvar err error\n\t\trrs, err = rrsProvider.List()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error querying resource records for zone %q: %v\", zone.Name(), err)\n\t\t}\n\n\t\to.recordsCache[key] = rrs\n\t}\n\n\treturn rrs, nil\n}",
"func GetRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RecordSetState, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tvar resource RecordSet\n\terr := ctx.ReadResource(\"azure-native:network/v20170901:RecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (api *powerdnsProvider) GetZoneRecords(domain string) (models.Records, error) {\n\tzone, err := api.client.Zones().GetZone(context.Background(), api.ServerName, domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurRecords := models.Records{}\n\t// loop over grouped records by type, called RRSet\n\tfor _, rrset := range zone.ResourceRecordSets {\n\t\tif rrset.Type == \"SOA\" {\n\t\t\tcontinue\n\t\t}\n\t\t// loop over single records of this group and create records\n\t\tfor _, pdnsRecord := range rrset.Records {\n\t\t\tr, err := toRecordConfig(domain, pdnsRecord, rrset.TTL, rrset.Name, rrset.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcurRecords = append(curRecords, r)\n\t\t}\n\t}\n\n\treturn curRecords, nil\n}",
"func (c *cloudnsProvider) GetZoneRecords(domain string, meta map[string]string) (models.Records, error) {\n\trecords, err := c.getRecords(domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texistingRecords := make([]*models.RecordConfig, len(records))\n\tfor i := range records {\n\t\texistingRecords[i] = toRc(domain, &records[i])\n\t}\n\treturn existingRecords, nil\n}",
"func (c *DeviceController) GetRecords(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tres := r53.GetRecordSets(vars[\"id\"])\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tres,\n\t\thttp.StatusOK,\n\t)\n}",
"func (m *MockClient) ListResourceRecordSets(input *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", input)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockClient) ListResourceRecordSets(arg0 *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", arg0)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (s *dnsRecordSetLister) DnsRecordSets(namespace string) DnsRecordSetNamespaceLister {\n\treturn dnsRecordSetNamespaceLister{indexer: s.indexer, namespace: namespace}\n}",
"func (d *DNS) SetResourceRecordSets(projectID string, managedZone string, records []*v1.ResourceRecordSet) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tvar additions []*v1.ResourceRecordSet\n\tvar change *v1.Change\n\tlogItems := []string{}\n\tfor _, record := range records {\n\t\texisting, err := d.GetResourceRecordSet(projectID, managedZone, record.Name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error trying to get existing resource record set: %s\", err)\n\t\t}\n\t\taction := \"creating\"\n\t\tif existing != nil {\n\t\t\tdeletions = append(deletions, existing)\n\t\t\taction = \"recreating\"\n\t\t}\n\t\tlogItems = append(logItems, fmt.Sprintf(\"====> %s %s => %s %s\", action, record.Name, record.Type, strings.Join(record.Rrdatas, \",\")))\n\t\tadditions = append(additions, record)\n\t}\n\td.log.Info(\"Ensuring the DNS zone %s has the following records:\", managedZone)\n\tfor _, item := range logItems {\n\t\td.log.ListItem(item)\n\t}\n\tif len(deletions) > 0 {\n\t\tchange = &v1.Change{\n\t\t\tDeletions: deletions,\n\t\t}\n\t\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tchange = &v1.Change{\n\t\tAdditions: additions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (api *packetframeProvider) GetZoneRecords(domain string, meta map[string]string) (models.Records, error) {\n\n\tzone, err := api.getZone(domain)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"no such zone %q in Packetframe account\", domain)\n\t}\n\n\trecords, err := api.getRecords(zone.ID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not load records for domain %q\", domain)\n\t}\n\n\texistingRecords := make([]*models.RecordConfig, len(records))\n\n\tdc := models.DomainConfig{\n\t\tName: domain,\n\t}\n\n\tfor i := range records {\n\t\texistingRecords[i] = toRc(&dc, &records[i])\n\t}\n\n\treturn existingRecords, nil\n}",
"func (s *FastDNSv2Service) GetChangeListRecordSets(ctx context.Context, zone string, opt *ChangeListOptions) (*ChangeListRecords, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/changelists/%v/recordsets\", zone)\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tc := new(ChangeListRecords)\n\tresp, err := s.client.Do(ctx, req, &c)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn c, resp, nil\n\n}",
"func GetRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RecordSetState, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tvar resource RecordSet\n\terr := ctx.ReadResource(\"gcp:dns/recordSet:RecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (client DnsClient) getZoneRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones/{zoneNameOrId}/records\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetZoneRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetResourceRecordSet will search for an existing record set by the resourcer record set name
|
func (d *DNS) GetResourceRecordSet(projectID string, managedZone string, name string) (*v1.ResourceRecordSet, error) {
ctx := context.Background()
rrsService := v1.NewResourceRecordSetsService(d.V1)
rrsListCall := rrsService.List(projectID, managedZone).Context(ctx).Name(name)
rrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)
if err != nil {
return nil, err
}
if len(rrsList.Rrsets) == 0 {
return nil, nil
}
return rrsList.Rrsets[0], nil
}
|
[
"func GetRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RecordSetState, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tvar resource RecordSet\n\terr := ctx.ReadResource(\"azure-native:network/v20170901:RecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func GetRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RecordSetState, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tvar resource RecordSet\n\terr := ctx.ReadResource(\"gcp:dns/recordSet:RecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (s *cloudDNSService) getResourceRecordSets(dnsZone string) ([]*dns.ResourceRecordSet, error) {\n\ttimer := pkg.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\trequestRecordsTimeSummary.WithLabelValues(dnsZone).Observe(v)\n\t}))\n\tdefer timer.ObserveDuration()\n\n\tpageToken := \"\"\n\n\tresourceRecordSets := make([]*dns.ResourceRecordSet, 0, 16)\n\tfor {\n\t\treq := s.service.ResourceRecordSets.List(s.project, dnsZone)\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken(pageToken)\n\t\t}\n\t\tresp, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[Cloud DNS] Error getting DNS resourceRecordSets from zone %s: %v\", dnsZone, err)\n\t\t}\n\t\tfor _, r := range resp.Rrsets {\n\t\t\tresourceRecordSets = append(resourceRecordSets, r)\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = resp.NextPageToken\n\t}\n\treturn resourceRecordSets, nil\n}",
"func findRecord(d *schema.ResourceData, meta interface{}) (*route53.ResourceRecordSet, error) {\n\tconn := meta.(*AWSClient).r53conn\n\t// Scan for a\n\tzone := cleanZoneID(d.Get(\"zone_id\").(string))\n\n\t// get expanded name\n\tzoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})\n\tif err != nil {\n\t\tif r53err, ok := err.(awserr.Error); ok && r53err.Code() == \"NoSuchHostedZone\" {\n\t\t\treturn nil, r53NoHostedZoneFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\ten := expandRecordName(d.Get(\"name\").(string), *zoneRecord.HostedZone.Name)\n\tlog.Printf(\"[DEBUG] Expanded record name: %s\", en)\n\td.Set(\"fqdn\", en)\n\n\tlopts := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(cleanZoneID(zone)),\n\t\tStartRecordName: aws.String(en),\n\t\tStartRecordType: aws.String(d.Get(\"type\").(string)),\n\t}\n\n\tlog.Printf(\"[DEBUG] List resource records sets for zone: %s, opts: %s\",\n\t\tzone, lopts)\n\tresp, err := conn.ListResourceRecordSets(lopts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, record := range resp.ResourceRecordSets {\n\t\tname := cleanRecordName(*record.Name)\n\t\tif FQDN(strings.ToLower(name)) != FQDN(strings.ToLower(*lopts.StartRecordName)) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.ToUpper(*record.Type) != strings.ToUpper(*lopts.StartRecordType) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif record.SetIdentifier != nil && *record.SetIdentifier != d.Get(\"set_identifier\") {\n\t\t\tcontinue\n\t\t}\n\t\t// The only safe return where a record is found\n\t\treturn record, nil\n\t}\n\treturn nil, r53NoRecordsFound\n}",
"func (s *FastDNSv2Service) GetRecordSet(ctx context.Context, opt *RecordSetOptions) (*RecordSet, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", opt.Zone, opt.Name, opt.Type)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar rs *RecordSet\n\tresp, err := s.client.Do(ctx, req, &rs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn rs, resp, nil\n}",
"func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RecordType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RecordType'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.ZoneName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ZoneName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20170901:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20150504preview:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20150504preview:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20160401:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20160401:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20171001:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20171001:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20180301preview:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20180301preview:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20180501:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20180501:RecordSet\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"azure-native:network/v20170901:RecordSet\", name, args, &resource, opts...)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (d *DNS) GetResourceRecordSets(projectID string, managedZone string) ([]*v1.ResourceRecordSet, error) {\n\tctx := context.Background()\n\trrsService := v1.NewResourceRecordSetsService(d.V1)\n\trrsListCall := rrsService.List(projectID, managedZone).Context(ctx)\n\trrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rrsList.Rrsets, nil\n}",
"func (a *AmazonDNS) getZoneRecordSet() error {\n\tif acache.CacheTime != (time.Time{}) {\n\t\tnow := time.Now()\n\t\tif now.Sub(acache.CacheTime).Seconds() <= 30 {\n\t\t\treturn nil\n\t\t}\n\t\tacache = amazonRrsetCache{}\n\t}\n\n\tservice, err := a.getDNSService()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(a.ZoneID),\n\t}\n\n\terr = service.ListResourceRecordSetsPages(params,\n\t\tfunc(page *route53.ListResourceRecordSetsOutput, lastPage bool) bool {\n\t\t\tacache.Rrsets = append(acache.Rrsets, page.ResourceRecordSets...)\n\t\t\treturn lastPage\n\t\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tacache.CacheTime = time.Now()\n\treturn nil\n}",
"func (client DnsClient) getRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func NewResourceRecordSet(ctx *pulumi.Context,\n\tname string, args *ResourceRecordSetArgs, opts ...pulumi.ResourceOption) (*ResourceRecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"managedZone\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ResourceRecordSet\n\terr := ctx.RegisterResource(\"google-native:dns/v1beta2:ResourceRecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil || args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ManagedZone'\")\n\t}\n\tif args == nil || args.Rrdatas == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rrdatas'\")\n\t}\n\tif args == nil || args.Ttl == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Ttl'\")\n\t}\n\tif args == nil || args.Type == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Type'\")\n\t}\n\tif args == nil {\n\t\targs = &RecordSetArgs{}\n\t}\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\tif args.Name == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Name'\")\n\t}\n\tif args.Type == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Type'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (parser *MRCPParser) MRCPParserResourceSet(name string) {\n\n}",
"func (c *Client) ListRecordSets(zoneID string) ([]*route53.ResourceRecordSet, error) {\n\trecords := make([]*route53.ResourceRecordSet, 0)\n\n\tclient, err := c.initRoute53Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(zoneID),\n\t}\n\n\terr = client.ListResourceRecordSetsPages(params, func(resp *route53.ListResourceRecordSetsOutput, lastPage bool) bool {\n\t\tlog.Debugf(\"Getting a list of AWS RRS of length: %d\", len(resp.ResourceRecordSets))\n\t\trecords = append(records, resp.ResourceRecordSets...)\n\t\treturn !lastPage\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}",
"func (s *ResourceRecordSetServer) applyResourceRecordSet(ctx context.Context, c *dns.Client, request *dnspb.ApplyDnsResourceRecordSetRequest) (*dnspb.DnsResourceRecordSet, error) {\n\tp := ProtoToResourceRecordSet(request.GetResource())\n\tres, err := c.ApplyResourceRecordSet(ctx, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := ResourceRecordSetToProto(res)\n\treturn r, nil\n}",
"func (d *DNS) SetResourceRecordSets(projectID string, managedZone string, records []*v1.ResourceRecordSet) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tvar additions []*v1.ResourceRecordSet\n\tvar change *v1.Change\n\tlogItems := []string{}\n\tfor _, record := range records {\n\t\texisting, err := d.GetResourceRecordSet(projectID, managedZone, record.Name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error trying to get existing resource record set: %s\", err)\n\t\t}\n\t\taction := \"creating\"\n\t\tif existing != nil {\n\t\t\tdeletions = append(deletions, existing)\n\t\t\taction = \"recreating\"\n\t\t}\n\t\tlogItems = append(logItems, fmt.Sprintf(\"====> %s %s => %s %s\", action, record.Name, record.Type, strings.Join(record.Rrdatas, \",\")))\n\t\tadditions = append(additions, record)\n\t}\n\td.log.Info(\"Ensuring the DNS zone %s has the following records:\", managedZone)\n\tfor _, item := range logItems {\n\t\td.log.ListItem(item)\n\t}\n\tif len(deletions) > 0 {\n\t\tchange = &v1.Change{\n\t\t\tDeletions: deletions,\n\t\t}\n\t\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tchange = &v1.Change{\n\t\tAdditions: additions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *awsClient) FindRoute53ResourceRecord(zoneID, name, rrType string) ([]*route53.ResourceRecord, error) {\n\tlog.Printf(\"Looking for resource record set %s %s in zone ID: %s\", name, rrType, zoneID)\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(zoneID),\n\t\tMaxItems: aws.String(\"1\"),\n\t\tStartRecordName: aws.String(name),\n\t\tStartRecordType: aws.String(rrType),\n\t}\n\n\tresp, err := c.Route53.ListResourceRecordSets(params)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error locating resource record: %v\", err)\n\t}\n\n\tif len(resp.ResourceRecordSets) < 1 {\n\t\treturn nil, fmt.Errorf(\"Resource record set %s %s not found\", name, rrType)\n\t}\n\n\treturn resp.ResourceRecordSets[0].ResourceRecords, nil\n}",
"func route53ResourceRecordSetsForZone(opts Options, logger logrus.FieldLogger, svc *route53.Route53, zone *route53.HostedZone, zoneTags Tags, set *Set) ([]*route53ResourceRecordSet, error) {\n\tvar toDelete []*route53ResourceRecordSet\n\n\trecordsPageFunc := func(records *route53.ListResourceRecordSetsOutput, _ bool) bool {\n\t\tfor _, rrs := range records.ResourceRecordSets {\n\t\t\tif !opts.SkipRoute53ManagementCheck && !resourceRecordSetIsManaged(rrs) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\to := &route53ResourceRecordSet{zone: zone, obj: rrs}\n\t\t\t// no tags for ResourceRecordSets, so use zone tags instead\n\t\t\tif !set.Mark(opts, o, nil, zoneTags) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := opts.SkipResourceRecordSetTypes[*rrs.Type]; !ok {\n\t\t\t\tlogger.Warningf(\"%s: deleting %T: %s\", o.ARN(), rrs, *rrs.Name)\n\t\t\t\tif !opts.DryRun {\n\t\t\t\t\ttoDelete = append(toDelete, o)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\terr := svc.ListResourceRecordSetsPages(&route53.ListResourceRecordSetsInput{HostedZoneId: zone.Id}, recordsPageFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn toDelete, nil\n}",
"func (s *ResourceRecordSetServer) ListDnsResourceRecordSet(ctx context.Context, request *dnspb.ListDnsResourceRecordSetRequest) (*dnspb.ListDnsResourceRecordSetResponse, error) {\n\tcl, err := createConfigResourceRecordSet(ctx, request.ServiceAccountFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresources, err := cl.ListResourceRecordSet(ctx, request.Project, request.ManagedZone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar protos []*dnspb.DnsResourceRecordSet\n\tfor _, r := range resources.Items {\n\t\trp := ResourceRecordSetToProto(r)\n\t\tprotos = append(protos, rp)\n\t}\n\treturn &dnspb.ListDnsResourceRecordSetResponse{Items: protos}, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetResourceRecordSets will create or update a DNS zone with one or more record sets
|
func (d *DNS) SetResourceRecordSets(projectID string, managedZone string, records []*v1.ResourceRecordSet) error {
var deletions []*v1.ResourceRecordSet
var additions []*v1.ResourceRecordSet
var change *v1.Change
logItems := []string{}
for _, record := range records {
existing, err := d.GetResourceRecordSet(projectID, managedZone, record.Name)
if err != nil {
return fmt.Errorf("Error trying to get existing resource record set: %s", err)
}
action := "creating"
if existing != nil {
deletions = append(deletions, existing)
action = "recreating"
}
logItems = append(logItems, fmt.Sprintf("====> %s %s => %s %s", action, record.Name, record.Type, strings.Join(record.Rrdatas, ",")))
additions = append(additions, record)
}
d.log.Info("Ensuring the DNS zone %s has the following records:", managedZone)
for _, item := range logItems {
d.log.ListItem(item)
}
if len(deletions) > 0 {
change = &v1.Change{
Deletions: deletions,
}
if err := d.executeChange(projectID, managedZone, change); err != nil {
return err
}
}
change = &v1.Change{
Additions: additions,
}
if err := d.executeChange(projectID, managedZone, change); err != nil {
return err
}
return nil
}
|
[
"func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar updatedRecords []libdns.Record\n\n\tvar resourceRecordSets []LeasewebRecordSet\n\n\tfor _, record := range records {\n\n\t\trecordSet := LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tresourceRecordSets = append(resourceRecordSets, recordSet)\n\n\t\tupdatedRecords = append(updatedRecords, record)\n\t}\n\n\tbody := &LeasewebRecordSets{\n\t\tResourceRecordSets: resourceRecordSets,\n\t}\n\n\tbodyBuffer := new(bytes.Buffer)\n\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\treq, err := http.NewRequest(http.MethodPut, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\treturn updatedRecords, nil\n}",
"func (dns *client) UpdateRecordSets(changes []*route53.Change) error {\n\tpartitions := util.Partition(len(changes), dns.maxRecordChanges)\n\tfor _, partition := range partitions {\n\t\tbatch := changes[partition.Low:partition.High]\n\t\trecordSetsInput := &route53.ChangeResourceRecordSetsInput{\n\t\t\tHostedZoneId: aws.String(dns.hostedZone),\n\t\t\tChangeBatch: &route53.ChangeBatch{\n\t\t\t\tChanges: batch,\n\t\t\t},\n\t\t}\n\n\t\t_, err := dns.r53.ChangeResourceRecordSets(recordSetsInput)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create A record: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (c *Client) ChangeRecordSets(upsert, del, create []*route53.ResourceRecordSet, zoneID string) error {\n\tclient, err := c.initRoute53Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar changes []*route53.Change\n\tchanges = append(changes, createChangesList(\"CREATE\", create)...)\n\tchanges = append(changes, createChangesList(\"UPSERT\", upsert)...)\n\tchanges = append(changes, createChangesList(\"DELETE\", del)...)\n\tif len(changes) > 0 {\n\t\tparams := &route53.ChangeResourceRecordSetsInput{\n\t\t\tChangeBatch: &route53.ChangeBatch{\n\t\t\t\tChanges: changes,\n\t\t\t},\n\t\t\tHostedZoneId: aws.String(zoneID),\n\t\t}\n\t\t_, err = client.ChangeResourceRecordSets(params)\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *client) AddRecordSet(zone string, set models.ResourceRecordSet) error {\n\tpath := fmt.Sprintf(\"/api/v1/servers/localhost/zones/%s\", url.PathEscape(zone))\n\n\tset.ChangeType = models.ChangeTypeReplace\n\tpatch := models.Zone{\n\t\tResourceRecordSets: []models.ResourceRecordSet{\n\t\t\tset,\n\t\t},\n\t}\n\n\treturn c.httpClient.Patch(path, nil, pdnshttp.WithJSONRequestBody(&patch))\n}",
"func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\terr := p.auth()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot authenticate to OpenStack Designate: %v\", err)\n\t}\n\n\terr = p.setZone(zone)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot set ZONE: %v\", err)\n\t}\n\n\tvar setRecords []libdns.Record\n\n\tfor _, record := range records {\n\t\trecordID, err := p.getRecordID(record.Name, zone)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif recordID == \"\" {\n\t\t\treturn nil, errors.New(\"recordID does not exist\")\n\t\t}\n\n\t\terr = p.updateRecord(record, recordID)\n\t\tif err != nil {\n\t\t\treturn setRecords, err\n\t\t}\n\t\tsetRecords = append(setRecords, record)\n\t}\n\n\treturn setRecords, nil\n}",
"func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\treturn p.updateRecords(ctx, zone, records)\n}",
"func (d *DNSController) ensureDNSRrsets(dnsZone dnsprovider.Zone, dnsName string, endpoints []string, uplevelCname string) error {\n\trrsets, supported := dnsZone.ResourceRecordSets()\n\tif !supported {\n\t\treturn fmt.Errorf(\"Failed to ensure DNS records for %s. DNS provider does not support the ResourceRecordSets interface\", dnsName)\n\t}\n\trrsetList, err := rrsets.Get(dnsName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(rrsetList) == 0 {\n\t\tglog.V(4).Infof(\"No recordsets found for DNS name %q. Need to add either A records (if we have healthy endpoints), or a CNAME record to %q\", dnsName, uplevelCname)\n\t\tif len(endpoints) < 1 {\n\t\t\tglog.V(4).Infof(\"There are no healthy endpoint addresses at level %q, so CNAME to %q, if provided\", dnsName, uplevelCname)\n\t\t\tif uplevelCname != \"\" {\n\t\t\t\tglog.V(4).Infof(\"Creating CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully created CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t} else {\n\t\t\t\tglog.V(4).Infof(\"We want no record for %q, and we have no record, so we're all good.\", dnsName)\n\t\t\t}\n\t\t} else {\n\t\t\t// We have valid endpoint addresses, so just add them as A records.\n\t\t\t// But first resolve DNS names, as some cloud providers (like AWS) expose\n\t\t\t// load balancers behind DNS names, not IP addresses.\n\t\t\tglog.V(4).Infof(\"We have valid endpoint addresses %v at level %q, so add them as A records, after resolving DNS names\", endpoints, dnsName)\n\t\t\t// Resolve DNS through network\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil {\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, 
even if some of them failed to resolve.\n\t\t\t}\n\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"Successfully added recordset %v\", newRrset)\n\t\t}\n\t} else {\n\t\t// the rrsets already exists, so make it right.\n\t\tglog.V(4).Infof(\"Recordset %v already exists. Ensuring that it is correct.\", rrsetList)\n\t\tif len(endpoints) < 1 {\n\t\t\t// Need an appropriate CNAME record. Check that we have it.\n\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\tglog.V(4).Infof(\"No healthy endpoints for %d. Have recordsets %v. Need recordset %v\", dnsName, rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\t// The existing rrset is equivalent to the required one - our work is done here\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", rrsetList, newRrset)\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one (or just remove it if we have no healthy endpoints).\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v not equivalent to needed recordset %v removing existing and adding needed.\", rrsetList, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tif uplevelCname != \"\" {\n\t\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully replaced needed recordset %v -> %v\", found, newRrset)\n\t\t\t\t} else {\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn 
err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully removed existing recordset %v\", found)\n\t\t\t\t\tglog.V(4).Infof(\"Uplevel CNAME is empty string. Not adding recordset %v\", newRrset)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// We have an rrset in DNS, possibly with some missing addresses and some unwanted addresses.\n\t\t\t// And we have healthy endpoints. Just replace what'd there with the healthy endpoints, if it'd not already correct.\n\t\t\tglog.V(4).Infof(\"%d: Healthy endpoints %v exist. Recordset %v exists. Reconciling.\", dnsName, endpoints, rrsetList)\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil { // Some invalid addresses or otherwise unresolvable DNS names.\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Have recordset %v. Need recordset %v\", rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", found, newRrset)\n\t\t\t\t// TODO: We could be more thorough about checking for equivalence to avoid unnecessary updates, but in the\n\t\t\t\t// worst case we'll just replace what'd there with an equivalent, if not exactly identical record set.\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is not equivalent to needed recordset %v, removing existing and adding needed.\", found, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\tif err = changeSet.Apply(); err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully replaced recordset %v -> %v\", found, newRrset)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func route53ResourceRecordSetsForZone(opts Options, logger logrus.FieldLogger, svc *route53.Route53, zone *route53.HostedZone, zoneTags Tags, set *Set) ([]*route53ResourceRecordSet, error) {\n\tvar toDelete []*route53ResourceRecordSet\n\n\trecordsPageFunc := func(records *route53.ListResourceRecordSetsOutput, _ bool) bool {\n\t\tfor _, rrs := range records.ResourceRecordSets {\n\t\t\tif !opts.SkipRoute53ManagementCheck && !resourceRecordSetIsManaged(rrs) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\to := &route53ResourceRecordSet{zone: zone, obj: rrs}\n\t\t\t// no tags for ResourceRecordSets, so use zone tags instead\n\t\t\tif !set.Mark(opts, o, nil, zoneTags) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := opts.SkipResourceRecordSetTypes[*rrs.Type]; !ok {\n\t\t\t\tlogger.Warningf(\"%s: deleting %T: %s\", o.ARN(), rrs, *rrs.Name)\n\t\t\t\tif !opts.DryRun {\n\t\t\t\t\ttoDelete = append(toDelete, o)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\terr := svc.ListResourceRecordSetsPages(&route53.ListResourceRecordSetsInput{HostedZoneId: zone.Id}, recordsPageFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn toDelete, nil\n}",
"func (s *cloudDNSService) getResourceRecordSets(dnsZone string) ([]*dns.ResourceRecordSet, error) {\n\ttimer := pkg.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\trequestRecordsTimeSummary.WithLabelValues(dnsZone).Observe(v)\n\t}))\n\tdefer timer.ObserveDuration()\n\n\tpageToken := \"\"\n\n\tresourceRecordSets := make([]*dns.ResourceRecordSet, 0, 16)\n\tfor {\n\t\treq := s.service.ResourceRecordSets.List(s.project, dnsZone)\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken(pageToken)\n\t\t}\n\t\tresp, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[Cloud DNS] Error getting DNS resourceRecordSets from zone %s: %v\", dnsZone, err)\n\t\t}\n\t\tfor _, r := range resp.Rrsets {\n\t\t\tresourceRecordSets = append(resourceRecordSets, r)\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = resp.NextPageToken\n\t}\n\treturn resourceRecordSets, nil\n}",
"func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RecordType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RecordType'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.ZoneName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ZoneName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20170901:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20150504preview:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20150504preview:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20160401:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20160401:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20171001:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20171001:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20180301preview:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20180301preview:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20180501:RecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20180501:RecordSet\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"azure-native:network/v20170901:RecordSet\", name, args, &resource, opts...)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (d *DNS) DeleteResourceRecordSets(projectID string, managedZone string) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tresourceRecordSets, err := d.GetResourceRecordSets(projectID, managedZone)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.log.Info(\"Deleting all records from DNS zone %s:\", managedZone)\n\tfor _, resourceRecordSet := range resourceRecordSets {\n\t\tif resourceRecordSet.Type == \"SOA\" || resourceRecordSet.Type == \"NS\" {\n\t\t\tcontinue\n\t\t}\n\t\tdeletions = append(deletions, resourceRecordSet)\n\t\td.log.ListItem(\"%s %s\", resourceRecordSet.Type, resourceRecordSet.Name)\n\t}\n\tchange := &v1.Change{\n\t\tDeletions: deletions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (ac *azureClient) CreateOrUpdateRecordSet(ctx context.Context, resourceGroupName string, privateZoneName string, recordType privatedns.RecordType, name string, set privatedns.RecordSet) error {\n\tctx, _, done := tele.StartSpanWithLogger(ctx, \"privatedns.AzureClient.CreateOrUpdateRecordSet\")\n\tdefer done()\n\n\t_, err := ac.recordsets.CreateOrUpdate(ctx, resourceGroupName, privateZoneName, recordType, name, set, \"\", \"\")\n\treturn err\n}",
"func (s *dnsClient) CreateOrUpdateRecordSet(ctx context.Context, managedZone, name, recordType string, rrdatas []string, ttl int64) error {\n\tproject, managedZone := s.projectAndManagedZone(managedZone)\n\tname = ensureTrailingDot(name)\n\trrs, err := s.getResourceRecordSet(ctx, project, managedZone, name, recordType)\n\tif err != nil {\n\t\treturn err\n\t}\n\trrdatas = formatRrdatas(recordType, rrdatas)\n\tchange := &googledns.Change{}\n\tif rrs != nil {\n\t\tif reflect.DeepEqual(rrs.Rrdatas, rrdatas) && rrs.Ttl == ttl {\n\t\t\treturn nil\n\t\t}\n\t\tchange.Deletions = append(change.Deletions, rrs)\n\t}\n\tchange.Additions = append(change.Additions, &googledns.ResourceRecordSet{Name: name, Type: recordType, Rrdatas: rrdatas, Ttl: ttl})\n\t_, err = s.service.Changes.Create(project, managedZone, change).Context(ctx).Do()\n\treturn err\n}",
"func (p *Provider) AppendRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar addedRecords []libdns.Record\n\n\tfor _, record := range records {\n\t\tbody := &LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tbodyBuffer := new(bytes.Buffer)\n\t\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\t\treq, err := http.NewRequest(http.MethodPost, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\t\tres, err := client.Do(req)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t\t}\n\n\t\taddedRecords = append(addedRecords, record)\n\t}\n\n\treturn addedRecords, nil\n}",
"func (client DnsClient) updateRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func NewResourceRecordSet(ctx *pulumi.Context,\n\tname string, args *ResourceRecordSetArgs, opts ...pulumi.ResourceOption) (*ResourceRecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"managedZone\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ResourceRecordSet\n\terr := ctx.RegisterResource(\"google-native:dns/v1beta2:ResourceRecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (_Contract *ContractTransactor) SetDNSRecords(opts *bind.TransactOpts, node [32]byte, data []byte) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"setDNSRecords\", node, data)\n}",
"func (s *ResourceRecordSetServer) applyResourceRecordSet(ctx context.Context, c *dns.Client, request *dnspb.ApplyDnsResourceRecordSetRequest) (*dnspb.DnsResourceRecordSet, error) {\n\tp := ProtoToResourceRecordSet(request.GetResource())\n\tres, err := c.ApplyResourceRecordSet(ctx, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := ResourceRecordSetToProto(res)\n\treturn r, nil\n}",
"func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil || args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ManagedZone'\")\n\t}\n\tif args == nil || args.Rrdatas == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rrdatas'\")\n\t}\n\tif args == nil || args.Ttl == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Ttl'\")\n\t}\n\tif args == nil || args.Type == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Type'\")\n\t}\n\tif args == nil {\n\t\targs = &RecordSetArgs{}\n\t}\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DeleteResourceRecordSets will remove all resource record sets from a managed zone
|
func (d *DNS) DeleteResourceRecordSets(projectID string, managedZone string) error {
var deletions []*v1.ResourceRecordSet
resourceRecordSets, err := d.GetResourceRecordSets(projectID, managedZone)
if err != nil {
return err
}
d.log.Info("Deleting all records from DNS zone %s:", managedZone)
for _, resourceRecordSet := range resourceRecordSets {
if resourceRecordSet.Type == "SOA" || resourceRecordSet.Type == "NS" {
continue
}
deletions = append(deletions, resourceRecordSet)
d.log.ListItem("%s %s", resourceRecordSet.Type, resourceRecordSet.Name)
}
change := &v1.Change{
Deletions: deletions,
}
if err := d.executeChange(projectID, managedZone, change); err != nil {
return err
}
return nil
}
|
[
"func route53ResourceRecordSetsForZone(opts Options, logger logrus.FieldLogger, svc *route53.Route53, zone *route53.HostedZone, zoneTags Tags, set *Set) ([]*route53ResourceRecordSet, error) {\n\tvar toDelete []*route53ResourceRecordSet\n\n\trecordsPageFunc := func(records *route53.ListResourceRecordSetsOutput, _ bool) bool {\n\t\tfor _, rrs := range records.ResourceRecordSets {\n\t\t\tif !opts.SkipRoute53ManagementCheck && !resourceRecordSetIsManaged(rrs) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\to := &route53ResourceRecordSet{zone: zone, obj: rrs}\n\t\t\t// no tags for ResourceRecordSets, so use zone tags instead\n\t\t\tif !set.Mark(opts, o, nil, zoneTags) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := opts.SkipResourceRecordSetTypes[*rrs.Type]; !ok {\n\t\t\t\tlogger.Warningf(\"%s: deleting %T: %s\", o.ARN(), rrs, *rrs.Name)\n\t\t\t\tif !opts.DryRun {\n\t\t\t\t\ttoDelete = append(toDelete, o)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\terr := svc.ListResourceRecordSetsPages(&route53.ListResourceRecordSetsInput{HostedZoneId: zone.Id}, recordsPageFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn toDelete, nil\n}",
"func (s *Synk) deleteResourceSets(ctx context.Context, name string, version int32) error {\n\tc := s.client.Resource(resourceSetGVR)\n\n\tlist, err := c.List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"list existing resources\")\n\t}\n\tfor _, r := range list.Items {\n\t\tn, v, ok := decodeResourceSetName(r.GetName())\n\t\tif !ok || n != name || v >= version {\n\t\t\tcontinue\n\t\t}\n\t\t// TODO: should we possibly opt for foreground deletion here so\n\t\t// we only return after all dependents have been deleted as well?\n\t\t// kubectl doesn't allow to opt into foreground deletion in general but\n\t\t// here it would likely bring us closer to the apply --prune semantics.\n\t\tif err := c.Delete(ctx, r.GetName(), metav1.DeleteOptions{}); err != nil {\n\t\t\treturn errors.Wrapf(err, \"delete ResourceSet %q\", r.GetName())\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *client) DeleteRecordSet(zone, name, rrtype string) error {\n\tpath := fmt.Sprintf(\"/api/v1/servers/localhost/zones/%s\", url.PathEscape(zone))\n\n\tset := models.ResourceRecordSet{\n\t\tName: name,\n\t\tType: rrtype,\n\t\tChangeType: models.ChangeTypeDelete,\n\t}\n\n\tpatch := models.Zone{\n\t\tResourceRecordSets: []models.ResourceRecordSet{set},\n\t}\n\n\treturn c.httpClient.Patch(path, nil, pdnshttp.WithJSONRequestBody(&patch))\n}",
"func (d *DNS) SetResourceRecordSets(projectID string, managedZone string, records []*v1.ResourceRecordSet) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tvar additions []*v1.ResourceRecordSet\n\tvar change *v1.Change\n\tlogItems := []string{}\n\tfor _, record := range records {\n\t\texisting, err := d.GetResourceRecordSet(projectID, managedZone, record.Name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error trying to get existing resource record set: %s\", err)\n\t\t}\n\t\taction := \"creating\"\n\t\tif existing != nil {\n\t\t\tdeletions = append(deletions, existing)\n\t\t\taction = \"recreating\"\n\t\t}\n\t\tlogItems = append(logItems, fmt.Sprintf(\"====> %s %s => %s %s\", action, record.Name, record.Type, strings.Join(record.Rrdatas, \",\")))\n\t\tadditions = append(additions, record)\n\t}\n\td.log.Info(\"Ensuring the DNS zone %s has the following records:\", managedZone)\n\tfor _, item := range logItems {\n\t\td.log.ListItem(item)\n\t}\n\tif len(deletions) > 0 {\n\t\tchange = &v1.Change{\n\t\t\tDeletions: deletions,\n\t\t}\n\t\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tchange = &v1.Change{\n\t\tAdditions: additions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (s *cloudDNSService) getResourceRecordSets(dnsZone string) ([]*dns.ResourceRecordSet, error) {\n\ttimer := pkg.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\trequestRecordsTimeSummary.WithLabelValues(dnsZone).Observe(v)\n\t}))\n\tdefer timer.ObserveDuration()\n\n\tpageToken := \"\"\n\n\tresourceRecordSets := make([]*dns.ResourceRecordSet, 0, 16)\n\tfor {\n\t\treq := s.service.ResourceRecordSets.List(s.project, dnsZone)\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken(pageToken)\n\t\t}\n\t\tresp, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[Cloud DNS] Error getting DNS resourceRecordSets from zone %s: %v\", dnsZone, err)\n\t\t}\n\t\tfor _, r := range resp.Rrsets {\n\t\t\tresourceRecordSets = append(resourceRecordSets, r)\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = resp.NextPageToken\n\t}\n\treturn resourceRecordSets, nil\n}",
"func (p *Provider) DeleteRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar deletedRecords []libdns.Record\n\n\tfor _, record := range records {\n\t\treq, err := http.NewRequest(http.MethodDelete, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets/%s/%s\", zone, record.Name, record.Type), nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\t\tres, err := client.Do(req)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t\t}\n\n\t\tdeletedRecords = append(deletedRecords, record)\n\t}\n\n\treturn deletedRecords, nil\n}",
"func (ac *azureClient) DeleteRecordSet(ctx context.Context, resourceGroupName string, privateZoneName string, recordType privatedns.RecordType, name string) error {\n\tctx, _, done := tele.StartSpanWithLogger(ctx, \"privatedns.AzureClient.DeleteRecordSet\")\n\tdefer done()\n\n\t_, err := ac.recordsets.Delete(ctx, resourceGroupName, privateZoneName, recordType, name, \"\")\n\treturn err\n}",
"func (s *dnsClient) DeleteRecordSet(ctx context.Context, managedZone, name, recordType string) error {\n\tproject, managedZone := s.projectAndManagedZone(managedZone)\n\tname = ensureTrailingDot(name)\n\trrs, err := s.getResourceRecordSet(ctx, project, managedZone, name, recordType)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rrs == nil {\n\t\treturn nil\n\t}\n\tchange := &googledns.Change{\n\t\tDeletions: []*googledns.ResourceRecordSet{rrs},\n\t}\n\t_, err = s.service.Changes.Create(project, managedZone, change).Context(ctx).Do()\n\treturn err\n}",
"func (d *DNS) GetResourceRecordSets(projectID string, managedZone string) ([]*v1.ResourceRecordSet, error) {\n\tctx := context.Background()\n\trrsService := v1.NewResourceRecordSetsService(d.V1)\n\trrsListCall := rrsService.List(projectID, managedZone).Context(ctx)\n\trrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rrsList.Rrsets, nil\n}",
"func testAccCheckDnsRecordSetDestroyProducerFramework(t *testing.T) func(s *terraform.State) error {\n\n\treturn func(s *terraform.State) error {\n\t\tfor name, rs := range s.RootModule().Resources {\n\t\t\tif rs.Type != \"google_dns_record_set\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(name, \"data.\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tp := acctest.GetFwTestProvider(t)\n\n\t\t\turl, err := acctest.ReplaceVarsForFrameworkTest(&p.FrameworkProvider.FrameworkProviderConfig, rs, \"{{DNSBasePath}}projects/{{project}}/managedZones/{{managed_zone}}/rrsets/{{name}}/{{type}}\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbillingProject := \"\"\n\n\t\t\tif !p.BillingProject.IsNull() && p.BillingProject.String() != \"\" {\n\t\t\t\tbillingProject = p.BillingProject.String()\n\t\t\t}\n\n\t\t\t_, diags := fwtransport.SendFrameworkRequest(&p.FrameworkProvider.FrameworkProviderConfig, \"GET\", billingProject, url, p.UserAgent, nil)\n\t\t\tif !diags.HasError() {\n\t\t\t\treturn fmt.Errorf(\"DNSResourceDnsRecordSet still exists at %s\", url)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}",
"func (client DnsClient) deleteRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func ResetRecords(resources *[]shaman.Resource) error {\n\tif storage == nil {\n\t\treturn nil\n\t}\n\tfor i := range *resources {\n\t\t(*resources)[i].Validate()\n\t}\n\n\treturn storage.resetRecords(*resources)\n}",
"func (c *Client) ChangeRecordSets(upsert, del, create []*route53.ResourceRecordSet, zoneID string) error {\n\tclient, err := c.initRoute53Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar changes []*route53.Change\n\tchanges = append(changes, createChangesList(\"CREATE\", create)...)\n\tchanges = append(changes, createChangesList(\"UPSERT\", upsert)...)\n\tchanges = append(changes, createChangesList(\"DELETE\", del)...)\n\tif len(changes) > 0 {\n\t\tparams := &route53.ChangeResourceRecordSetsInput{\n\t\t\tChangeBatch: &route53.ChangeBatch{\n\t\t\t\tChanges: changes,\n\t\t\t},\n\t\t\tHostedZoneId: aws.String(zoneID),\n\t\t}\n\t\t_, err = client.ChangeResourceRecordSets(params)\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (r Dns_Domain_ResourceRecord) DeleteObjects(templateObjects []datatypes.Dns_Domain_ResourceRecord) (resp bool, err error) {\n\tparams := []interface{}{\n\t\ttemplateObjects,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord\", \"deleteObjects\", params, &r.Options, &resp)\n\treturn\n}",
"func (c *Client) ListRecordSets(zoneID string) ([]*route53.ResourceRecordSet, error) {\n\trecords := make([]*route53.ResourceRecordSet, 0)\n\n\tclient, err := c.initRoute53Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(zoneID),\n\t}\n\n\terr = client.ListResourceRecordSetsPages(params, func(resp *route53.ListResourceRecordSetsOutput, lastPage bool) bool {\n\t\tlog.Debugf(\"Getting a list of AWS RRS of length: %d\", len(resp.ResourceRecordSets))\n\t\trecords = append(records, resp.ResourceRecordSets...)\n\t\treturn !lastPage\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}",
"func (s *FastDNSv2Service) DeleteRecordSet(ctx context.Context, opt *RecordSetOptions) (*Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", opt.Zone, opt.Name, opt.Type)\n\treq, err := s.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(ctx, req, nil)\n}",
"func deleteResources(kubeconfig string) error {\n\tcfg, err := clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating config object %v\", err)\n\t}\n\trbacConfig, err := rbacclient.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcore, _ := coreclient.NewForConfig(cfg)\n\n\t// delete role binding\n\tif err := rbacConfig.RoleBindings(projectName).Delete(context.TODO(), resourceName, metav1.DeleteOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"error deleting role binding\")\n\t}\n\t// delete cluster role binding\n\tif err := rbacConfig.ClusterRoleBindings().Delete(context.TODO(), resourceName, metav1.DeleteOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"error deleting cluster role binding\")\n\t}\n\t// delete service account\n\tif err := core.ServiceAccounts(projectName).Delete(context.TODO(), resourceName, metav1.DeleteOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"error deleting service account\")\n\t}\n\t// delete roles\n\tif err := rbacConfig.Roles(projectName).Delete(context.TODO(), resourceName, metav1.DeleteOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"error deleting role\")\n\t}\n\t// delete cluster role\n\tif err := rbacConfig.ClusterRoles().Delete(context.TODO(), resourceName, metav1.DeleteOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"error deleting cluster roles\")\n\t}\n\t// delete namespace\n\tif err := core.Namespaces().Delete(context.TODO(), resourceName, metav1.DeleteOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"error deleting namespace\")\n\t}\n\tlog.Print(\"successfully deleted all resources\")\n\treturn nil\n\n}",
"func (p *Provider) DeleteRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\t// delete flow is not implemented by acme-dns yet: https://github.com/joohoi/acme-dns/search?q=delete&type=issues\n\treturn nil, nil\n}",
"func DeletePrjPrizeSets(id int) (err error) {\n\to := orm.NewOrm()\n\tv := PrjPrizeSets{Id: id}\n\t// ascertain id exists in the database\n\tif err = o.Read(&v); err == nil {\n\t\tvar num int64\n\t\tif num, err = o.Delete(&PrjPrizeSets{Id: id}); err == nil {\n\t\t\tfmt.Println(\"Number of records deleted in database:\", num)\n\t\t}\n\t}\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Meta sets the meta data to be included in the aggregation response.
|
func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation {
a.meta = metaData
return a
}
|
[
"func (s *ExtendedStatsBucketAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsBucketAggregation {\n\ts.meta = metaData\n\treturn s\n}",
"func (a *RareTermsAggregation) Meta(metaData map[string]interface{}) *RareTermsAggregation {\n\ta.meta = metaData\n\treturn a\n}",
"func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation {\n\ta.meta = metaData\n\treturn a\n}",
"func (a *APITest) Meta(meta map[string]interface{}) *APITest {\n\ta.meta = meta\n\treturn a\n}",
"func (c *Client) Meta(key, value string) PublishOpt {\n\treturn func(p *Publication) error {\n\t\tif p.Entry.Meta == nil {\n\t\t\tp.Entry.Meta = map[string]string{}\n\t\t}\n\t\tp.Entry.Meta[key] = value\n\t\treturn nil\n\t}\n}",
"func Meta(key, value string) Option {\n\treturn setHeader(\"X-Oss-Meta-\"+key, value)\n}",
"func setMeta(resp http.ResponseWriter, m *structs.QueryMeta) {\n\tsetIndex(resp, m.Index)\n\tsetLastContact(resp, m.LastContact)\n\tsetKnownLeader(resp, m.KnownLeader)\n\tsetConsistency(resp, m.ConsistencyLevel)\n}",
"func (o *PeopleMultiResponse) SetMeta(v ViewMeta) {\n\to.Meta = &v\n}",
"func (resp *DataResponse) SetMeta(meta interface{}) error {\n\tkind := reflect.ValueOf(meta).Kind()\n\tif kind != reflect.Struct {\n\t\treturn errors.New(\"Argument meta should be of type struct!\")\n\t}\n\tresp.Meta = meta\n\treturn nil\n}",
"func (o *CustomfieldCustomFieldsResponse) SetMeta(v ViewMeta) {\n\to.Meta = &v\n}",
"func (res *Resource) Meta(meta *Meta) *Meta {\n\tif res.GetMeta(meta.Name) != nil {\n\t\tutils.ExitWithMsg(\"Duplicated meta %v defined for resource %v\", meta.Name, res.Name)\n\t}\n\tres.Metas = append(res.Metas, meta)\n\tmeta.baseResource = res\n\tmeta.updateMeta()\n\treturn meta\n}",
"func MetaHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif r.Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\n\tlog.Println(\"Received a GET request for retrieving meta data\")\n\n\t// Query db for the specific search criterion\n\tjs, err := dao.NewEventDao(db.DashDB).MetaMapping()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(js)\n\tlog.Println(\"Meta data obtained successfully\")\n}",
"func (o *DashboardUserDashboardsResponse) SetMeta(v ViewMeta) {\n\to.Meta = &v\n}",
"func (a *App) GetAllMeta(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(types.ContentType, types.ContentTypeApplicationJSON)\n\tdocs, err := a.Meta.GetMetaDocAll(a.correlationID)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusNotFound, w)\n\t\treturn\n\t}\n\tdocs, _ = StripBlobStore(docs)\n\terr = json.NewEncoder(w).Encode(docs)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusInternalServerError, w)\n\t\treturn\n\t}\n}",
"func (e *Extractor) Meta(key, val string) error {\n\tif e.info == nil {\n\t\te.info = make(map[string]string, 1)\n\t}\n\te.info[key] = val\n\treturn nil\n}",
"func (src *BgplgdSource) makeResponseMeta() *api.Meta {\n\treturn &api.Meta{\n\t\tCacheStatus: api.CacheStatus{\n\t\t\tCachedAt: time.Now().UTC(),\n\t\t},\n\t\tVersion: BgplgdSourceVersion,\n\t\tResultFromCache: false,\n\t\tTTL: time.Now().UTC().Add(src.cfg.CacheTTL),\n\t}\n}",
"func (m *Mutator) Meta(ctx context.Context) (Meta, error) {\n\tif err := m.cache(ctx); err != nil {\n\t\treturn Meta{}, errors.Wrap(err, \"getting cache failed\")\n\t}\n\n\tvar created time.Time\n\tif m.config.Created != nil {\n\t\tcreated = *m.config.Created\n\t}\n\treturn Meta{\n\t\tCreated: created,\n\t\tAuthor: m.config.Author,\n\t\tArchitecture: m.config.Architecture,\n\t\tOS: m.config.OS,\n\t}, nil\n}",
"func Meta(\n\tcli plugin.CliConnection,\n\targs []string,\n\tc http.Client,\n\tlog Logger,\n\ttableWriter io.Writer,\n\tmopts ...MetaOption,\n) {\n\topts := getOptions(args, log, mopts...)\n\tclient := createLogCacheClient(c, log, cli)\n\ttw := tabwriter.NewWriter(tableWriter, 0, 2, 2, ' ', 0)\n\tusername, err := cli.Username()\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not get username: %s\", err)\n\t}\n\n\tvar originalMeta map[string]*logcache_v1.MetaInfo\n\tvar currentMeta map[string]*logcache_v1.MetaInfo\n\twriteRetrievingMetaHeader(opts, tw, username)\n\tcurrentMeta, err = client.Meta(context.TODO())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to read Meta information: %s\", err)\n\t}\n\n\tif opts.EnableNoise {\n\t\toriginalMeta = currentMeta\n\t\twriteWaiting(opts, tw, username)\n\t\ttime.Sleep(opts.metaNoiseSleepDuration)\n\t\twriteRetrievingMetaHeader(opts, tw, username)\n\t\tcurrentMeta, err = client.Meta(context.TODO())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to read Meta information: %s\", err)\n\t\t}\n\t}\n\n\tresources := make(map[string]source)\n\tif !opts.ShowGUID {\n\t\twriteAppsAndServicesHeader(opts, tw, username)\n\t\tresources, err = getSourceInfo(currentMeta, cli)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to read application information: %s\", err)\n\t\t}\n\t}\n\n\twriteHeaders(opts, tw, username)\n\n\trows := toDisplayRows(resources, currentMeta, originalMeta)\n\trows = filterRows(opts, rows)\n\tsortRows(opts, rows)\n\n\tfor _, r := range rows {\n\t\tformat, items := tableFormat(opts, r)\n\t\tfmt.Fprintf(tw, format, items...)\n\t}\n\n\tif err = tw.Flush(); err != nil {\n\t\tlog.Fatalf(\"Error writing results\")\n\t}\n}",
"func (r *Request) SetMeta(v interface{}) error {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Meta = (*json.RawMessage)(&b)\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCreateSubCategoryCreated creates CreateSubCategoryCreated with default headers values
|
func NewCreateSubCategoryCreated() *CreateSubCategoryCreated {
return &CreateSubCategoryCreated{}
}
|
[
"func NewSubCategoryTemplate()(*SubCategoryTemplate) {\n m := &SubCategoryTemplate{\n FilePlanDescriptorTemplate: *NewFilePlanDescriptorTemplate(),\n }\n return m\n}",
"func NewSubCategory()(*SubCategory) {\n m := &SubCategory{\n FilePlanDescriptorBase: *NewFilePlanDescriptorBase(),\n }\n return m\n}",
"func CreateCategory (w http.ResponseWriter, r *http.Request) {\n\tvar newCategory Category\n\n\t//get the information containing in request's body\n\t//or report an error\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name and description only in order to update\")\n\t\tlog.Fatal(err.Error())\n\t}\n\n\t//generate unique categoryID\n\tnewCategory.CategoryID = xid.New().String()\n\t//unmarshal the information from JSON into the Category instance\n\t//or report an error\n\tif err = json.Unmarshal(reqBody, &newCategory); err != nil {\n\t\tlog.Printf(\"Body parse error, %v\", err.Error())\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\t//CategoryName is required field\n\tif len(newCategory.CategoryName) == 0 {\n\t\tw.WriteHeader(422)\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name in order to create new category\")\n\t\treturn\n\t}\n\n\t//append the new category to the slice\n\tCategories = append(Categories, newCategory)\n\tw.WriteHeader(http.StatusCreated)\n\n\t//return the category in response\n\t//or report an error\n\tif err = json.NewEncoder(w).Encode(newCategory); err != nil {\n\t\tlog.Printf(err.Error())\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n}",
"func (w *ServerInterfaceWrapper) CreateCategory(ctx echo.Context) error {\n\tvar err error\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.CreateCategory(ctx)\n\treturn err\n}",
"func (aih *AdminCategoriesHandler) CreateCategories(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tcontxt := r.Context()\n\tvar ctx, _ = context.WithTimeout(contxt, 30*time.Second)\n\n\tif err := r.ParseMultipartForm(global.MaxUploadSize); err != nil {\n\t\t//fmt.Printf(\"Could not parse multipart form: %v\\n\", err)\n\t\thelpers.RenderResponse(w, err, global.ParseFile, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// form is found in github.com/birukbelay/items/utils/validators/form\n\tVErrors, valid := FormValidators.FormCategoriesValidator(r.PostForm)\n\tif !valid {\n\t\thelpers.RenderResponse(w, VErrors, global.ParseFile, http.StatusBadRequest)\n\t\treturn\n\t}\n\tcategories, er := InitiateCategories(r.PostForm)\n\tif er!=nil{\n\t\thelpers.RenderResponse(w, er, global.CategoriesInitialization, http.StatusBadRequest)\n\t}\n\n\n\n\t//categories.ID = primitive.NewObjectID()\n\n\t// IMAGE UPLOAD\n\timg, err, status, statusCode := helpers.UploadFile(r, false, \"\", \"categories\" )\n\tif err != nil {\n\t\thelpers.RenderResponse(w, err, status, statusCode)\n\t\treturn\n\t}\n\tcategories.Image = img\n\n\t// calling the service\n\tgen, errs := aih.categoriesService.StoreCategories(ctx, categories)\n\tif len(errs) > 0 {\n\t\thelpers.RenderResponse(w, errs, global.StatusInternalServerError, 404)\n\t\treturn\n\t}\n\n\thelpers.RenderResponse(w, gen, global.Success, http.StatusCreated)\n\n\t//p := fmt.Sprintf(\"/v1/admin/Categories/%d\", categories.ID)\n\t//w.Header().Set(\"Location\", p)\n\treturn\n}",
"func CreateCategory(w http.ResponseWriter, req *http.Request) {\n\t// esta variable es el body de categoria, como todos los campos que tenga\n\tvar body domain.Category\n\n\t// comprueba que lo que le hemos pasado tiene los campos que corresponde\n\tif err := parseBody(req, &body); err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\t_, err := domain.InsertCaterogy(body)\n\tif err != nil {\n\t\tbadRequest(w, err.Error())\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, body)\n}",
"func (ch *Handler) CreateCategory(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"App : POST /app/category API hit!\")\n\tvar request CreateCategoryRequest\n\tbody := json.NewDecoder(r.Body)\n\terr := body.Decode(&request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tutils.Fail(w, 400, utils.DecodeErrorCode, err.Error())\n\t\treturn\n\t}\n\tvalidator := validator.New()\n\terr = validator.Struct(request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tutils.Fail(w, 400, utils.ValidationErrorCode, err.Error())\n\t\treturn\n\t}\n\tcategory, err := ch.cs.CreateCategory(r.Context(), &request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tif err.Error() == utils.CategoryNameExistsError {\n\t\t\tutils.Fail(w, 200, utils.CategoryNameExistsErrorCode, err.Error())\n\t\t\treturn\n\t\t}\n\t\tutils.Fail(w, 500, utils.DatabaseErrorCode, err.Error())\n\t\treturn\n\t}\n\tlog.Println(\"App : category created! id_category : \", category.ID)\n\tutils.Send(w, 200, category)\n}",
"func (gpi *Interpreter) CreateSub(name string) *Sub {\n\treturn gpi.sub(name, C.GV_ADD)\n}",
"func CategoriesCreatePOST(c *gin.Context) {\n\tcategory := models.Category{}\n\tcategory.Name = c.PostForm(\"name\")\n\tcategory.Intro = c.PostForm(\"intro\")\n\tcategory.Content = c.PostForm(\"content\")\n\tcategory.Title = c.PostForm(\"title\")\n\tcategory.Description = c.PostForm(\"description\")\n\tcategory.Type = c.PostForm(\"type\")\n\tfile, _ := c.FormFile(\"image\")\n\tif file != nil {\n\t\tif _, err := os.Stat(\"public/upload/\" + category.Type); os.IsNotExist(err) {\n\t\t\tfmt.Println(\"create folder\")\n\t\t\tos.Mkdir(\"public/upload/\"+category.Type, 0755)\n\t\t}\n\t\tc.SaveUploadedFile(file, \"public/upload/\"+category.Type)\n\n\t\tcategory.Image = file.Filename\n\t}\n\tc.JSON(http.StatusBadRequest, gin.H{\"error\": category})\n}",
"func (o *Enterprise) CreateWebCategory(child *WebCategory) *bambou.Error {\n\n\treturn bambou.CurrentSession().CreateChild(o, child)\n}",
"func (o *CreateSubCategoryCreated) WithPayload(payload *models.SubCategory) *CreateSubCategoryCreated {\n\to.Payload = payload\n\treturn o\n}",
"func NewCreateSubCategoryBadRequest() *CreateSubCategoryBadRequest {\n\treturn &CreateSubCategoryBadRequest{}\n}",
"func (_Mcapscontroller *McapscontrollerSession) CreateCategory(metadataHash [32]byte) (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.CreateCategory(&_Mcapscontroller.TransactOpts, metadataHash)\n}",
"func (_Mcapscontroller *McapscontrollerTransactorSession) CreateCategory(metadataHash [32]byte) (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.CreateCategory(&_Mcapscontroller.TransactOpts, metadataHash)\n}",
"func createCategory(w http.ResponseWriter, r *http.Request) {\n\tdata := CategoryRequest{}\n\tif err := render.Bind(r, &data); err != nil {\n\t\trender.Render(w, r, ErrInvalidRequest(err))\n\t\treturn\n\t}\n\n\tif err := dbNewCategory(data.Category); err != nil {\n\t\trender.Render(w, r, ErrInvalidRequest(err))\n\t\treturn\n\t}\n\n\trender.Status(r, http.StatusCreated)\n\trender.Render(w, r, newCategoryResponse(data.Category))\n}",
"func (app *App) createCategory(resp http.ResponseWriter, req *http.Request) {\n\tlog.Println(\"/category\")\n\tvar cat *category.CategoryResp\n\t//Decoding the request\n\terr := json.NewDecoder(req.Body).Decode(&cat)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusInternalServerError, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Category nil check\n\tif cat == nil {\n\t\tresp, err = NewMessage(\"category must not be empty\", http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t// Subcategories id mapping\n\terr = MapWithId(cat, 0)\n\tif err != nil {\n\t\tresp, err = NewMessage(\"sub categories nesting limit exceeded\", http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\t//Products id mapping and variant mapping\n\tfor _, prod := range cat.Products {\n\t\tprod.Id = helper.New(Products)\n\t\tfor _, variant := range prod.Variants {\n\t\t\tvariant.Id = helper.New(Var)\n\t\t}\n\t}\n\n\t//Insert the categories\n\terr = category.InsertCategories(cat, app.DB)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Insert Nested products and its variant if there is nesting exist\n\tif err := category.InsertProductsAndVariant(cat, app.DB); err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Binding the response\n\tresp, err = BindResponse(cat, resp, http.StatusOK)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusInternalServerError, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n}",
"func TestCreateCategoryEmptyBody (t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//empty body\n\trequestBody := &Category{}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 422, rr.Code, \"Unprocessable Entity response is expected\")\n\t//the length of []Categories should not change after trying to create new empty category\n\tassert.Equal(t, initialLen, len(Categories), \"Expected length to stay the same after adding empty category name\")\n}",
"func TestCreateCategory(t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//parameters passed to request body\n\trequestBody := &Category{\n\t\tCategoryName: \t\t\"Super Cool Category\",\n\t\tCategoryDescription: \"Brand new cool Category\",\n\t}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\t//Create a request to pass to the handler with request body as a third parameter\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 201, rr.Code, \"Created response is expected\")\n\t//the length of []Categories should increase after creating new category\n\tassert.NotEqual(t, initialLen, len(Categories), \"Expected length to increase after creating new Category\")\n}",
"func CreateSubCategoryTemplateFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewSubCategoryTemplate(), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithPayload adds the payload to the create sub category created response
|
func (o *CreateSubCategoryCreated) WithPayload(payload *models.SubCategory) *CreateSubCategoryCreated {
o.Payload = payload
return o
}
|
[
"func (o *CreateSubCategoryCreated) SetPayload(payload *models.SubCategory) {\n\to.Payload = payload\n}",
"func CreateCategory (w http.ResponseWriter, r *http.Request) {\n\tvar newCategory Category\n\n\t//get the information containing in request's body\n\t//or report an error\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name and description only in order to update\")\n\t\tlog.Fatal(err.Error())\n\t}\n\n\t//generate unique categoryID\n\tnewCategory.CategoryID = xid.New().String()\n\t//unmarshal the information from JSON into the Category instance\n\t//or report an error\n\tif err = json.Unmarshal(reqBody, &newCategory); err != nil {\n\t\tlog.Printf(\"Body parse error, %v\", err.Error())\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\t//CategoryName is required field\n\tif len(newCategory.CategoryName) == 0 {\n\t\tw.WriteHeader(422)\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name in order to create new category\")\n\t\treturn\n\t}\n\n\t//append the new category to the slice\n\tCategories = append(Categories, newCategory)\n\tw.WriteHeader(http.StatusCreated)\n\n\t//return the category in response\n\t//or report an error\n\tif err = json.NewEncoder(w).Encode(newCategory); err != nil {\n\t\tlog.Printf(err.Error())\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n}",
"func (ch *Handler) CreateCategory(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"App : POST /app/category API hit!\")\n\tvar request CreateCategoryRequest\n\tbody := json.NewDecoder(r.Body)\n\terr := body.Decode(&request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tutils.Fail(w, 400, utils.DecodeErrorCode, err.Error())\n\t\treturn\n\t}\n\tvalidator := validator.New()\n\terr = validator.Struct(request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tutils.Fail(w, 400, utils.ValidationErrorCode, err.Error())\n\t\treturn\n\t}\n\tcategory, err := ch.cs.CreateCategory(r.Context(), &request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tif err.Error() == utils.CategoryNameExistsError {\n\t\t\tutils.Fail(w, 200, utils.CategoryNameExistsErrorCode, err.Error())\n\t\t\treturn\n\t\t}\n\t\tutils.Fail(w, 500, utils.DatabaseErrorCode, err.Error())\n\t\treturn\n\t}\n\tlog.Println(\"App : category created! id_category : \", category.ID)\n\tutils.Send(w, 200, category)\n}",
"func (o *CreateACLAccepted) WithPayload(payload *models.ACL) *CreateACLAccepted {\n\to.Payload = payload\n\treturn o\n}",
"func NewSubCategory()(*SubCategory) {\n m := &SubCategory{\n FilePlanDescriptorBase: *NewFilePlanDescriptorBase(),\n }\n return m\n}",
"func (app *App) createCategory(resp http.ResponseWriter, req *http.Request) {\n\tlog.Println(\"/category\")\n\tvar cat *category.CategoryResp\n\t//Decoding the request\n\terr := json.NewDecoder(req.Body).Decode(&cat)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusInternalServerError, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Category nil check\n\tif cat == nil {\n\t\tresp, err = NewMessage(\"category must not be empty\", http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t// Subcategories id mapping\n\terr = MapWithId(cat, 0)\n\tif err != nil {\n\t\tresp, err = NewMessage(\"sub categories nesting limit exceeded\", http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\t//Products id mapping and variant mapping\n\tfor _, prod := range cat.Products {\n\t\tprod.Id = helper.New(Products)\n\t\tfor _, variant := range prod.Variants {\n\t\t\tvariant.Id = helper.New(Var)\n\t\t}\n\t}\n\n\t//Insert the categories\n\terr = category.InsertCategories(cat, app.DB)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Insert Nested products and its variant if there is nesting exist\n\tif err := category.InsertProductsAndVariant(cat, app.DB); err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Binding the response\n\tresp, err = BindResponse(cat, resp, http.StatusOK)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusInternalServerError, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n}",
"func CreateCategory(w http.ResponseWriter, req *http.Request) {\n\t// esta variable es el body de categoria, como todos los campos que tenga\n\tvar body domain.Category\n\n\t// comprueba que lo que le hemos pasado tiene los campos que corresponde\n\tif err := parseBody(req, &body); err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\t_, err := domain.InsertCaterogy(body)\n\tif err != nil {\n\t\tbadRequest(w, err.Error())\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, body)\n}",
"func (w *ServerInterfaceWrapper) CreateCategory(ctx echo.Context) error {\n\tvar err error\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.CreateCategory(ctx)\n\treturn err\n}",
"func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}",
"func (o *CreateAdOK) WithPayload(payload *models.CreateAdResp) *CreateAdOK {\n\to.Payload = payload\n\treturn o\n}",
"func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) WithPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated {\n\to.Payload = payload\n\treturn o\n}",
"func (h CategoryHandler) Create(c echo.Context) error {\n\tclaims, err := auth.ExtractClaims(c)\n\tif err != nil {\n\t\tlogrus.Errorf(\"create category: failed to extract claims: %s\", err)\n\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, \"failed to extract jwt claims\")\n\t}\n\n\tif !claims.Privieged {\n\t\treturn echo.NewHTTPError(http.StatusUnauthorized, \"only admin can use this endpoint\")\n\t}\n\n\treq := &request.CreateCategory{}\n\n\tif err := c.Bind(req); err != nil {\n\t\tlogrus.Errorf(\"create category: bind failed: %s\", err.Error())\n\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"bind request failed: %s\", err))\n\t}\n\n\tif err := c.Validate(req); err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"bad request: %s\", err.Error()))\n\t}\n\n\tcategory := &model.Category{\n\t\tName: req.Name,\n\t}\n\n\tif err := h.CategoryRepo.Save(category); err != nil {\n\t\tlogrus.Errorf(\"failed to create category: %s\", err)\n\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, \"failed to create category\")\n\t}\n\n\treturn c.JSON(http.StatusCreated, category)\n}",
"func NewSubCategoryTemplate()(*SubCategoryTemplate) {\n m := &SubCategoryTemplate{\n FilePlanDescriptorTemplate: *NewFilePlanDescriptorTemplate(),\n }\n return m\n}",
"func (cntrlr *Controller) AddCategory(w http.ResponseWriter, r *http.Request) {\n\tid := mux.Vars(r)[\"userid\"]\n\tuid, err := web.ParseID(id)\n\tcategory := *models.NewCategoryWithUserID(*uid)\n\terr = web.UnmarshalJSON(r, &category)\n\tif err != nil {\n\t\tweb.RespondError(&w, web.NewValidationError(\"Form Parse\", map[string]string{\"error\": \"data can't handle\"}))\n\t\treturn\n\t}\n\n\tif category.GetCategoryName() == \"\" {\n\t\tweb.RespondError(&w, web.NewValidationError(\"Require\", map[string]string{\"error\": \"Category Name Required\"}))\n\t\treturn\n\t}\n\t// category.UserID = *uid\n\tcategory.ID = web.GetUUID()\n\terr = cntrlr.bmcsrv.AddBookmarkCategory(&category)\n\tif err != nil {\n\t\tweb.RespondError(&w, err)\n\t\treturn\n\t}\n\tfmt.Println(category.GetCategoryID())\n\tweb.RespondJSON(&w, http.StatusOK, category.GetCategoryID())\n}",
"func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}",
"func (o *CreateAdOK) SetPayload(payload *models.CreateAdResp) {\n\to.Payload = payload\n}",
"func createCategory(w http.ResponseWriter, r *http.Request) {\n\tdata := CategoryRequest{}\n\tif err := render.Bind(r, &data); err != nil {\n\t\trender.Render(w, r, ErrInvalidRequest(err))\n\t\treturn\n\t}\n\n\tif err := dbNewCategory(data.Category); err != nil {\n\t\trender.Render(w, r, ErrInvalidRequest(err))\n\t\treturn\n\t}\n\n\trender.Status(r, http.StatusCreated)\n\trender.Render(w, r, newCategoryResponse(data.Category))\n}",
"func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}",
"func TestCreateCategory(t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//parameters passed to request body\n\trequestBody := &Category{\n\t\tCategoryName: \t\t\"Super Cool Category\",\n\t\tCategoryDescription: \"Brand new cool Category\",\n\t}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\t//Create a request to pass to the handler with request body as a third parameter\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 201, rr.Code, \"Created response is expected\")\n\t//the length of []Categories should increase after creating new category\n\tassert.NotEqual(t, initialLen, len(Categories), \"Expected length to increase after creating new Category\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetPayload sets the payload to the create sub category created response
|
func (o *CreateSubCategoryCreated) SetPayload(payload *models.SubCategory) {
o.Payload = payload
}
|
[
"func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}",
"func (o *CreateSubCategoryCreated) WithPayload(payload *models.SubCategory) *CreateSubCategoryCreated {\n\to.Payload = payload\n\treturn o\n}",
"func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}",
"func (o *CreateAdOK) SetPayload(payload *models.CreateAdResp) {\n\to.Payload = payload\n}",
"func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}",
"func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}",
"func (o *CreateFoldersCreated) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}",
"func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}",
"func (o *CreateACLCreated) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}",
"func (o *AddComponentRoleInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}",
"func (o *PostCharactersCharacterIDCspaCreated) SetPayload(payload *models.PostCharactersCharacterIDCspaCreatedBody) {\n\to.Payload = payload\n}",
"func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}",
"func (o *AddComponentRoleCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}",
"func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}",
"func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}",
"func (o *CreateCoreV1NamespacedConfigMapCreated) SetPayload(payload *models.IoK8sAPICoreV1ConfigMap) {\n\to.Payload = payload\n}",
"func (o *PatchFoldersIDOK) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}",
"func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}",
"func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusOK) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCreateSubCategoryBadRequest creates CreateSubCategoryBadRequest with default headers values
|
func NewCreateSubCategoryBadRequest() *CreateSubCategoryBadRequest {
return &CreateSubCategoryBadRequest{}
}
|
[
"func NewCategoryCreateBadRequest() *CategoryCreateBadRequest {\n\treturn &CategoryCreateBadRequest{}\n}",
"func NewCreateCategoryBadRequest() *CreateCategoryBadRequest {\n\treturn &CreateCategoryBadRequest{}\n}",
"func TestCreateCategoryEmptyBody (t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//empty body\n\trequestBody := &Category{}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 422, rr.Code, \"Unprocessable Entity response is expected\")\n\t//the length of []Categories should not change after trying to create new empty category\n\tassert.Equal(t, initialLen, len(Categories), \"Expected length to stay the same after adding empty category name\")\n}",
"func CreateCompanyBranchHyCompanybranchBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.CreateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"POST\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tcreateCompanyBranchCtx, __err := app.NewCreateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\treturn nil, _e\n\t}\n\tcreateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.CreateCompanyBranch(createCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, 
logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(error)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}",
"func CreateBadRequest(errorMessage string) BadRequest {\n\treturn BadRequest{Error: errorMessage}\n}",
"func (ch *Handler) CreateCategory(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"App : POST /app/category API hit!\")\n\tvar request CreateCategoryRequest\n\tbody := json.NewDecoder(r.Body)\n\terr := body.Decode(&request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tutils.Fail(w, 400, utils.DecodeErrorCode, err.Error())\n\t\treturn\n\t}\n\tvalidator := validator.New()\n\terr = validator.Struct(request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tutils.Fail(w, 400, utils.ValidationErrorCode, err.Error())\n\t\treturn\n\t}\n\tcategory, err := ch.cs.CreateCategory(r.Context(), &request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tif err.Error() == utils.CategoryNameExistsError {\n\t\t\tutils.Fail(w, 200, utils.CategoryNameExistsErrorCode, err.Error())\n\t\t\treturn\n\t\t}\n\t\tutils.Fail(w, 500, utils.DatabaseErrorCode, err.Error())\n\t\treturn\n\t}\n\tlog.Println(\"App : category created! id_category : \", category.ID)\n\tutils.Send(w, 200, category)\n}",
"func NewCreateSubaccountBadRequest() *CreateSubaccountBadRequest {\n\treturn &CreateSubaccountBadRequest{}\n}",
"func TestCreateCategoryWrongJSONSyntax(t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//parameters passed to request body\n\trequestBody := `{{\"CategoryID\":\"bq4fasj7jhfi127rimlg\",\"CategoryName\":\"Name\",,,}}`\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBufferString(requestBody))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 400, rr.Code, \"Bad request response is expected\")\n\tassert.Equal(t, initialLen, len(Categories), \"Expected length to stay the same after wrong syntax json\")\n\n}",
"func newErrBadRequest(ctx *context.T, err error) error {\n\treturn verror.New(errBadRequest, ctx, err)\n}",
"func CreateCompanyHyCompanyBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanyController, payload *app.CreateCompanyHyCompanyPayload) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company\"),\n\t}\n\treq, _err := http.NewRequest(\"POST\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanyTest\"), rw, req, prms)\n\tcreateCompanyCtx, __err := app.NewCreateCompanyHyCompanyContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\treturn nil, _e\n\t}\n\tcreateCompanyCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.CreateCompany(createCompanyCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt 
error\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(error)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}",
"func ValidateCreateBadRequestResponseBody(body *CreateBadRequestResponseBody) (err error) {\n\tif body.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"body\"))\n\t}\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Message == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"message\", \"body\"))\n\t}\n\tif body.Temporary == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"temporary\", \"body\"))\n\t}\n\tif body.Timeout == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"timeout\", \"body\"))\n\t}\n\tif body.Fault == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"fault\", \"body\"))\n\t}\n\treturn\n}",
"func CreateCategory (w http.ResponseWriter, r *http.Request) {\n\tvar newCategory Category\n\n\t//get the information containing in request's body\n\t//or report an error\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name and description only in order to update\")\n\t\tlog.Fatal(err.Error())\n\t}\n\n\t//generate unique categoryID\n\tnewCategory.CategoryID = xid.New().String()\n\t//unmarshal the information from JSON into the Category instance\n\t//or report an error\n\tif err = json.Unmarshal(reqBody, &newCategory); err != nil {\n\t\tlog.Printf(\"Body parse error, %v\", err.Error())\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\t//CategoryName is required field\n\tif len(newCategory.CategoryName) == 0 {\n\t\tw.WriteHeader(422)\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name in order to create new category\")\n\t\treturn\n\t}\n\n\t//append the new category to the slice\n\tCategories = append(Categories, newCategory)\n\tw.WriteHeader(http.StatusCreated)\n\n\t//return the category in response\n\t//or report an error\n\tif err = json.NewEncoder(w).Encode(newCategory); err != nil {\n\t\tlog.Printf(err.Error())\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n}",
"func NewSubCategory()(*SubCategory) {\n m := &SubCategory{\n FilePlanDescriptorBase: *NewFilePlanDescriptorBase(),\n }\n return m\n}",
"func NewBadRequest(msg string, details ...string) APIError {\n\treturn NewAPIError(msg, strings.Join(details, \", \"), http.StatusBadRequest)\n}",
"func (app *App) createCategory(resp http.ResponseWriter, req *http.Request) {\n\tlog.Println(\"/category\")\n\tvar cat *category.CategoryResp\n\t//Decoding the request\n\terr := json.NewDecoder(req.Body).Decode(&cat)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusInternalServerError, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Category nil check\n\tif cat == nil {\n\t\tresp, err = NewMessage(\"category must not be empty\", http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t// Subcategories id mapping\n\terr = MapWithId(cat, 0)\n\tif err != nil {\n\t\tresp, err = NewMessage(\"sub categories nesting limit exceeded\", http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\t//Products id mapping and variant mapping\n\tfor _, prod := range cat.Products {\n\t\tprod.Id = helper.New(Products)\n\t\tfor _, variant := range prod.Variants {\n\t\t\tvariant.Id = helper.New(Var)\n\t\t}\n\t}\n\n\t//Insert the categories\n\terr = category.InsertCategories(cat, app.DB)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Insert Nested products and its variant if there is nesting exist\n\tif err := category.InsertProductsAndVariant(cat, app.DB); err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusPreconditionFailed, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t//Binding the response\n\tresp, err = BindResponse(cat, resp, http.StatusOK)\n\tif err != nil {\n\t\tresp, err = NewMessage(err.Error(), http.StatusInternalServerError, resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n}",
"func CreateBadRequestResponse(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tbytes, _ := json.Marshal(err.Error())\n\tw.Write(bytes)\n}",
"func SetNewBadRequestByFormat(ef *ErrorFormat) *ErrorMessage {\n\treturn &ErrorMessage{\n\t\tCode: http.StatusBadRequest,\n\t\tErrorList: []*ErrorFormat{\n\t\t\tef,\n\t\t},\n\t}\n}",
"func NewCreateBadRequest(body *CreateBadRequestResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}",
"func ValidateCreateCompanyBadRequestResponseBody(body *CreateCompanyBadRequestResponseBody) (err error) {\n\tif body.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"body\"))\n\t}\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Message == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"message\", \"body\"))\n\t}\n\tif body.Temporary == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"temporary\", \"body\"))\n\t}\n\tif body.Timeout == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"timeout\", \"body\"))\n\t}\n\tif body.Fault == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"fault\", \"body\"))\n\t}\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
New creates processor for k8s Secret resource.
|
func New() helmify.Processor {
return &secret{}
}
|
[
"func Create(c *client.Client, i *Instance) error {\n\tsecretType, err := detectSecretType(i.Type)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecret := v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: i.Name,\n\t\t\tNamespace: i.Namespace,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\ti.Key: []byte(i.Value),\n\t\t},\n\t\tType: secretType,\n\t}\n\t_, err = c.Clientset.CoreV1().Secrets(i.Namespace).Create(\n\t\tcontext.TODO(),\n\t\t&secret,\n\t\tmetav1.CreateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (h *Handler) CreateSecret(cr *v1alpha1.SecretKMS) error {\n\tsecret := &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Spec.Secret,\n\t\t\tNamespace: cr.Namespace,\n\t\t},\n\t}\n\n\terr := sdk.Get(secret)\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif err != nil && !k8serrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\n\tlogrus.Debugf(\"Creating Secret from SecretKMS %s\", cr.Name)\n\n\tparent := fmt.Sprintf(\"projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s\",\n\t\tcr.Spec.Provider.GoogleCloud.Project,\n\t\tcr.Spec.Provider.GoogleCloud.Location,\n\t\tcr.Spec.Provider.GoogleCloud.Keyring,\n\t\tcr.Spec.Provider.GoogleCloud.Key)\n\n\tb, err := base64.StdEncoding.DecodeString(cr.Spec.Provider.GoogleCloud.Data)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"malformed data in SecretKMS\")\n\t}\n\n\treq := &kmspb.DecryptRequest{\n\t\tName: parent,\n\t\tCiphertext: b,\n\t}\n\n\tlogrus.Debugln(\"Data\", cr.Spec.Provider.GoogleCloud.Data)\n\n\tlogrus.Debugln(\"Sending decrypt request\")\n\tresp, err := h.CloudKMS.Decrypt(context.Background(), req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecret.Data = make(map[string][]byte)\n\tsecret.Data[cr.Spec.File] = resp.Plaintext\n\n\treturn sdk.Create(secret)\n}",
"func CreateSecret(cr, namespace, app, description, name, key, value string) *corev1.Secret {\n\tlabels := map[string]string{\n\t\t\"app\": app,\n\t\t\"deployedby\": \"aqua-operator\",\n\t\t\"aquasecoperator_cr\": cr,\n\t}\n\tannotations := map[string]string{\n\t\t\"description\": description,\n\t}\n\tsecret := &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"core/v1\",\n\t\t\tKind: \"Secret\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tType: corev1.SecretTypeOpaque,\n\t\tData: map[string][]byte{\n\t\t\tkey: []byte(value),\n\t\t},\n\t}\n\n\treturn secret\n}",
"func newSecretForCR(cr *crdv1alpha1.VaultSecret) *corev1.Secret {\n\n\tsecret := vault.GetSecret(cr.Spec.Path)\n\n\tcr.Status.RequestId = secret.RequestID\n\n\tvar secretMap map[string][]byte\n\tsecretMap = make(map[string][]byte)\n\tfor key, secret := range secret.Data {\n\t\tsecretMap[key] = []byte(secret.(string))\n\t}\n\n\tlabels := map[string]string{\n\t\t\"app\": cr.Name,\n\t}\n\treturn &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tType: \"Opaque\",\n\t\tData: secretMap,\n\t}\n}",
"func NewSecretFromRuntime(obj interface{}, config CtorConfig) K8sResource {\n\ts := &Secret{}\n\ts.FromRuntime(obj, config)\n\treturn s\n}",
"func New() *Secret {\n\treturn &Secret{}\n}",
"func newSecretObj(name, namespace string, secretType corev1.SecretType) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: corev1.SchemeGroupVersion.String(),\n\t\t\tKind: \"Secret\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tType: secretType,\n\t\tData: map[string][]byte{},\n\t}\n}",
"func newGCPSecretCR(namespace, creds string) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tType: \"Opaque\",\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: gcpSecretName,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"osServiceAccount.json\": []byte(creds),\n\t\t},\n\t}\n}",
"func (ct *controllerTest) CreateSecret(name string, data map[string][]byte) error {\n\t_, err := ct.k8sClient.CoreV1().Secrets(testNamespace).Create(&corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: testNamespace,\n\t\t\tName: name,\n\t\t},\n\t\tData: data,\n\t})\n\treturn err\n}",
"func NewSecret(ctx *pulumi.Context,\n\tname string, args *SecretArgs, opts ...pulumi.ResourceOption) (*Secret, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Replication == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Replication'\")\n\t}\n\tif args.SecretId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'SecretId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"project\",\n\t\t\"secretId\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Secret\n\terr := ctx.RegisterResource(\"google-native:secretmanager/v1:Secret\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (c *secrets) Create(ctx context.Context, secret *v1.Secret,\n\topts metav1.CreateOptions) (result *v1.Secret, err error) {\n\tresult = &v1.Secret{}\n\terr = c.client.Post().\n\t\tResource(\"secrets\").\n\t\tVersionedParams(opts).\n\t\tBody(secret).\n\t\tDo(ctx).\n\t\tInto(result)\n\n\treturn\n}",
"func newSecret(name string) corev1.Secret {\n\tconst (\n\t\t// defaultCert is a PEM-encoded certificate.\n\t\tdefaultCert = `-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBBjANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE2MDExMzE5NDA1N1oXDTI2MDExMDE5NDA1N1owfDEYMBYGA1UEAxMP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIEwJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoTB0V4YW1wbGUx\nEDAOBgNVBAsTB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM0B\nu++oHV1wcphWRbMLUft8fD7nPG95xs7UeLPphFZuShIhhdAQMpvcsFeg+Bg9PWCu\nv3jZljmk06MLvuWLfwjYfo9q/V+qOZVfTVHHbaIO5RTXJMC2Nn+ACF0kHBmNcbth\nOOgF8L854a/P8tjm1iPR++vHnkex0NH7lyosVc/vAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBADjFm5AlNH3DNT1Uzx3m66fFjqqrHEs25geT\nyA3rvBuynflEHQO95M/8wCxYVyuAx4Z1i4YDC7tx0vmOn/2GXZHY9MAj1I8KCnwt\nJik7E2r1/yY0MrkawljOAxisXs821kJ+Z/51Ud2t5uhGxS6hJypbGspMS7OtBbw7\n8oThK7cWtCXOldNF6ruqY1agWnhRdAq5qSMnuBXuicOP0Kbtx51a1ugE3SnvQenJ\nnZxdtYUXvEsHZC/6bAtTfNh+/SwgxQJuL2ZM+VG3X2JIKY8xTDui+il7uTh422lq\nwED8uwKl+bOj6xFDyw4gWoBxRobsbFaME8pkykP1+GnKDberyAM=\n-----END CERTIFICATE-----\n`\n\t\t// defaultKey is a PEM-encoded private key.\n\t\tdefaultKey = `-----BEGIN RSA PRIVATE 
KEY-----\nMIICWwIBAAKBgQDNAbvvqB1dcHKYVkWzC1H7fHw+5zxvecbO1Hiz6YRWbkoSIYXQ\nEDKb3LBXoPgYPT1grr942ZY5pNOjC77li38I2H6Pav1fqjmVX01Rx22iDuUU1yTA\ntjZ/gAhdJBwZjXG7YTjoBfC/OeGvz/LY5tYj0fvrx55HsdDR+5cqLFXP7wIDAQAB\nAoGAfE7P4Zsj6zOzGPI/Izj7Bi5OvGnEeKfzyBiH9Dflue74VRQkqqwXs/DWsNv3\nc+M2Y3iyu5ncgKmUduo5X8D9To2ymPRLGuCdfZTxnBMpIDKSJ0FTwVPkr6cYyyBk\n5VCbc470pQPxTAAtl2eaO1sIrzR4PcgwqrSOjwBQQocsGAECQQD8QOra/mZmxPbt\nbRh8U5lhgZmirImk5RY3QMPI/1/f4k+fyjkU5FRq/yqSyin75aSAXg8IupAFRgyZ\nW7BT6zwBAkEA0A0ugAGorpCbuTa25SsIOMxkEzCiKYvh0O+GfGkzWG4lkSeJqGME\nkeuJGlXrZNKNoCYLluAKLPmnd72X2yTL7wJARM0kAXUP0wn324w8+HQIyqqBj/gF\nVt9Q7uMQQ3s72CGu3ANZDFS2nbRZFU5koxrggk6lRRk1fOq9NvrmHg10AQJABOea\npgfj+yGLmkUw8JwgGH6xCUbHO+WBUFSlPf+Y50fJeO+OrjqPXAVKeSV3ZCwWjKT4\n9viXJNJJ4WfF0bO/XwJAOMB1wQnEOSZ4v+laMwNtMq6hre5K8woqteXICoGcIWe8\nu3YLAbyW/lHhOCiZu2iAI8AbmXem9lW6Tr7p/97s0w==\n-----END RSA PRIVATE KEY-----\n`\n\t)\n\treturn corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"tls.crt\": []byte(defaultCert),\n\t\t\t\"tls.key\": []byte(defaultKey),\n\t\t},\n\t}\n}",
"func newSecretForCR(cr *certmergev1alpha1.CertMerge) *corev1.Secret {\n\tlabels := map[string]string{\n\t\t\"certmerge\": cr.Name,\n\t\t\"creator\": \"certmerge-operator\",\n\t}\n\treturn &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Spec.SecretName,\n\t\t\tNamespace: cr.Spec.SecretNamespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tType: corev1.SecretTypeOpaque,\n\t}\n}",
"func newSecret(ownerID, repoID int64, name, data string) *Secret {\n\treturn &Secret{\n\t\tOwnerID: ownerID,\n\t\tRepoID: repoID,\n\t\tName: strings.ToUpper(name),\n\t\tData: data,\n\t}\n}",
"func createSecret(ingressType ingress.CallType, cn, ns string, ic IngressCredential) *v1.Secret {\n\tif ingressType == ingress.Mtls {\n\t\treturn &v1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: cn,\n\t\t\t\tNamespace: ns,\n\t\t\t},\n\t\t\tData: map[string][]byte{\n\t\t\t\tgenericScrtCert: []byte(ic.ServerCert),\n\t\t\t\tgenericScrtKey: []byte(ic.PrivateKey),\n\t\t\t\tgenericScrtCaCert: []byte(ic.CaCert),\n\t\t\t},\n\t\t}\n\t}\n\treturn &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cn,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\ttlsScrtCert: []byte(ic.ServerCert),\n\t\t\ttlsScrtKey: []byte(ic.PrivateKey),\n\t\t},\n\t}\n}",
"func createSecret(clientset internalclientset.Interface, clientConfig *clientcmdapi.Config, namespace, federationName, joiningClusterName, contextName, secretName string, dryRun bool) (runtime.Object, error) {\n\t// Minify the kubeconfig to ensure that there is only information\n\t// relevant to the cluster we are registering.\n\tnewClientConfig, err := minifyConfig(clientConfig, contextName)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to minify the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\t// Flatten the kubeconfig to ensure that all the referenced file\n\t// contents are inlined.\n\terr = clientcmdapi.FlattenConfig(newClientConfig)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to flatten the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\treturn util.CreateKubeconfigSecret(clientset, newClientConfig, namespace, secretName, federationName, joiningClusterName, dryRun)\n}",
"func newSecrets(c *APIV1Client) *secrets {\n\treturn &secrets{\n\t\tclient: c.RESTClient(),\n\t}\n}",
"func (k *Kubeclient) Create(secret *corev1.Secret) (*corev1.Secret, error) {\n\tif secret == nil {\n\t\treturn nil, errors.New(\"failed to create secret: nil secret object\")\n\t}\n\tcli, err := k.getClientsetOrCached()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create secret\")\n\t}\n\treturn k.create(cli, k.namespace, secret)\n}",
"func Secret(objectMeta metav1.ObjectMeta, data map[string][]byte) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tObjectMeta: objectMeta,\n\t\tData: data,\n\t\tType: secretTypeForData(data),\n\t\tImmutable: pointer.Bool(true),\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Process k8s Secret object into template. Returns false if not capable of processing given resource type.
|
func (d secret) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {
if obj.GroupVersionKind() != configMapGVC {
return false, nil, nil
}
sec := corev1.Secret{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sec)
if err != nil {
return true, nil, errors.Wrap(err, "unable to cast to secret")
}
meta, err := processor.ProcessObjMeta(appMeta, obj)
if err != nil {
return true, nil, err
}
name := appMeta.TrimName(obj.GetName())
nameCamelCase := strcase.ToLowerCamel(name)
secretType := string(sec.Type)
if secretType != "" {
secretType, err = yamlformat.Marshal(map[string]interface{}{"type": secretType}, 0)
if err != nil {
return true, nil, err
}
}
values := helmify.Values{}
var data, stringData string
templatedData := map[string]string{}
for key := range sec.Data {
keyCamelCase := strcase.ToLowerCamel(key)
if key == strings.ToUpper(key) {
keyCamelCase = strcase.ToLowerCamel(strings.ToLower(key))
}
templatedName, err := values.AddSecret(true, nameCamelCase, keyCamelCase)
if err != nil {
return true, nil, errors.Wrap(err, "unable add secret to values")
}
templatedData[key] = templatedName
}
if len(templatedData) != 0 {
data, err = yamlformat.Marshal(map[string]interface{}{"data": templatedData}, 0)
if err != nil {
return true, nil, err
}
data = strings.ReplaceAll(data, "'", "")
data = format.FixUnterminatedQuotes(data)
}
templatedData = map[string]string{}
for key := range sec.StringData {
keyCamelCase := strcase.ToLowerCamel(key)
if key == strings.ToUpper(key) {
keyCamelCase = strcase.ToLowerCamel(strings.ToLower(key))
}
templatedName, err := values.AddSecret(false, nameCamelCase, keyCamelCase)
if err != nil {
return true, nil, errors.Wrap(err, "unable add secret to values")
}
templatedData[key] = templatedName
}
if len(templatedData) != 0 {
stringData, err = yamlformat.Marshal(map[string]interface{}{"stringData": templatedData}, 0)
if err != nil {
return true, nil, err
}
stringData = strings.ReplaceAll(stringData, "'", "")
stringData = format.FixUnterminatedQuotes(stringData)
}
return true, &result{
name: name + ".yaml",
data: struct {
Type string
Meta string
Data string
StringData string
}{Type: secretType, Meta: meta, Data: data, StringData: stringData},
values: values,
}, nil
}
|
[
"func isSecret(resource v1alpha1.BackingServiceResource) bool {\n\treturn strings.ToLower(resource.Group+\".\"+resource.Version+\".\"+resource.Kind) == \".v1.secret\"\n}",
"func UnmarshalSecret(m map[string]json.RawMessage, result interface{}) (err error) {\n\t// Retrieve discriminator value to determine correct \"subclass\".\n\tvar discValue string\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &discValue)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshalling discriminator property 'secret_type': %s\", err.Error())\n\t\treturn\n\t}\n\tif discValue == \"\" {\n\t\terr = fmt.Errorf(\"required discriminator property 'secret_type' not found in JSON object\")\n\t\treturn\n\t}\n\tif discValue == \"arbitrary\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalArbitrarySecret)\n\t} else if discValue == \"imported_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalImportedCertificate)\n\t} else if discValue == \"public_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPublicCertificate)\n\t} else if discValue == \"private_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPrivateCertificate)\n\t} else if discValue == \"kv\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalKVSecret)\n\t} else if discValue == \"iam_credentials\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalIAMCredentialsSecret)\n\t} else if discValue == \"username_password\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalUsernamePasswordSecret)\n\t} else {\n\t\terr = fmt.Errorf(\"unrecognized value for discriminator property 'secret_type': %s\", discValue)\n\t}\n\treturn\n}",
"func Secret(objectMeta metav1.ObjectMeta, data map[string][]byte) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tObjectMeta: objectMeta,\n\t\tData: data,\n\t\tType: secretTypeForData(data),\n\t\tImmutable: pointer.Bool(true),\n\t}\n}",
"func runTemplate(cr *ricobergerdev1alpha1.VaultSecret, tmpl string, secrets map[string][]byte) ([]byte, error) {\n\t// Set up the context\n\tsd := templateContext{\n\t\tSecrets: make(map[string]string, len(secrets)),\n\t\tVault: templateVaultContext{\n\t\t\tPath: cr.Spec.Path,\n\t\t\tAddress: os.Getenv(\"VAULT_ADDRESS\"),\n\t\t},\n\t\tNamespace: cr.Namespace,\n\t\tLabels: cr.Labels,\n\t\tAnnotations: cr.Annotations,\n\t}\n\n\t// For templating, these should all be strings, convert\n\tfor k, v := range secrets {\n\t\tsd.Secrets[k] = string(v)\n\t}\n\n\t// We need to exclude some functions for security reasons and proper working of the operator, don't use TxtFuncMap:\n\t// - no environment-variable related functions to prevent secrets from accessing the VAULT environment variables\n\t// - no filesystem functions? Directory functions don't actually allow access to the FS, so they're OK.\n\t// - no other non-idempotent functions like random and crypto functions\n\tfuncmap := sprig.HermeticTxtFuncMap()\n\tdelete(funcmap, \"genPrivateKey\")\n\tdelete(funcmap, \"genCA\")\n\tdelete(funcmap, \"genSelfSignedCert\")\n\tdelete(funcmap, \"genSignedCert\")\n\tdelete(funcmap, \"htpasswd\") // bcrypt strings contain salt\n\n\ttmplParser := template.New(\"data\").Funcs(funcmap)\n\n\t// use other delimiters to prevent clashing with Helm templates\n\ttmplParser.Delims(\"{%\", \"%}\")\n\n\tt, err := tmplParser.Parse(tmpl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar bout bytes.Buffer\n\terr = t.Execute(&bout, sd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bout.Bytes(), nil\n}",
"func IsSecret(s string) bool {\n\treturn SecretValue == s || VolumeMountSecretValue == s\n}",
"func ProcessSecret(outWriter, errWriter io.Writer, secret map[string]interface{}, secretKey string, decodeAll bool) error {\n\tdata, ok := secret[\"data\"].(map[string]interface{})\n\tif !ok {\n\t\treturn ErrSecretEmpty\n\t}\n\n\tvar keys []string\n\tfor k := range data {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tif decodeAll {\n\t\tfor _, k := range keys {\n\t\t\tb64d, _ := base64.StdEncoding.DecodeString(data[k].(string))\n\t\t\t_, _ = fmt.Fprintf(outWriter, \"%s='%s'\\n\", k, strings.TrimSpace(string(b64d)))\n\t\t}\n\t} else if len(data) == 1 {\n\t\tfor k, v := range data {\n\t\t\t_, _ = fmt.Fprintf(errWriter, singleKeyDescription+\"\\n\", k)\n\t\t\tb64d, _ := base64.StdEncoding.DecodeString(v.(string))\n\t\t\t_, _ = fmt.Fprint(outWriter, string(b64d))\n\t\t}\n\t} else if secretKey != \"\" {\n\t\tif v, ok := data[secretKey]; ok {\n\t\t\tb64d, _ := base64.StdEncoding.DecodeString(v.(string))\n\t\t\t_, _ = fmt.Fprint(outWriter, string(b64d))\n\t\t} else {\n\t\t\treturn ErrSecretKeyNotFound\n\t\t}\n\t} else {\n\t\t_, _ = fmt.Fprintln(errWriter, listDescription)\n\t\tfor k := range data {\n\t\t\t_, _ = fmt.Fprintf(outWriter, \"%s %s\\n\", listPrefix, k)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (r *reconciler) hasSecret(meta metav1.Object, o runtime.Object) bool {\n\tic := o.(*operatorv1.IngressController)\n\tsecretName := controller.RouterEffectiveDefaultCertificateSecretName(ic, r.operandNamespace)\n\tsecret := &corev1.Secret{}\n\tif err := r.client.Get(context.Background(), secretName, secret); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn false\n\t\t}\n\t\tlog.Error(err, \"failed to look up secret for ingresscontroller\", \"name\", secretName, \"related\", meta.GetSelfLink())\n\t}\n\treturn true\n}",
"func (d deployment) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {\n\tif obj.GroupVersionKind() != deploymentGVC {\n\t\treturn false, nil, nil\n\t}\n\tdepl := appsv1.Deployment{}\n\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &depl)\n\tif err != nil {\n\t\treturn true, nil, errors.Wrap(err, \"unable to cast to deployment\")\n\t}\n\tmeta, err := processor.ProcessObjMeta(appMeta, obj)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\n\tvalues := helmify.Values{}\n\n\tname := appMeta.TrimName(obj.GetName())\n\treplicas, err := processReplicas(name, &depl, &values)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\n\tmatchLabels, err := yamlformat.Marshal(map[string]interface{}{\"matchLabels\": depl.Spec.Selector.MatchLabels}, 0)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\tmatchExpr := \"\"\n\tif depl.Spec.Selector.MatchExpressions != nil {\n\t\tmatchExpr, err = yamlformat.Marshal(map[string]interface{}{\"matchExpressions\": depl.Spec.Selector.MatchExpressions}, 0)\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t}\n\tselector := fmt.Sprintf(selectorTempl, matchLabels, appMeta.ChartName(), matchExpr)\n\tselector = strings.Trim(selector, \" \\n\")\n\tselector = string(yamlformat.Indent([]byte(selector), 4))\n\n\tpodLabels, err := yamlformat.Marshal(depl.Spec.Template.ObjectMeta.Labels, 8)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\tpodLabels += fmt.Sprintf(\"\\n {{- include \\\"%s.selectorLabels\\\" . 
| nindent 8 }}\", appMeta.ChartName())\n\n\tpodAnnotations := \"\"\n\tif len(depl.Spec.Template.ObjectMeta.Annotations) != 0 {\n\t\tpodAnnotations, err = yamlformat.Marshal(map[string]interface{}{\"annotations\": depl.Spec.Template.ObjectMeta.Annotations}, 6)\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\n\t\tpodAnnotations = \"\\n\" + podAnnotations\n\t}\n\n\tnameCamel := strcase.ToLowerCamel(name)\n\tspecMap, podValues, err := pod.ProcessSpec(nameCamel, appMeta, depl.Spec.Template.Spec)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\terr = values.Merge(podValues)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\n\tspec, err := yamlformat.Marshal(specMap, 6)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\n\tspec = strings.ReplaceAll(spec, \"'\", \"\")\n\n\treturn true, &result{\n\t\tvalues: values,\n\t\tdata: struct {\n\t\t\tMeta string\n\t\t\tReplicas string\n\t\t\tSelector string\n\t\t\tPodLabels string\n\t\t\tPodAnnotations string\n\t\t\tSpec string\n\t\t}{\n\t\t\tMeta: meta,\n\t\t\tReplicas: replicas,\n\t\t\tSelector: selector,\n\t\t\tPodLabels: podLabels,\n\t\t\tPodAnnotations: podAnnotations,\n\t\t\tSpec: spec,\n\t\t},\n\t}, nil\n}",
"func (c crd) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {\n\tif obj.GroupVersionKind() != crdGVC {\n\t\treturn false, nil, nil\n\t}\n\tspecUnstr, ok, err := unstructured.NestedMap(obj.Object, \"spec\")\n\tif err != nil || !ok {\n\t\treturn true, nil, errors.Wrap(err, \"unable to create crd template\")\n\t}\n\tversions, _ := yaml.Marshal(specUnstr)\n\tversions = yamlformat.Indent(versions, 2)\n\tversions = bytes.TrimRight(versions, \"\\n \")\n\n\tres := fmt.Sprintf(crdTeml, obj.GetName(), appMeta.ChartName(), string(versions))\n\tname, _, err := unstructured.NestedString(obj.Object, \"spec\", \"names\", \"singular\")\n\tif err != nil || !ok {\n\t\treturn true, nil, errors.Wrap(err, \"unable to create crd template\")\n\t}\n\treturn true, &result{\n\t\tname: name + \"-crd.yaml\",\n\t\tdata: []byte(res),\n\t}, nil\n}",
"func detectSecretType(s string) (v1.SecretType, error) {\n\tswitch strings.ToLower(s) {\n\tcase \"opaque\":\n\t\treturn v1.SecretTypeOpaque, nil\n\tcase \"kubernetes.io/basic-auth\":\n\t\treturn v1.SecretTypeBasicAuth, nil\n\tcase \"kubernetes.io/tls\":\n\t\treturn v1.SecretTypeTLS, nil\n\tcase \"kubernetes.io/ssh-auth\":\n\t\treturn v1.SecretTypeSSHAuth, nil\n\tcase \"kubernetes.io/service-account-token\":\n\t\treturn v1.SecretTypeServiceAccountToken, nil\n\tcase \"kubernetes.io/dockercfg\":\n\t\treturn v1.SecretTypeDockercfg, nil\n\tcase \"kubernetes.io/dockerconfigjson\":\n\t\treturn v1.SecretTypeDockerConfigJson, nil\n\t}\n\treturn \"\", errors.New(\"unknown secretType yet\")\n}",
"func (h *Handler) CreateSecret(cr *v1alpha1.SecretKMS) error {\n\tsecret := &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Spec.Secret,\n\t\t\tNamespace: cr.Namespace,\n\t\t},\n\t}\n\n\terr := sdk.Get(secret)\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif err != nil && !k8serrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\n\tlogrus.Debugf(\"Creating Secret from SecretKMS %s\", cr.Name)\n\n\tparent := fmt.Sprintf(\"projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s\",\n\t\tcr.Spec.Provider.GoogleCloud.Project,\n\t\tcr.Spec.Provider.GoogleCloud.Location,\n\t\tcr.Spec.Provider.GoogleCloud.Keyring,\n\t\tcr.Spec.Provider.GoogleCloud.Key)\n\n\tb, err := base64.StdEncoding.DecodeString(cr.Spec.Provider.GoogleCloud.Data)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"malformed data in SecretKMS\")\n\t}\n\n\treq := &kmspb.DecryptRequest{\n\t\tName: parent,\n\t\tCiphertext: b,\n\t}\n\n\tlogrus.Debugln(\"Data\", cr.Spec.Provider.GoogleCloud.Data)\n\n\tlogrus.Debugln(\"Sending decrypt request\")\n\tresp, err := h.CloudKMS.Decrypt(context.Background(), req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecret.Data = make(map[string][]byte)\n\tsecret.Data[cr.Spec.File] = resp.Plaintext\n\n\treturn sdk.Create(secret)\n}",
"func isSecret(e pack.PackageEnvelope) bool {\n\treturn e.HasLabel(pack.PurposeLabel, pack.PurposePlanetSecrets)\n}",
"func UnmarshalKVSecret(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(KVSecret)\n\terr = core.UnmarshalPrimitive(m, \"created_by\", &obj.CreatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crn\", &obj.Crn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"custom_metadata\", &obj.CustomMetadata)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"downloaded\", &obj.Downloaded)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"labels\", &obj.Labels)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locks_total\", &obj.LocksTotal)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secret_group_id\", &obj.SecretGroupID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &obj.SecretType)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state\", &obj.State)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state_description\", &obj.StateDescription)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_at\", &obj.UpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"versions_total\", &obj.VersionsTotal)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"data\", &obj.Data)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
"func (o *WebhooksIntegrationCustomVariableResponse) GetIsSecret() bool {\n\tif o == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn o.IsSecret\n}",
"func (v PropertyValue) IsSecret() bool {\n\t_, is := v.V.(*Secret)\n\treturn is\n}",
"func (c *PASSecretClient) Create(path string, description string, value interface{}) (bool, string, *http.Response, error) {\n\n\tvar secretType secretinternal.Secrettypes\n\n\tswitch value.(type) {\n\tcase string:\n\t\tsecretType = secretinternal.TEXT\n\tcase map[string]string:\n\t\tsecretType = secretinternal.KEYVALUE\n\tdefault:\n\t\treturn false, \"\", nil, ErrSecretTypeNotSupported\n\t}\n\n\treq := c.apiClient.SecretsApi.SecretsCreate(context.Background())\n\tif secretType == secretinternal.TEXT {\n\t\ttextSecret := secretinternal.NewSecretTextWritable(value.(string), secretType, path)\n\t\treq = req.SecretWritable(textSecret)\n\t} else {\n\t\tbagSecret := secretinternal.NewSecretBagWritable(value.(map[string]string), secretType, path)\n\t\treq = req.SecretWritable(bagSecret)\n\t}\n\tresp, r, err := req.Execute()\n\tif err == nil {\n\t\tif resp.Meta.Id != nil {\n\t\t\treturn true, *resp.Meta.Id, r, nil\n\t\t}\n\t\thasID, id := c.getIDFromObject(&resp.SecretWritable)\n\t\tif hasID {\n\t\t\treturn true, id, r, nil\n\t\t}\n\t\treturn false, \"\", r, ErrUnexpectedResponse\n\t}\n\tif r != nil {\n\t\tswitch r.StatusCode {\n\t\tcase 400: // bad request\n\t\t\treturn false, \"\", r, ErrBadPathName\n\t\tcase 401: // unauthorized access\n\t\t\treturn false, \"\", r, ErrNoCreatePermission\n\t\tcase 409: // conflict - object already exists\n\t\t\treturn false, \"\", r, ErrExists\n\t\tcase 500:\n\t\t\t// some request error is returned as 500 errors by backend\n\t\t\t// check for the ones that we know\n\t\t\tisAPIErr, summary := c.handleOpenAPIError(err)\n\t\t\tif isAPIErr {\n\t\t\t\tif summary == \"A set must have a name\" {\n\t\t\t\t\treturn false, \"\", r, ErrBadPathName\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false, \"\", r, ErrUnexpectedResponse\n\t\tdefault:\n\t\t\treturn false, \"\", r, ErrUnexpectedResponse\n\t\t}\n\t}\n\n\treturn false, \"\", r, err\n}",
"func (s *Synchronizer) SynchronizeSecret(key string) (bool, runtime.Object, error) {\n\t//\n\t// Get shadow resource\n\t//\n\t// Convert the namespace/name string into a distinct namespace and name\n\tnamespace, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\tutil.HandleError(fmt.Errorf(\"invalid resource key: %s\", key))\n\t\treturn false, nil, nil\n\t}\n\n\tsvcSecret, err := s.coreSDK.GetSecretFromCache(namespace, name)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tutil.HandleError(fmt.Errorf(\"secret '%s' in work queue no longer exists\", key))\n\t\t\treturn false, nil, nil\n\t\t}\n\n\t\treturn false, nil, err\n\t}\n\n\tif svcSecret.Name == \"\" {\n\t\t// We choose to absorb the error here as the worker would requeue the\n\t\t// resource otherwise. Instead, the next time the resource is updated\n\t\t// the resource will be queued again.\n\t\tutil.HandleError(fmt.Errorf(\"%s: secret name must be specified\", key))\n\t\treturn false, nil, nil\n\t}\n\n\t//\n\t// Sync service catalog resource back to the shadow resource\n\t//\n\n\t// Get the corresponding shadow resource\n\tshadowSecretName := builder.BoundSecretName(svcSecret.Name)\n\tsecret, err := s.coreSDK.GetSecretFromCache(svcSecret.Namespace, shadowSecretName)\n\t// If the resource doesn't exist, we'll create it\n\tif apierrors.IsNotFound(err) {\n\t\ttbnd, err := s.GetTemplatedBindingFromShadowSecret(svcSecret)\n\t\tif err != nil {\n\t\t\treturn false, svcSecret, err\n\t\t}\n\t\tif tbnd == nil {\n\t\t\t// ignore unmanaged secrets\n\t\t\treturn false, nil, nil\n\t\t}\n\n\t\tsecret, err = builder.BuildBoundSecret(svcSecret, tbnd)\n\t\tif err != nil {\n\t\t\treturn false, svcSecret, err\n\t\t}\n\t\tsecret, err = s.coreSDK.Core().Secrets(secret.Namespace).Create(secret)\n\t}\n\n\t// If an error occurs during Get/Create, we'll requeue the item so we can\n\t// attempt processing again later. 
This could have been caused by a\n\t// temporary network failure, or any other transient reason.\n\tif err != nil {\n\t\treturn false, svcSecret, err\n\t}\n\n\t// If the shadow secret is not controlled by the service catalog managed secret,\n\t// we should log a warning to the event recorder and retry\n\tif !meta.IsControlledBy(secret, svcSecret) {\n\t\treturn false, nil, nil\n\t}\n\n\t//\n\t// Sync updates to service catalog resource back to the shadow resource\n\t//\n\ttbnd, err := s.GetTemplatedBindingFromShadowSecret(svcSecret)\n\tif err != nil {\n\t\treturn false, svcSecret, err\n\t}\n\tif tbnd == nil {\n\t\t// ignore unmanaged secrets\n\t\treturn false, nil, nil\n\t}\n\n\tif refreshedSecret, changed := builder.RefreshSecret(svcSecret, tbnd, secret); changed {\n\t\tsecret, err = s.coreSDK.Core().Secrets(refreshedSecret.Namespace).Update(refreshedSecret)\n\n\t\t// If an error occurs during Update, we'll requeue the item so we can\n\t\t// attempt processing again later. This could have been caused by a\n\t\t// temporary network failure, or any other transient reason.\n\t\tif err != nil {\n\t\t\treturn false, svcSecret, err\n\t\t}\n\t}\n\n\t//\n\t// Update shadow resource status with the service catalog resource state\n\t//\n\terr = s.updateSecretStatus(secret, svcSecret)\n\tif err != nil {\n\t\treturn false, svcSecret, err\n\t}\n\n\treturn true, svcSecret, nil\n}",
"func UnmarshalSecretMetadata(m map[string]json.RawMessage, result interface{}) (err error) {\n\t// Retrieve discriminator value to determine correct \"subclass\".\n\tvar discValue string\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &discValue)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshalling discriminator property 'secret_type': %s\", err.Error())\n\t\treturn\n\t}\n\tif discValue == \"\" {\n\t\terr = fmt.Errorf(\"required discriminator property 'secret_type' not found in JSON object\")\n\t\treturn\n\t}\n\tif discValue == \"imported_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalImportedCertificateMetadata)\n\t} else if discValue == \"public_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPublicCertificateMetadata)\n\t} else if discValue == \"kv\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalKVSecretMetadata)\n\t} else if discValue == \"username_password\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalUsernamePasswordSecretMetadata)\n\t} else if discValue == \"iam_credentials\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalIAMCredentialsSecretMetadata)\n\t} else if discValue == \"arbitrary\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalArbitrarySecretMetadata)\n\t} else if discValue == \"private_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPrivateCertificateMetadata)\n\t} else {\n\t\terr = fmt.Errorf(\"unrecognized value for discriminator property 'secret_type': %s\", discValue)\n\t}\n\treturn\n}",
"func createSecret(ingressType ingress.CallType, cn, ns string, ic IngressCredential) *v1.Secret {\n\tif ingressType == ingress.Mtls {\n\t\treturn &v1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: cn,\n\t\t\t\tNamespace: ns,\n\t\t\t},\n\t\t\tData: map[string][]byte{\n\t\t\t\tgenericScrtCert: []byte(ic.ServerCert),\n\t\t\t\tgenericScrtKey: []byte(ic.PrivateKey),\n\t\t\t\tgenericScrtCaCert: []byte(ic.CaCert),\n\t\t\t},\n\t\t}\n\t}\n\treturn &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cn,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\ttlsScrtCert: []byte(ic.ServerCert),\n\t\t\ttlsScrtKey: []byte(ic.PrivateKey),\n\t\t},\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete provides a mock function with given fields: ctx, constraintID, formationTemplateID
|
func (_m *ConstraintReferenceService) Delete(ctx context.Context, constraintID string, formationTemplateID string) error {
ret := _m.Called(ctx, constraintID, formationTemplateID)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
r0 = rf(ctx, constraintID, formationTemplateID)
} else {
r0 = ret.Error(0)
}
return r0
}
|
[
"func (s *service) Delete(ctx context.Context, constraintID, formationTemplateID string) error {\n\tif err := s.repo.Delete(ctx, formationTemplateID, constraintID); err != nil {\n\t\treturn errors.Wrapf(err, \"while deleting Formation Template Constraint Reference for Constraint with ID %q and Formation Template with ID %q\", constraintID, formationTemplateID)\n\t}\n\n\treturn nil\n}",
"func (_m *Manager) Delete(ctx context.Context, projectID int64, meta ...string) error {\n\t_va := make([]interface{}, len(meta))\n\tfor _i := range meta {\n\t\t_va[_i] = meta[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, projectID)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int64, ...string) error); ok {\n\t\tr0 = rf(ctx, projectID, meta...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func DeleteTemplateMocked(t *testing.T, templateIn *types.Template) {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// to json\n\tdIn, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Delete\", fmt.Sprintf(\"/blueprint/templates/%s\", templateIn.ID)).Return(dIn, 200, nil)\n\terr = ds.DeleteTemplate(templateIn.ID)\n\tassert.Nil(err, \"Error deleting template\")\n\n}",
"func (_m *TombstoneRepository) Delete(ctx context.Context, tenant string, id string) error {\n\tret := _m.Called(ctx, tenant, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = rf(ctx, tenant, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *BundleRepository) Delete(ctx context.Context, tenant string, id string) error {\n\tret := _m.Called(ctx, tenant, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = rf(ctx, tenant, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *Storage) Delete(projectName string) error {\n\tret := _m.Called(projectName)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(projectName)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *DBClient) DeleteTransmission(age int64, status models.TransmissionStatus) error {\n\tret := _m.Called(age, status)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(int64, models.TransmissionStatus) error); ok {\n\t\tr0 = rf(age, status)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *Usecase) Delete(ctx context.Context, id int, pocketId int) error {\n\tret := _m.Called(ctx, id, pocketId)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int, int) error); ok {\n\t\tr0 = rf(ctx, id, pocketId)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *SecretStorage) Delete(id models.Id, projectId models.Id) error {\n\tret := _m.Called(id, projectId)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(models.Id, models.Id) error); ok {\n\t\tr0 = rf(id, projectId)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *TemplateClient) DeleteTemplate(filename string, tmplName string) error {\n\tret := _m.Called(filename, tmplName)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string) error); ok {\n\t\tr0 = rf(filename, tmplName)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *ISession) GuildTemplateDelete(guildID string, templateCode string, options ...discordgo.RequestOption) error {\n\t_va := make([]interface{}, len(options))\n\tfor _i := range options {\n\t\t_va[_i] = options[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, guildID, templateCode)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string, ...discordgo.RequestOption) error); ok {\n\t\tr0 = rf(guildID, templateCode, options...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *DAO) Delete(ctx context.Context, vendor string, repository string, digest string) error {\n\tret := _m.Called(ctx, vendor, repository, digest)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string, string) error); ok {\n\t\tr0 = rf(ctx, vendor, repository, digest)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *SecretService) Delete(ctx context.Context, secretID int32, projectID int32) error {\n\tret := _m.Called(ctx, secretID, projectID)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int32, int32) error); ok {\n\t\tr0 = rf(ctx, secretID, projectID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *Client) DeleteTemplate(_a0 context.Context, _a1 build.DeleteTemplateArgs) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, build.DeleteTemplateArgs) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (m *TeamTemplateDefinitionItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *TeamTemplateDefinitionItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}",
"func (_m *ServerConnexion) Delete(oath string) error {\n\tret := _m.Called(oath)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(oath)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *MockObjectStore) Delete(_a0 context.Context, _a1 *proto.Ref) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *proto.Ref) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *TemplatesRepositoryMock) Delete(_a0 string) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (m *MockProduct) DeleteSharedLicences(arg0 context.Context, arg1 db.DeleteSharedLicencesParams) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSharedLicences\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewConstraintReferenceService creates a new instance of ConstraintReferenceService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
func NewConstraintReferenceService(t mockConstructorTestingTNewConstraintReferenceService) *ConstraintReferenceService {
mock := &ConstraintReferenceService{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
|
[
"func NewFormationConstraintSvc(t mockConstructorTestingTNewFormationConstraintSvc) *FormationConstraintSvc {\n\tmock := &FormationConstraintSvc{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewService(repo formationTemplateConstraintReferenceRepository, converter constraintReferenceConverter) *service {\n\treturn &service{\n\t\trepo: repo,\n\t\tconverter: converter,\n\t}\n}",
"func NewAllocationService(t mockConstructorTestingTNewAllocationService) *AllocationService {\n\tmock := &AllocationService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewService(t mockConstructorTestingTNewService) *Service {\n\tmock := &Service{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewService(t testing.TB) *Service {\n\tmock := &Service{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewRef(t *testing.T, kubeClient *kubeset.Clientset, testNamespace string) *JenkinsRef {\n\tsvc, err := kubeClient.CoreV1().Services(testNamespace).Get(context.Background(), \"jenkins\", metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"%#v\", err)\n\t}\n\tserviceIP := svc.Spec.ClusterIP\n\tport := svc.Spec.Ports[0].Port\n\n\tj := &JenkinsRef{\n\t\thost: serviceIP,\n\t\tport: fmt.Sprintf(\"%d\", port),\n\t\tnamespace: testNamespace,\n\t\turi_tester: NewTester(kubeClient, testNamespace, t),\n\t\tt: t,\n\t}\n\treturn j\n}",
"func (r *ReconcileCanary) CreateServiceForTargetRef(instance *kharonv1alpha1.Canary) (*corev1.Service, error) {\n\t// We have to check if there is a Service called as the TargetRef.Name, otherwise create it\n\ttargetService := &corev1.Service{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Spec.TargetRef.Name, Namespace: instance.Namespace}, targetService)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tportName := instance.Spec.TargetRefContainerPort.StrVal\n\t\tif len(portName) <= 0 {\n\t\t\tportName = fmt.Sprintf(\"%d-%s\", instance.Spec.TargetRefContainerPort.IntVal, strings.ToLower(string(instance.Spec.TargetRefContainerProtocol)))\n\t\t}\n\t\t// The Service we need should be named as the Deployment because exposes the Deployment logic (as a canary)\n\t\ttargetServiceDef := &TargetServiceDef{\n\t\t\tserviceName: instance.Spec.TargetRef.Name,\n\t\t\tnamespace: instance.Namespace,\n\t\t\tselector: instance.Spec.TargetRefSelector,\n\t\t\tportName: portName,\n\t\t\tprotocol: instance.Spec.TargetRefContainerProtocol,\n\t\t\tport: instance.Spec.TargetRefContainerPort.IntVal,\n\t\t\ttargetPort: instance.Spec.TargetRefContainerPort,\n\t\t}\n\t\ttargetService = newServiceFromTargetServiceDef(targetServiceDef)\n\t\t// Set Canary instance as the owner and controller\n\t\tif err := controllerutil.SetControllerReference(instance, targetService, r.scheme); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlog.Info(\"Creating the canary service\", \"CanaryService.Namespace\", targetService.Namespace, \"CanaryService.Name\", targetService.Name)\n\t\terr = r.client.Create(context.TODO(), targetService)\n\t\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\t\treturn nil, err\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn targetService, nil\n}",
"func New() (*ReferenceManager, error) {\n\treturn &ReferenceManager{\n\t\tHandler: admission.NewHandler(admission.Create, admission.Update),\n\t}, nil\n}",
"func NewServices(t mockConstructorTestingTNewServices) *Services {\n\tmock := &Services{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func newmockMsrService(t newmockMsrServiceT) *mockMsrService {\n\tmock := &mockMsrService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewClientService(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *ClientService {\n\tmock := &ClientService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewApplicationService(t mockConstructorTestingTNewApplicationService) *ApplicationService {\n\tmock := &ApplicationService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (s *service) Create(ctx context.Context, in *model.FormationTemplateConstraintReference) error {\n\tlog.C(ctx).Infof(\"Creating an Formation Template Constraint Reference for Constraint with ID %q and Formation Template with ID %q\", in.ConstraintID, in.FormationTemplateID)\n\n\tif err := s.repo.Create(ctx, in); err != nil {\n\t\treturn errors.Wrapf(err, \"while creating Formation Template Constraint Reference for Constraint with ID %q and Formation Template with ID %q\", in.ConstraintID, in.FormationTemplateID)\n\t}\n\n\treturn nil\n}",
"func NewService(repo formationConstraintRepository, formationTemplateConstraintReferenceRepo formationTemplateConstraintReferenceRepository, uidSvc uidService, converter formationConstraintConverter) *service {\n\treturn &service{\n\t\trepo: repo,\n\t\tformationTemplateConstraintReferenceRepo: formationTemplateConstraintReferenceRepo,\n\t\tuidSvc: uidSvc,\n\t\tconverter: converter,\n\t}\n}",
"func NewIdentityService(t mockConstructorTestingTNewIdentityService) *IdentityService {\n\tmock := &IdentityService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewReference(n asyncpi.Name) *Reference {\n\treturn &Reference{AttachType(n)}\n}",
"func newService(logger glog.Logger,\n\tdb *AuditDB,\n\trootcaPool *x509.CertPool,\n\talertPusher *glog.Alert,\n) (*service, error) {\n\treturn &service{\n\t\tlogger: logger,\n\t\tdb: db,\n\t\trootcaPool: rootcaPool,\n\t\talertPusher: alertPusher,\n\t}, nil\n}",
"func NewDependencies(\n\tcfg *config.Config,\n\tentClient *ent.Client,\n\tserver *httpx.Server,\n\tclinicService services.ClinicService,\n\tappointmentService services.AppointmentService,\n\tpetService services.PetService,\n\tuserService services.UserService,\n\tcustomerService services.CustomerService,\n\tveterinarianService services.VeterinarianService,\n) Dependencies {\n\treturn Dependencies{\n\t\tConfig: cfg,\n\t\tEntClient: entClient,\n\t\tServer: server,\n\t\tClinicService: clinicService,\n\t\tAppointmentService: appointmentService,\n\t\tPetService: petService,\n\t\tUserService: userService,\n\t\tCustomerService: customerService,\n\t\tVeterinarianService: veterinarianService,\n\t}\n}",
"func (f *factory) NewDepRef(w wallets.IWallet) (string, error) {\n\t// f.depRefMutex.Lock()\n\t// defer f.depRefMutex.Unlock()\n\tfor attempt := 0; attempt < 10; attempt++ {\n\t\tref := \"W-\"\n\t\tref += string('0' + rand.Intn(10))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += \"-\"\n\t\tref += string('0' + rand.Intn(10))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += string('A' + rand.Intn(26))\n\n\t\treturn \"\", log.Wrapf(nil, \"todo: dep ref not yet stored in db\")\n\t} //for each attempt\n\treturn \"\", log.Wrapf(nil, \"Unable to generate deposit reference\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
idDistance calculates the distance of a and b accounting for wraparound using max. Wraparound means that a may be closer to b if they traveled through max. The lowest value of the following is returned: |a b| max a + b + 1 max b + a + 1 Expressions that evaluate to be larger than max are ignored to prevent overflowing.
|
func idDistance(a, b id.ID, max id.ID) id.ID {
// Wrap distance will always be smaller when a > b so
// swap the two if that doesn't hold.
if id.Compare(a, b) < 0 {
return idDistance(b, a, max)
}
var (
one = id.ID{Low: 1}
directDist = absSub(b, a)
maxDist = idSub(max, a)
)
// Don't wrap around if b+1 or (max-a)+b+1 would overflow.
if addOverflows(b, one, max) || addOverflows(maxDist, idAdd(b, one), max) {
return directDist
}
wraparoundDist := idAdd(maxDist, idAdd(b, one))
// Return the smaller of direct and wraparound distance.
if id.Compare(wraparoundDist, directDist) < 0 {
return wraparoundDist
}
return directDist
}
|
[
"func (kademliaID KademliaID) CalcDistance(target *KademliaID) *KademliaID {\n\tresult := KademliaID{}\n\tfor i := 0; i < IDLength; i++ {\n\t\tresult[i] = kademliaID[i] ^ target[i]\n\t}\n\treturn &result\n}",
"func maxDistance(colors []int) int {\n\tleft := make(map[int]int)\n\tright := make(map[int]int)\n\tfor i, v := range colors {\n\t\tif _, ok := left[v]; !ok {\n\t\t\tleft[v] = i\n\t\t}\n\n\t\tj := len(colors) - i - 1\n\t\tx := colors[j]\n\t\tif _, ok := right[x]; !ok {\n\t\t\tright[x] = j\n\t\t}\n\t}\n\n\tmax := 0\n\tfor u, x := range left {\n\t\tfor v, y := range right {\n\t\t\tif u != v && y-x > max {\n\t\t\t\tmax = y - x\n\t\t\t}\n\t\t}\n\t}\n\n\treturn max\n\n\t// TODO (tai): can be faster\n}",
"func (nodeID NodeID) CalcDistance(target *NodeID) *NodeID {\n\tresult := NodeID{}\n\tfor i := 0; i < IDLength; i++ {\n\t\tresult[i] = nodeID[i] ^ target[i]\n\t}\n\treturn &result\n}",
"func LevenshteinDistanceMax(a, b string, max int) (int, bool) {\n\tv, wasMax, _ := LevenshteinDistanceMaxReuseSlice(a, b, max, nil)\n\treturn v, wasMax\n}",
"func LogDist(a, b ID) int {\n\tlz := 0\n\tfor i := range a {\n\t\tx := a[i] ^ b[i]\n\t\tif x == 0 {\n\t\t\tlz += 8\n\t\t} else {\n\t\t\tlz += bits.LeadingZeros8(x)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn len(a)*8 - lz\n}",
"func main() {\n\tfmt.Println(maxDistToClosest([]int{1, 0, 0, 0, 1, 0, 1}))\n\tfmt.Println(maxDistToClosest([]int{1, 0, 0, 0}))\n\tfmt.Println(maxDistToClosest([]int{0, 0, 0, 1}))\n}",
"func digTurnDist(p1, p2 Coord) int {\n return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST))\n}",
"func Max(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
"func TestCalcDistance(t *testing.T) {\n\tk0 := NewKademliaID(\"0000000000000000000000000000000000000000\")\n\tkm := NewKademliaID(\"123456789abcde23456789abcde3456789abc456\")\n\tkn := NewKademliaID(\"23456789abcde3456789abcde456789abc56789a\")\n\tkx := NewKademliaID(\"317131f131713d6622ee226629b53dfd35fdbccc\") // Known xor(km, kn)\n\t\n\tdmm := km.CalcDistance(km)\n\n\t// Distance of km against itself is zero\n\tif !dmm.Equals(k0) {\n\t\tt.Errorf(\"TestCalcDistance: Error - Self distancing\")\n\t}\n\n\tdnm := kn.CalcDistance(km)\n\tdmn := km.CalcDistance(kn)\n\n\t// Distance between km and kn is the same \n\tif !dnm.Equals(dmn) {\n\t\tt.Errorf(\"TestCalcDistance: Error - Symmetry\")\n\t}\n\n\t// Distance between n and m is the known XOR distance\n\tif !dnm.Equals(kx) {\n\t\tt.Errorf(\"TestCalcDistance: Error - Known distance\")\n\t}\n}",
"func Distance(a, b *Res) int {\n\treturn abs(int(b.w)-int(a.w)) + abs(int(b.h)-int(a.h))\n}",
"func compareDistance(d1, d2 Distance) int {\n\tfor i := 0; i < MaxCapacity; i++ {\n\t\tif d1[i] > d2[i] {\n\t\t\treturn 1\n\t\t}\n\n\t\tif d1[i] < d2[i] {\n\t\t\treturn -1\n\t\t}\n\t}\n\n\treturn 0\n}",
"func getDistance(x1 float64, y1 float64, x2 float64, y2 float64) float64 {\n\treturn math.Sqrt(math.Pow(x1-x2, 2) + math.Pow(y1-y2, 2))\n}",
"func (id ID) SortByDistance(ids []ID) []ID {\n\tidsCopy := make([]ID, len(ids))\n\tcopy(idsCopy, ids)\n\tbdtc := &byDistanceToCenter{\n\t\tCenter: id,\n\t\tIds: idsCopy,\n\t}\n\tsort.Sort(bdtc)\n\treturn bdtc.Ids\n}",
"func distance(a, b *Vertex) float64 {\n\treturn math.Sqrt(math.Pow(b.X-a.X, 2) + math.Pow(b.Y-a.Y, 2))\n}",
"func (m match) dist() uint32 {\n\treturn uint32(m.distance - minDistance)\n}",
"func WrapMinDist(ci, max, ctr float32) float32 {\n\tnwd := mat32.Abs(ci - ctr) // no-wrap dist\n\tif mat32.Abs((ci+max)-ctr) < nwd {\n\t\treturn ci + max\n\t}\n\tif mat32.Abs((ci-max)-ctr) < nwd {\n\t\treturn ci - max\n\t}\n\treturn ci\n}",
"func (c Cell) MaxDistance(target Point) s1.ChordAngle {\n\t// First check the 4 cell vertices. If all are within the hemisphere\n\t// centered around target, the max distance will be to one of these vertices.\n\ttargetUVW := faceXYZtoUVW(int(c.face), target)\n\tmaxDist := maxChordAngle(c.vertexChordDist2(targetUVW, false, false),\n\t\tc.vertexChordDist2(targetUVW, true, false),\n\t\tc.vertexChordDist2(targetUVW, false, true),\n\t\tc.vertexChordDist2(targetUVW, true, true))\n\n\tif maxDist <= s1.RightChordAngle {\n\t\treturn maxDist\n\t}\n\n\t// Otherwise, find the minimum distance dMin to the antipodal point and the\n\t// maximum distance will be pi - dMin.\n\treturn s1.StraightChordAngle - c.Distance(Point{target.Mul(-1)})\n}",
"func maxInt64(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\n\treturn b\n}",
"func closestSteps(orig rect, a, b []rect) int64 {\n\tdist := int64(-1)\n\n\taPrev := orig\n\taPrevSteps := int64(0)\n\tfor _, ar := range a {\n\t\tbPrev := orig\n\t\tbPrevSteps := int64(0)\n\t\tfor _, br := range b {\n\t\t\tif ar.intersects(br) {\n\t\t\t\tdx := ar.intersection(br)\n\t\t\t\td := aPrevSteps + dx.dist(aPrev) + bPrevSteps + dx.dist(bPrev)\n\t\t\t\tif d > 0 && (dist < 0 || d < dist) {\n\t\t\t\t\tdist = d\n\t\t\t\t}\n\t\t\t}\n\t\t\tbPrev = br\n\t\t\tbPrevSteps += br.steps()\n\t\t}\n\t\taPrev = ar\n\t\taPrevSteps += ar.steps()\n\t}\n\treturn dist\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
absSub :: | a b |
|
func absSub(a, b id.ID) id.ID {
cmp := id.Compare(a, b)
switch {
case cmp < 0: // a < b
return idSub(b, a)
case cmp == 0: // a == b
return id.Zero
case cmp > 0: // a > b
return idSub(a, b)
default:
panic("impossible case")
}
}
|
[
"func (v Vec) AbsSub(other Vec) Vec {\n\treturn v.Copy().AbsSubBy(other)\n}",
"func (a Vec2) Sub(b Vec2) Vec2 {\n\treturn Vec2{a.X - b.X, a.Y - b.Y}\n}",
"func (t Torus) Sub(a, b Point) Point {\n\ta, b = t.normPair(a, b)\n\treturn a.Sub(b)\n}",
"func (a Vector) Sub(b Vector) Vector {\n return Vector{a.X - b.X, a.Y - b.Y}\n}",
"func (v V) Sub(a V) V {\n\treturn V{v.X - a.X, v.Y - a.Y}\n}",
"func (a ImpactAmount) sub(b ImpactAmount) ImpactAmount {\n\tif b >= a {\n\t\treturn 0\n\t}\n\treturn a - b\n}",
"func (g *G2) Sub(c, a, b *PointG2) *PointG2 {\n\td := &PointG2{}\n\tg.Neg(d, b)\n\tg.Add(c, a, d)\n\treturn c\n}",
"func Modsub(a, b, m *ED25519.BIG) *ED25519.BIG {\n\treturn Modadd(a, ED25519.Modneg(b, m), m)\n}",
"func Sub(a, b Expr) Expr {\n\treturn &subOp{&simpleOperator{a, b, scanner.SUB}}\n}",
"func (v Vec2) Sub(other Vec2) Vec2 {\n\treturn Vec2{v.X - other.X, v.Y - other.Y}\n}",
"func (cs *CS) sub(c1 *Constraint, c2 *Constraint) *Constraint {\n\n\tvar minusOne curve.Element\n\tone := curve.One()\n\tminusOne.Neg(&one)\n\n\texpression := &linearExpression{\n\t\tterm{Wire: c1.outputWire, Coeff: one},\n\t\tterm{Wire: c2.outputWire, Coeff: minusOne},\n\t}\n\n\treturn newConstraint(cs, expression)\n}",
"func gfSub(a, b gfElement) gfElement {\n\treturn a ^ b\n}",
"func SUB(left interface{}, right interface{}) Expr {\n\treturn binaryExprFor(\"-\", left, right)\n}",
"func (a *Vector) SubP(b Vector) {\n a.X = a.X - b.X\n a.Y = a.Y - b.Y\n}",
"func (z *Int) Sub(x, y *Int) *Int {}",
"func Sub(a, b *big.Float) *big.Float {\n\treturn ZeroBigFloat().Sub(a, b)\n}",
"func Sub(valueA gcv.Value, valueB gcv.Value) gcv.Value {\n\tif valueA.Type() == gcv.Complex || valueB.Type() == gcv.Complex {\n\t\treturn gcv.MakeValue(valueA.Complex() - valueB.Complex())\n\t}\n\treturn gcv.MakeValue(valueA.Real() - valueB.Real())\n}",
"func approxSub(a, b float64) float64 {\n\tif ((a < 0 && b < 0) || (a > 0 && b > 0)) && math.Abs(a-b) < 2.22045e-016 {\n\t\treturn 0\n\t}\n\treturn a - b\n}",
"func (v Vec2) Sub(x Vec2) Vec2 {\n\treturn Vec2{v[0] - x[0], v[1] - x[1]}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true when v + o would overflow max.
|
func addOverflows(v, o, max id.ID) bool {
// o overflows when (max - v) < o
maxDist := idSub(max, v)
return id.Compare(maxDist, o) < 0
}
|
[
"func bounded(n ir.Node, max int64) bool {\n\tif n.Type() == nil || !n.Type().IsInteger() {\n\t\treturn false\n\t}\n\n\tsign := n.Type().IsSigned()\n\tbits := int32(8 * n.Type().Size())\n\n\tif ir.IsSmallIntConst(n) {\n\t\tv := ir.Int64Val(n)\n\t\treturn 0 <= v && v < max\n\t}\n\n\tswitch n.Op() {\n\tcase ir.OAND, ir.OANDNOT:\n\t\tn := n.(*ir.BinaryExpr)\n\t\tv := int64(-1)\n\t\tswitch {\n\t\tcase ir.IsSmallIntConst(n.X):\n\t\t\tv = ir.Int64Val(n.X)\n\t\tcase ir.IsSmallIntConst(n.Y):\n\t\t\tv = ir.Int64Val(n.Y)\n\t\t\tif n.Op() == ir.OANDNOT {\n\t\t\t\tv = ^v\n\t\t\t\tif !sign {\n\t\t\t\t\tv &= 1<<uint(bits) - 1\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif 0 <= v && v < max {\n\t\t\treturn true\n\t\t}\n\n\tcase ir.OMOD:\n\t\tn := n.(*ir.BinaryExpr)\n\t\tif !sign && ir.IsSmallIntConst(n.Y) {\n\t\t\tv := ir.Int64Val(n.Y)\n\t\t\tif 0 <= v && v <= max {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\tcase ir.ODIV:\n\t\tn := n.(*ir.BinaryExpr)\n\t\tif !sign && ir.IsSmallIntConst(n.Y) {\n\t\t\tv := ir.Int64Val(n.Y)\n\t\t\tfor bits > 0 && v >= 2 {\n\t\t\t\tbits--\n\t\t\t\tv >>= 1\n\t\t\t}\n\t\t}\n\n\tcase ir.ORSH:\n\t\tn := n.(*ir.BinaryExpr)\n\t\tif !sign && ir.IsSmallIntConst(n.Y) {\n\t\t\tv := ir.Int64Val(n.Y)\n\t\t\tif v > int64(bits) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tbits -= int32(v)\n\t\t}\n\t}\n\n\tif !sign && bits <= 62 && 1<<uint(bits) <= max {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func tooLarge(x int) bool {\n\tconst max int = 1e6\n\treturn x > max || x < -max\n}",
"func WillOverflow(a, b int64) bool {\n\t// Morally MinInt64 < a+b < MaxInt64, but without overflows.\n\t// First make sure that a <= b. If not, swap them.\n\tif a > b {\n\t\ta, b = b, a\n\t}\n\t// Now b is the larger of the numbers, and we compare sizes\n\t// in a way that can never over- or underflow.\n\tif b > 0 {\n\t\treturn a > math.MaxInt64-b\n\t}\n\treturn math.MinInt64-b > a\n}",
"func chmax(updatedValue *int, target int) bool {\n\tif *updatedValue < target {\n\t\t*updatedValue = target\n\t\treturn true\n\t}\n\treturn false\n}",
"func ChMax(updatedValue *int, target int) bool {\n\tif *updatedValue < target {\n\t\t*updatedValue = target\n\t\treturn true\n\t}\n\treturn false\n}",
"func (v Value) Larger(a Value) bool {\n\treturn int(v) > int(a)\n}",
"func IsPowerOfTwo(v T) bool {\n\tif v == 0 {\n\t\treturn false\n\t}\n\treturn v&(v-1) == 0\n}",
"func ConstOverflow(v constant.Value, t *types.Type) bool {\n\tswitch {\n\tcase t.IsInteger():\n\t\tbits := uint(8 * t.Size())\n\t\tif t.IsUnsigned() {\n\t\t\tx, ok := constant.Uint64Val(v)\n\t\t\treturn !ok || x>>bits != 0\n\t\t}\n\t\tx, ok := constant.Int64Val(v)\n\t\tif x < 0 {\n\t\t\tx = ^x\n\t\t}\n\t\treturn !ok || x>>(bits-1) != 0\n\tcase t.IsFloat():\n\t\tswitch t.Size() {\n\t\tcase 4:\n\t\t\tf, _ := constant.Float32Val(v)\n\t\t\treturn math.IsInf(float64(f), 0)\n\t\tcase 8:\n\t\t\tf, _ := constant.Float64Val(v)\n\t\t\treturn math.IsInf(f, 0)\n\t\t}\n\tcase t.IsComplex():\n\t\tft := types.FloatForComplex(t)\n\t\treturn ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)\n\t}\n\tbase.Fatalf(\"ConstOverflow: %v, %v\", v, t)\n\tpanic(\"unreachable\")\n}",
"func inBound(min, v, max int) bool {\n\treturn v >= min && v < max\n}",
"func (v Value) OverflowInt(x int64) bool {\n\tswitch v.Kind() {\n\tcase Int, Int8, Int16, Int32, Int64:\n\t\tbitSize := v.typecode.Size() * 8\n\t\ttrunc := (x << (64 - bitSize)) >> (64 - bitSize)\n\t\treturn x != trunc\n\t}\n\tpanic(&ValueError{Method: \"reflect.Value.OverflowInt\", Kind: v.Kind()})\n}",
"func u(a, b interface{}) bool {\n\tav := a.(int)\n\tbv := b.(int)\n\n\tswitch {\n\tcase av < bv:\n\t\treturn true\n\tcase av >= bv:\n\t\treturn false\n\t}\n\treturn false\n}",
"func (z *Int) AddOverflow(x, y *Int) bool {\n\tvar carry bool\n\tfor i := range z {\n\t\tz[i], carry = u64Add(x[i], y[i], carry)\n\t}\n\treturn carry\n}",
"func (obj *interval) HasMax() bool {\n\treturn obj.max != -1\n}",
"func (v Value) OverflowUint(x uint64) bool {\n\tk := v.Kind()\n\tswitch k {\n\tcase Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:\n\t\tbitSize := v.typecode.Size() * 8\n\t\ttrunc := (x << (64 - bitSize)) >> (64 - bitSize)\n\t\treturn x != trunc\n\t}\n\tpanic(&ValueError{Method: \"reflect.Value.OverflowUint\", Kind: v.Kind()})\n}",
"func (z *Int) SubOverflow(x, y *Int) bool {\n\tvar (\n\t\tunderflow bool\n\t)\n\tz[0], underflow = u64Sub(x[0], y[0], underflow)\n\tz[1], underflow = u64Sub(x[1], y[1], underflow)\n\tz[2], underflow = u64Sub(x[2], y[2], underflow)\n\tz[3], underflow = u64Sub(z[3], y[3], underflow)\n\treturn underflow\n}",
"func (t *Check) Max(max, val int64) (bool, error) {\n\treturn max >= val, nil\n}",
"func (ft *factsTable) isNonNegative(v *Value) bool {\n\tif isNonNegative(v) {\n\t\treturn true\n\t}\n\n\tvar max int64\n\tswitch v.Type.Size() {\n\tcase 1:\n\t\tmax = math.MaxInt8\n\tcase 2:\n\t\tmax = math.MaxInt16\n\tcase 4:\n\t\tmax = math.MaxInt32\n\tcase 8:\n\t\tmax = math.MaxInt64\n\tdefault:\n\t\tpanic(\"unexpected integer size\")\n\t}\n\n\t// Check if the recorded limits can prove that the value is positive\n\n\tif l, has := ft.limits[v.ID]; has && (l.min >= 0 || l.umax <= uint64(max)) {\n\t\treturn true\n\t}\n\n\t// Check if v = x+delta, and we can use x's limits to prove that it's positive\n\tif x, delta := isConstDelta(v); x != nil {\n\t\tif l, has := ft.limits[x.ID]; has {\n\t\t\tif delta > 0 && l.min >= -delta && l.max <= max-delta {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif delta < 0 && l.min >= -delta {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check if v is a value-preserving extension of a non-negative value.\n\tif isCleanExt(v) && ft.isNonNegative(v.Args[0]) {\n\t\treturn true\n\t}\n\n\t// Check if the signed poset can prove that the value is >= 0\n\treturn ft.orderS.OrderedOrEqual(ft.zero, v)\n}",
"func (x *Big) CmpAbs(y *Big) int { return cmp(x, y, true) }",
"func (x *biggerIntPair) raiseMax(y biggerInt) {\n\tif x[1].extra < 0 || y.extra > 0 ||\n\t\t(x[1].extra == 0 && y.extra == 0 && x[1].i.Cmp(y.i) < 0) {\n\t\tx[1] = y\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
idAdd :: v + o
|
func idAdd(v, o id.ID) id.ID {
low, borrow := bits.Add64(v.Low, o.Low, 0)
high, _ := bits.Add64(v.High, o.High, borrow)
return id.ID{High: high, Low: low}
}
|
[
"func Add() {\n\tMatch('+')\n\tTerm()\n\tEmitLn(\"ADD (SP)+,D0\")\n}",
"func Add(a, operand int) int { return operand + a }",
"func (v V) Add(a V) V {\n\treturn V{v.X + a.X, v.Y + a.Y}\n}",
"func (n *NestedInteger) Add(elem NestedInteger) {}",
"func (m *Integer) Add(n int64) { m.value.Add(n) }",
"func (v Vector) Add(o Vector) *Vector {\n\treturn &Vector{v[0] + o[0], v[1] + o[1], v[2] + o[2]}\n}",
"func (m *DropMutation) AddObjectID(u uint32) {\n\tif m.addobject_id != nil {\n\t\t*m.addobject_id += u\n\t} else {\n\t\tm.addobject_id = &u\n\t}\n}",
"func (this *NestedInteger) Add(elem NestedInteger) {}",
"func Add(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_add(C.term_t(t1), C.term_t(t2)))\n}",
"func (s *SeriesIDSet) Add(id uint64) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.AddNoLock(id)\n}",
"func (vec Vector2i) Plus(other Vector2i) Vector2i {\n\treturn Vector2i{X: vec.X + other.X, Y: vec.Y + other.Y}\n}",
"func (t *UTally32) Add(i uint32) (cur uint32) {\n\tcur = uint32(*t)\n\t*t += UTally32(i)\n\n\treturn\n}",
"func addi(a, b, c int, r register) register {\n\tr[c] = r[a] + b\n\treturn r\n}",
"func Vadd(a, b Vec) Vec {\n\treturn Vec{\n\t\ta[0] + b[0],\n\t\ta[1] + b[1],\n\t\ta[2] + b[2],\n\t}\n}",
"func AppendIdValue(val0 interface{}, val1 interface{}) (*QueryAst, error) {\n\tast := val0.(*QueryAst)\n\tast.objectId = append(ast.objectId, string(val1.(*token.Token).Lit))\n\n\treturn ast, nil\n}",
"func (i IntValue64) Add(data OperableData) Data {\n\treturn IntValue64(int64(i) + data.Int64())\n}",
"func Add( a *context.Value, b *context.Value ) (*context.Value,error) {\n if a != nil && b != nil {\n switch a.OperationType( b ) {\n case context.VAR_BOOL:\n return context.IntValue( a.Int() + b.Int() ), nil\n case context.VAR_INT:\n return context.IntValue( a.Int() + b.Int() ), nil\n case context.VAR_FLOAT:\n return context.FloatValue( a.Float() + b.Float() ), nil\n case context.VAR_STRING:\n return context.StringValue( a.String() + b.String() ), nil\n case context.VAR_COMPLEX:\n return context.ComplexValue( a.Complex() + b.Complex() ), nil\n }\n }\n\n return nil, errors.New( \"Unsupported type for add\" )\n}",
"func (n Ndau) Add(other Ndau) (Ndau, error) {\n\tt, err := signed.Add(int64(n), int64(other))\n\treturn Ndau(t), err\n}",
"func opcodeAdd(op *parsedOpcode, vm *Engine) error {\n\tv0, err := vm.dstack.PopInt()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv1, err := vm.dstack.PopInt()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvm.dstack.PushInt(v0 + v1)\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
control the terminal mode Set a tty terminal to raw mode.
|
func setRawMode(fd int) (*raw.Termios, error) {
// make sure this is a tty
if !isatty.IsTerminal(uintptr(fd)) {
return nil, fmt.Errorf("fd %d is not a tty", fd)
}
// get the terminal IO mode
originalMode, err := raw.TcGetAttr(uintptr(fd))
if err != nil {
return nil, err
}
// modify the original mode
newMode := *originalMode
newMode.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
newMode.Oflag &^= syscall.OPOST
newMode.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
newMode.Cflag &^= (syscall.CSIZE | syscall.PARENB)
newMode.Cflag |= syscall.CS8
newMode.Cc[syscall.VMIN] = 1
newMode.Cc[syscall.VTIME] = 0
err = raw.TcSetAttr(uintptr(fd), &newMode)
if err != nil {
return nil, err
}
return originalMode, nil
}
|
[
"func (i *In) SetRawTerminal() (err error) {\n\tif !i.isTerminal || os.Getenv(\"NORAW\") != \"\" {\n\t\treturn nil\n\t}\n\ti.state, err = term.SetRawTerminal(i.fd)\n\treturn err\n}",
"func SetRawTerminal(fd FileDescriptor) (state *TerminalState, err error) {\n\tvar s *mobyterm.State\n\ts, err = mobyterm.SetRawTerminal(fd)\n\tif s != nil {\n\t\tstate = &TerminalState{\n\t\t\tstate: *s,\n\t\t}\n\t}\n\treturn\n}",
"func SetRawTerminalOutput(fd FileDescriptor) (state *TerminalState, err error) {\n\tvar s *mobyterm.State\n\ts, err = mobyterm.SetRawTerminalOutput(fd)\n\tif s != nil {\n\t\tstate = &TerminalState{\n\t\t\tstate: *s,\n\t\t}\n\t}\n\treturn\n}",
"func disableRawMode(fd int, oldTermios *unix.Termios) {\n\t_ = unix.IoctlSetTermios(fd, ioctlWriteTermios, oldTermios)\n\treturn\n}",
"func enableRawMode(fd int) (*unix.Termios, error) {\n\ttermios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toldTermios := *termios\n\n\t// Clear bits for functionality we do not want, recall &^ is bitwise clear.\n\ttermios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP\n\t// ICRNL disables carriage returns (\\r) -> newline (\\n) conversion.\n\t// IXON disables Ctrl-S and Ctrl-Q.\n\ttermios.Iflag &^= unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON\n\t// OPOST disables output processing, so \\r doesn't have \\n appended.\n\ttermios.Oflag &^= unix.OPOST\n\t// ECHO don't echo keypresses.\n\t// ICANON disables canonical mode, input is read by-byte not by-line.\n\t// ISIG disables Ctrl-C and Ctrl-Z.\n\t// IEXTEN disables Ctrl-V.\n\ttermios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN\n\ttermios.Cflag &^= unix.CSIZE | unix.PARENB\n\ttermios.Cflag |= unix.CS8\n\n\t// This might not be desired later, but for now, timeout readByte() after 100ms and\n\t// don't require a min amount of bytes to read before returning.\n\t// Minimum bytes to read before readByte() returns.\n\ttermios.Cc[unix.VMIN] = 0\n\t// 100ms timeout for readByte().\n\ttermios.Cc[unix.VTIME] = 1\n\n\t// TODO - might need to specify TCSAFLUSH to indicate when the termios change should apply.\n\tif err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &oldTermios, nil\n}",
"func Reset() {\n\trawMode := exec.Command(\"/bin/stty\", \"-raw\")\n\trawMode.Stdin = os.Stdin\n\trawMode.Run()\n\tVT100ShowCursor(os.Stdout, true)\n}",
"func (l *LineReader) raw() {\n\t// STD_OUTPUT_HANDLE\n\th, errno := syscall.GetStdHandle(-11)\n\tt.h = uintptr(h)\n\tif int32(t.h) == -1 {\n\t\terr := os.Errno(errno)\n\t\tpanic(err)\n\t}\n\tok, _, e := syscall.Syscall(procGetConsoleMode, 2,\n\t\tt.h, uintptr(unsafe.Pointer(&t.origTerm)), 0)\n\tif ok == 0 {\n\t\terr := os.NewSyscallError(\"GetConsoleMode\", int(e))\n\t\tpanic(err)\n\t}\n\n\traw := t.origTerm\n\traw &^= _ENABLE_LINE_INPUT | _ENABLE_ECHO_INPUT | _ENABLE_PROCESSED_INPUT | _ENABLE_WINDOW_INPUT\n\tok, _, e = syscall.Syscall(procSetConsoleMode, 2, t.h, uintptr(raw), 0)\n\tif ok == 0 {\n\t\terr := os.NewSyscallError(\"SetConsoleMode\", int(e))\n\t\tpanic(err)\n\t}\n\n\twin := t.getConsoleInfo()\n\tt.cols = int(win.dwSize.x)\n\tt.rows = int(win.dwSize.y)\n\n\tt.buf = new(buffer)\n}",
"func setRawTerminal(stream interface{}) (cleanup func(), err error) {\n\tfd, isTerminal := term.GetFdInfo(stream)\n\tif !isTerminal {\n\t\treturn nil, errors.New(\"not a terminal\")\n\t}\n\n\tstate, err := term.SetRawTerminal(fd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn func() { term.RestoreTerminal(fd, state) }, nil\n}",
"func (r *terminal) Start() {\n\tif r == nil {\n\t\treturn\n\t}\n\tfd := int(os.Stdin.Fd())\n\treset, err := enableNonBlockingRead(fd)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to put terminal (fd %d) into raw mode: %v\", fd, err)\n\t\treturn\n\t}\n\tr.reset = reset\n}",
"func StartRaw(c *exec.Cmd) (pty *os.File, restore func(), err error) {\n\tpty, err = Start(c)\n\toldState, err := terminal.MakeRaw(int(pty.Fd()))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn pty, func() {\n\t\t_ = terminal.Restore(int(pty.Fd()), oldState)\n\t}, nil\n}",
"func setRawTerminalOutput(stream interface{}) (cleanup func(), err error) {\n\tfd, isTerminal := term.GetFdInfo(stream)\n\tif !isTerminal {\n\t\treturn nil, errors.New(\"not a terminal\")\n\t}\n\n\tstate, err := term.SetRawTerminalOutput(fd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn func() { term.RestoreTerminal(fd, state) }, nil\n}",
"func makeInputRaw(fd windows.Handle, mode uint32) error {\n\t// See\n\t// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx\n\t// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx\n\n\t// Disable these modes\n\tmode &^= windows.ENABLE_ECHO_INPUT\n\tmode &^= windows.ENABLE_LINE_INPUT\n\tmode &^= windows.ENABLE_MOUSE_INPUT\n\tmode &^= windows.ENABLE_WINDOW_INPUT\n\tmode &^= windows.ENABLE_PROCESSED_INPUT\n\n\t// Enable these modes\n\tmode |= windows.ENABLE_EXTENDED_FLAGS\n\tmode |= windows.ENABLE_INSERT_MODE\n\tmode |= windows.ENABLE_QUICK_EDIT_MODE\n\n\tif vtInputSupported {\n\t\tmode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT\n\t}\n\n\tif err := windows.SetConsoleMode(fd, mode); err != nil {\n\t\treturn fmt.Errorf(\"unable to set console to raw mode: %w\", err)\n\t}\n\n\treturn nil\n}",
"func (p *port) restoreTermios() {\n\tif p.oldTermios == nil {\n\t\treturn\n\t}\n\tif err := tcsetattr(int(p.file.Fd()), p.oldTermios); err != nil {\n\t\t// Warning only.\n\t\tlog.Printf(\"serial: could not restore setting: %v\\n\", err)\n\t\treturn\n\t}\n\tp.oldTermios = nil\n}",
"func MakeRaw(fd uintptr) (*State, error) {\n\t// This doesn't really work. The exec.Command() runs a sub-shell\n\t// so the stty mods don't affect the client process.\n\tcmd := exec.Command(\"stty\", \"-echo raw\")\n\tcmd.Run()\n\treturn &State{}, nil\n}",
"func MakeRaw(fd uintptr) (*State, error) {\r\n\tvar oldState State\r\n\tif _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tnewState := oldState.termios\r\n\tnewState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)\r\n\tnewState.Oflag &^= OPOST\r\n\tnewState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)\r\n\tnewState.Cflag &^= (CSIZE | PARENB)\r\n\tnewState.Cflag |= CS8\r\n\tnewState.Cc[syscall.VMIN] = 1\r\n\tnewState.Cc[syscall.VTIME] = 0\r\n\r\n\tif _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\treturn &oldState, nil\r\n}",
"func MakeRaw(fd int) (*State, error) {\n\tvar oldState State\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TCGETS), uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {\n\t\treturn nil, err\n\t}\n\n\tnewState := oldState.termios\n\tnewState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF\n\tnewState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TCSETS), uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {\n\t\treturn nil, err\n\t}\n\n\treturn &oldState, nil\n}",
"func TerminalRestore(fd uintptr, termios *syscall.Termios) error {\n\treturn tcset(fd, termios)\n}",
"func setTermios(state syscall.Termios) {\n\tsyscall.Syscall6(syscall.SYS_IOCTL, uintptr(0), uintptr(syscall.TIOCSETA), uintptr(unsafe.Pointer(&state)), 0, 0, 0)\n}",
"func TerminalMode() (ModeApplier, error) {\n\treturn getMode(unix.Stdin)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a byte to a utf8 decode. Return the rune and it's size in bytes.
|
func (u *utf8) add(c byte) (r rune, size int) {
switch u.state {
case getByte0:
if c&0x80 == 0 {
// 1 byte
return rune(c), 1
} else if c&0xe0 == 0xc0 {
// 2 byte
u.val = int32(c&0x1f) << 6
u.count = 2
u.state = get1More
return KeycodeNull, 0
} else if c&0xf0 == 0xe0 {
// 3 bytes
u.val = int32(c&0x0f) << 6
u.count = 3
u.state = get2More
return KeycodeNull, 0
} else if c&0xf8 == 0xf0 {
// 4 bytes
u.val = int32(c&0x07) << 6
u.count = 4
u.state = get3More
return KeycodeNull, 0
}
case get3More:
if c&0xc0 == 0x80 {
u.state = get2More
u.val |= int32(c & 0x3f)
u.val <<= 6
return KeycodeNull, 0
}
case get2More:
if c&0xc0 == 0x80 {
u.state = get1More
u.val |= int32(c & 0x3f)
u.val <<= 6
return KeycodeNull, 0
}
case get1More:
if c&0xc0 == 0x80 {
u.state = getByte0
u.val |= int32(c & 0x3f)
return rune(u.val), u.count
}
}
// Error
u.state = getByte0
return unicode.ReplacementChar, 1
}
|
[
"func (UTF8Decoder) DecodeRune(p []byte) (rune, int) { return utf8.DecodeRune(p) }",
"func (s *scratch) addRune(r rune) int {\n\tif s.fill+utf8.UTFMax >= cap(s.data) {\n\t\ts.grow()\n\t}\n\n\tn := utf8.EncodeRune(s.data[s.fill:], r)\n\ts.fill += n\n\treturn n\n}",
"func (t *TelWindow) AddByte(b byte) {\n \n}",
"func EncodeByte(b byte) int64 {\n\treturn ((int64(b) - 0x41 + AlphabetSize) % AlphabetSize) + 1\n}",
"func DecodedLen(x int) int { return x * 2 }",
"func (b *Buf) appendRune(r rune) int {\n\tif r < utf8.RuneSelf {\n\t\tb.AppendByte(byte(r))\n\t\treturn 1\n\t}\n\ti := len(b.s)\n\tj := utf8.EncodeRune(b.Tail(utf8.UTFMax), r)\n\tb.s = b.s[:i+j]\n\treturn j\n}",
"func (bd *String) PeekByte(context.Context) (b byte, err error) {\n\tif bd.pos >= len(bd.buf) {\n\t\treturn 0, io.EOF\n\t}\n\n\treturn bd.buf[bd.pos], nil\n}",
"func ByteSize(s string, encoding Encoding) int {\n\tsize := 0\n\tfor _, c := range s {\n\t\tsize = size + RuneByteSize(c, encoding)\n\t}\n\treturn size\n}",
"func (b *BPackage) AddUint8(d uint8) {\n\n\tb.check()\n\n\tb.buf = append(b.buf, d)\n}",
"func appendUtf8(b []byte, r rune) []byte {\n\tvar a [utf8.UTFMax]byte\n\tn := utf8.EncodeRune(a[:], r)\n\tb = append(b, a[:n]...)\n\treturn b\n}",
"func (u *utf8) getRune(fd int, timeout *syscall.Timeval) rune {\n\t// use select() for the timeout\n\tif timeout != nil {\n\t\tfor true {\n\t\t\trd := syscall.FdSet{}\n\t\t\tfdset.Set(fd, &rd)\n\t\t\tn, err := syscall.Select(fd+1, &rd, nil, nil, timeout)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n == 0 {\n\t\t\t\t// nothing is readable\n\t\t\t\treturn KeycodeNull\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\t// Read the file descriptor\n\tbuf := make([]byte, 1)\n\t_, err := syscall.Read(fd, buf)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"read error %s\\n\", err))\n\t}\n\t// decode the utf8\n\tr, size := u.add(buf[0])\n\tif size == 0 {\n\t\t// incomplete utf8 code point\n\t\treturn KeycodeNull\n\t}\n\tif size == 1 && r == unicode.ReplacementChar {\n\t\t// utf8 decode error\n\t\treturn KeycodeNull\n\t}\n\treturn r\n}",
"func (enc *Encoding) AppendDecode(dst, src []byte) ([]byte, error) {\n\t// Compute the output size without padding to avoid over allocating.\n\tn := len(src)\n\tfor n > 0 && rune(src[n-1]) == enc.padChar {\n\t\tn--\n\t}\n\tn = decodedLen(n, NoPadding)\n\n\tdst = slices.Grow(dst, n)\n\tn, err := enc.Decode(dst[len(dst):][:n], src)\n\treturn dst[:len(dst)+n], err\n}",
"func Xsqlite3Utf8CharLen(tls *libc.TLS, zIn uintptr, nByte int32) int32 {\n\tvar r int32 = 0\n\tvar z uintptr = zIn\n\tvar zTerm uintptr\n\tif nByte >= 0 {\n\t\tzTerm = z + uintptr(nByte)\n\t} else {\n\t\tzTerm = libc.UintptrFromInt32(-1)\n\t}\n\n\tfor int32(*(*U8)(unsafe.Pointer(z))) != 0 && z < zTerm {\n\t\t{\n\t\t\tif int32(*(*U8)(unsafe.Pointer(libc.PostIncUintptr(&z, 1)))) >= 0xc0 {\n\t\t\t\tfor int32(*(*U8)(unsafe.Pointer(z)))&0xc0 == 0x80 {\n\t\t\t\t\tz++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tr++\n\t}\n\treturn r\n}",
"func AppendRuneBytes(dest *[]byte, r rune) int {\n\tif size := utf8.RuneLen(r); size == -1 {\n\t\tmod.Error(\"utf8.RuneLen(r) == -1\")\n\t\treturn -1\n\t}\n\tvar buf [utf8.UTFMax]byte\n\tret := utf8.EncodeRune(buf[:], r)\n\t(*dest) = append((*dest), buf[:ret]...)\n\treturn ret\n}",
"func (r *Result) GetByte() (byte, error) {\n\tres, err := strconv.ParseUint(r.GetString(), 10, 8)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn byte(res), nil\n}",
"func IndexByte(s string, c byte) int",
"func (bd *String) NextByte(context.Context) (b byte, err error) {\n\tif bd.pos >= len(bd.buf) {\n\t\treturn 0, io.EOF\n\t}\n\n\tb = bd.buf[bd.pos]\n\tbd.pos++\n\treturn\n}",
"func (b *Builder) AddByte(name string) *Builder {\n\tb.AddByteWithTag(name, \"\")\n\treturn b\n}",
"func RuneByteSize(r rune, encoding Encoding) int {\n\tif encoding == SJIS {\n\t\treturn sjisRuneByteSize(r)\n\t} else if isUTF16Encoding(encoding) {\n\t\treturn utf16RuneByteSize(r)\n\t}\n\treturn len(string(r))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
read a single rune from a file descriptor (with timeout) timeout >= 0 : wait for timeout seconds timeout = nil : return immediately
|
// getRune reads and decodes a single UTF-8 rune from the file descriptor.
//
// timeout != nil: wait up to the timeout for the fd to become readable,
// returning KeycodeNull if nothing arrives in time.
// timeout == nil: block until a byte can be read.
//
// Returns KeycodeNull for an incomplete UTF-8 sequence (more bytes are
// needed) or a UTF-8 decode error.
func (u *utf8) getRune(fd int, timeout *syscall.Timeval) rune {
	// use select() for the timeout
	if timeout != nil {
		for {
			rd := syscall.FdSet{}
			fdset.Set(fd, &rd)
			n, err := syscall.Select(fd+1, &rd, nil, nil, timeout)
			if err != nil {
				// select was interrupted - retry
				continue
			}
			if n == 0 {
				// nothing is readable
				return KeycodeNull
			}
			break
		}
	}
	// read a single byte from the file descriptor
	buf := make([]byte, 1)
	_, err := syscall.Read(fd, buf)
	if err != nil {
		panic(fmt.Sprintf("read error %s\n", err))
	}
	// feed the byte into the incremental utf8 decoder
	r, size := u.add(buf[0])
	if size == 0 {
		// incomplete utf8 code point
		return KeycodeNull
	}
	if size == 1 && r == unicode.ReplacementChar {
		// utf8 decode error
		return KeycodeNull
	}
	return r
}
|
[
"func (r *timeoutReadCloser) Read(b []byte) (int, error) {\n\ttimer := time.NewTimer(r.duration)\n\tc := make(chan readResult, 1)\n\n\tgo func() {\n\t\tn, err := r.reader.Read(b)\n\t\ttimer.Stop()\n\t\tc <- readResult{n: n, err: err}\n\t}()\n\n\tselect {\n\tcase data := <-c:\n\t\treturn data.n, data.err\n\tcase <-timer.C:\n\t\treturn 0, &ResponseTimeoutError{TimeoutDur: r.duration}\n\t}\n}",
"func (br *ByteReader) ReadByteWithTimeout(d time.Duration) (b byte, err error) {\n\tif br.ch == nil {\n\t\tbr.ch = make(chan struct {\n\t\t\tbyte\n\t\t\terror\n\t\t})\n\t\tgo br.readByteToChannel(br.ch)\n\t}\n\tselect {\n\tcase <-time.After(d):\n\t\treturn 0, ErrTimeout{}\n\tcase s := <-br.ch:\n\t\tb = s.byte\n\t\terr = s.error\n\t}\n\n\tclose(br.ch)\n\tbr.ch = nil\n\treturn\n}",
"func readTimeout(c <-chan Event, ms uint) (Event, error) {\n\tselect {\n\tcase ev := <-c:\n\t\treturn ev, nil\n\tcase <-time.After(time.Duration(ms) * time.Millisecond):\n\t\treturn Event{}, errChanTimeout\n\t}\n}",
"func TimeoutReader(r io.Reader) io.Reader",
"func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }",
"func (f *firstLineReader) getLine(timeout time.Duration) (string, error) {\n\tselect {\n\tcase s := <-f.sch:\n\t\treturn s, nil\n\tcase err := <-f.ech:\n\t\treturn err.Error(), err\n\tcase <-time.After(timeout):\n\t\terr := errors.New(\"read timed out\")\n\t\treturn err.Error(), err\n\t}\n}",
"func (d *Dev) ReadTimeout(timeout time.Duration) (int32, error) {\n\t// Wait for the falling edge that indicates the ADC has data.\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\tif !d.IsReady() {\n\t\tif !d.data.WaitForEdge(timeout) {\n\t\t\treturn 0, ErrTimeout\n\t\t}\n\t}\n\treturn d.readRaw()\n}",
"func (c *Client) Read(readTimeout time.Duration) (*commands.Command, error) {\n\tc.wg.Wait()\n\tr := &readCommand{\n\t\ttimeout: readTimeout,\n\t\terr: make(chan error),\n\t\tcommand: make(chan *commands.Command),\n\t}\n\tc.read <- r\n\tselect {\n\tcase cmd := <-r.command:\n\t\treturn cmd, nil\n\tcase err := <-r.err:\n\t\tif err == io.EOF {\n\t\t\tc.wg.Add(1)\n\t\t\tc.Lock()\n\t\t\tc.waiting = true\n\t\t\tc.Unlock()\n\t\t}\n\t\treturn nil, err\n\t}\n}",
"func (dev *Device) read() {\n\tif !dev.Ok {\n\t\t// log.Printf(\"Device is closed === %s\", dev)\n\t\treturn\n\t}\n\tdev.chRecv = make(chan []byte)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tselect {\n\t\t\tcase _, ok := <-dev.chRecv:\n\t\t\t\tif !ok {\n\t\t\t\t\t// log.Println(\"=== chRecv closed ===\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t\tclose(dev.chRecv)\n\t\t\tlog.Println(\"finish read port\")\n\t\t}()\n\t\tcountError := 0\n\t\t//TODO timeoutRead?\n\t\tfuncerr := func(err error) error {\n\t\t\tif err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Printf(\"funcread err: %s\", err)\n\t\t\tswitch {\n\t\t\tcase errors.Is(err, os.ErrClosed):\n\t\t\t\tdev.Ok = false\n\t\t\t\treturn err\n\t\t\tcase errors.Is(err, io.ErrClosedPipe):\n\t\t\t\tdev.Ok = false\n\t\t\t\treturn err\n\t\t\tcase errors.Is(err, io.EOF):\n\t\t\t\tif countError > 3 {\n\t\t\t\t\tif !dev.Ok {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tcountError = 0\n\t\t\t\t}\n\t\t\t\tcountError++\n\t\t\t}\n\n\t\t\treturn nil\n\n\t\t\t// if countError > 3 {\n\t\t\t// dev.Ok = false\n\t\t\t// return err\n\t\t\t// }\n\t\t\t// time.Sleep(1 * time.Second)\n\t\t\t// countError++\n\t\t\t// return nil\n\t\t}\n\t\tbf := bufio.NewReader(dev.port)\n\t\ttempb := make([]byte, 1024)\n\t\t// buff := make([]byte, 1)\n\t\tindxb := 0\n\t\tfor {\n\t\t\tif !dev.Ok {\n\t\t\t\t// log.Printf(\"Device is closed === %s ######\", dev)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// log.Println(\"0\")\n\t\t\t// if dev.mode != 0 {\n\t\t\t// \tline, _, err := bf.ReadLine()\n\t\t\t// \tif err != nil {\n\t\t\t// \t\tif err := funcerr(err); err != nil {\n\t\t\t// \t\t\treturn\n\t\t\t// \t\t}\n\t\t\t// \t\tcontinue\n\t\t\t// \t}\n\t\t\t// \tcountError = 0\n\t\t\t// \tselect {\n\t\t\t// \tcase <-dev.chQuit:\n\t\t\t// \t\treturn\n\t\t\t// \tcase dev.chRecv <- line:\n\t\t\t// \tcase <-time.After(1 * time.Second):\n\t\t\t// \t}\n\t\t\t// \tcontinue\n\t\t\t// }\n\t\t\tb, err := bf.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err 
:= funcerr(err); err != nil {\n\t\t\t\t\t// log.Printf(\"0, err: %s\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// var b byte\n\t\t\t// if n > 0 {\n\t\t\t// \tb = buff[0]\n\t\t\t// } else {\n\t\t\t// \tcontinue\n\t\t\t// }\n\t\t\t// log.Printf(\"0, err: %s, [% X]\", err, buff[:n])\n\t\t\t// if err != nil {\n\t\t\tif err := funcerr(err); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif indxb <= 0 {\n\t\t\t\tif b == '\\x02' {\n\t\t\t\t\ttempb[0] = b\n\t\t\t\t\tindxb = 1\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttempb[indxb] = b\n\t\t\tindxb++\n\t\t\t// fmt.Printf(\"len: %v, %v\\n\", indxb, int(tempb[2])+5)\n\t\t\tif indxb < 6 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// log.Println(\"2\")\n\t\t\tif b == '\\x03' && (indxb >= int(tempb[2])+5) {\n\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:indxb])\n\t\t\t\tselect {\n\t\t\t\tcase <-dev.chQuit:\n\t\t\t\t\t// log.Println(\"3\")\n\t\t\t\t\treturn\n\t\t\t\tcase dev.chRecv <- tempb[0:indxb]:\n\t\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:])\n\t\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\t}\n\t\t\t\tindxb = 0\n\t\t\t}\n\t\t}\n\t}()\n\tlog.Println(\"reading port\")\n}",
"func ReaderReadRune(r *strings.Reader,) (rune, int, error)",
"func (c *PgConnection) readWithTimeout(buf []byte) error {\n\tuseTimeout := true\n\tfor {\n\t\tif useTimeout && c.readTimeout > 0 {\n\t\t\tlog.Debugf(\"Setting deadline %s in the future\", c.readTimeout)\n\t\t\tc.conn.SetReadDeadline(time.Now().Add(c.readTimeout))\n\t\t}\n\n\t\t_, err := io.ReadFull(c.conn, buf)\n\t\tc.conn.SetReadDeadline(time.Time{})\n\n\t\tswitch err.(type) {\n\t\tcase nil:\n\t\t\treturn nil\n\t\tcase net.Error:\n\t\t\tif err.(net.Error).Timeout() {\n\t\t\t\tcancelErr := c.sendCancel()\n\t\t\t\tif cancelErr != nil {\n\t\t\t\t\tlog.Debugf(\"Error sending cancel: %s\", cancelErr)\n\t\t\t\t}\n\t\t\t\tuseTimeout = false\n\t\t\t} else {\n\t\t\t\tlog.Debugf(\"Read error: %s\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Debugf(\"Read error: %s\", err)\n\t\t\treturn err\n\t\t}\n\t}\n}",
"func (s *Scanner) read() rune {\n\tch, _, err := s.r.ReadRune()\n\tif err != nil {\n\t\treturn eof\n\t}\n\treturn ch\n}",
"func (i *Input) ReadKey(r io.Reader) (Key, error) {\n\tif i.sz > 0 {\n\t\t// move buffer start to index 0 so that the maximum buffer\n\t\t// size is available for more reads if required and reads start\n\t\t// at 0.\n\t\tcopy(i.buf, i.buf[i.sz:i.len])\n\t\ti.len -= i.sz\n\t\ti.sz = 0\n\t}\n\n\tvar rn rune = -1\n\tif i.len > 0 {\n\t\t// try to read a rune from the already loaded bytes\n\t\tc, sz := utf8.DecodeRune(i.buf[:i.len])\n\t\tif c == utf8.RuneError && sz < 2 {\n\t\t\trn = -1\n\t\t} else {\n\t\t\t// valid rune\n\t\t\trn = c\n\t\t\ti.sz = sz\n\t\t}\n\t}\n\n\t// if no valid rune, read more bytes\n\tif rn < 0 {\n\t\tn, err := r.Read(i.buf[i.len:])\n\t\tif err != nil || n == 0 {\n\t\t\tif i.len > 0 {\n\t\t\t\t// we have a partial (invalid) rune, skip over a byte, do\n\t\t\t\t// not return timeout error in this case (we have a byte)\n\t\t\t\ti.sz = 1\n\t\t\t\treturn 0, errors.New(\"invalid rune\")\n\t\t\t}\n\t\t\t// otherwise we have no byte at all, return ErrTimeout if\n\t\t\t// n == 0 and (err == nil || err == io.EOF || err.Timeout() == true)\n\t\t\tif n == 0 {\n\t\t\t\tto, ok := err.(interface{ Timeout() bool })\n\t\t\t\tif err == nil || err == io.EOF || (ok && to.Timeout()) {\n\t\t\t\t\treturn 0, ErrTimeout\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn 0, err\n\t\t}\n\n\t\ti.len += n\n\t\tc, sz := utf8.DecodeRune(i.buf[:i.len])\n\t\tif c == utf8.RuneError && sz < 2 {\n\t\t\ti.sz = 1 // always consume at least one byte\n\t\t\treturn 0, errors.New(\"invalid rune\")\n\t\t}\n\t\trn = c\n\t\ti.sz = sz\n\t}\n\n\t// if rn is a control character (if i.len == 1 so that if an escape\n\t// sequence is read, it does not return immediately with just ESC)\n\tif i.len == 1 && (KeyType(rn) <= KeyUS || KeyType(rn) == KeyDEL) {\n\t\treturn keyFromTypeMod(KeyType(rn), ModNone), nil\n\t}\n\n\t// translate escape sequences\n\tif KeyType(rn) == KeyESC {\n\t\tif i.mouse && bytes.HasPrefix(i.buf[:i.len], []byte(sgrMouseEventPrefix)) {\n\t\t\tif k := i.decodeMouseEvent(); k.Type() == KeyMouse 
{\n\t\t\t\ti.sz = i.len\n\t\t\t\treturn k, nil\n\t\t\t}\n\t\t}\n\t\t// NOTE: important to use the string conversion exactly like that,\n\t\t// inside the brackets of the map key - the Go compiler optimizes\n\t\t// this to avoid any allocation.\n\t\tif key, ok := i.esc[string(i.buf[:i.len])]; ok {\n\t\t\ti.sz = i.len\n\t\t\treturn key, nil\n\t\t}\n\t\t// if this is an unknown escape sequence, return KeyESCSeq and the\n\t\t// caller may get the uninterpreted sequence from i.Bytes.\n\t\ti.sz = i.len\n\t\treturn keyFromTypeMod(KeyESCSeq, ModNone), nil\n\t}\n\treturn Key(rn), nil\n}",
"func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte {\n\tt.Helper()\n\n\tdeadline := time.Now().Add(timeout)\n\tfor {\n\t\ttimeout = time.Until(deadline)\n\t\tif timeout <= 0 {\n\t\t\treturn nil\n\t\t}\n\t\tusec := timeout.Microseconds()\n\t\tif usec == 0 {\n\t\t\t// Timeout is less than a microsecond; set usec to 1 to avoid\n\t\t\t// blocking indefinitely.\n\t\t\tusec = 1\n\t\t}\n\t\tconst microsInOne = 1e6\n\t\ttv := unix.Timeval{\n\t\t\tSec: usec / microsInOne,\n\t\t\tUsec: usec % microsInOne,\n\t\t}\n\t\tif err := unix.SetsockoptTimeval(s.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil {\n\t\t\tt.Fatalf(\"can't setsockopt SO_RCVTIMEO: %s\", err)\n\t\t}\n\n\t\tbuf := make([]byte, maxReadSize)\n\t\tnread, _, err := unix.Recvfrom(s.fd, buf, unix.MSG_TRUNC)\n\t\tif err == unix.EINTR || err == unix.EAGAIN {\n\t\t\t// There was a timeout.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"can't read: %s\", err)\n\t\t}\n\t\tif nread > maxReadSize {\n\t\t\tt.Fatalf(\"received a truncated frame of %d bytes, want at most %d bytes\", nread, maxReadSize)\n\t\t}\n\t\treturn buf[:nread]\n\t}\n}",
"func readRune(r *bufio.Reader) (rune, error) {\n\tr1, _, err := r.ReadRune()\n\n\t// handle \\r\\n\n\tif r1 == '\\r' {\n\t\tr1, _, err = r.ReadRune()\n\t\tif err != nil {\n\t\t\tif r1 != '\\n' {\n\t\t\t\tr.UnreadRune()\n\t\t\t\tr1 = '\\r'\n\t\t\t}\n\t\t}\n\t}\n\treturn r1, err\n}",
"func (s *Scanner) read() rune {\n\tif len(s.peekRunes) > 0 {\n\t\tr := s.peekRunes[0]\n\t\ts.peekRunes = s.peekRunes[1:]\n\t\treturn r\n\t}\n\treturn s.nextRune()\n}",
"func (p *port) Read(b []byte) (n int, err error) {\n\tvar rfds syscall.FdSet\n\n\tfd := int(p.file.Fd())\n\tfdSet(fd, &rfds)\n\n\tvar tv *syscall.Timeval\n\tif p.timeout > 0 {\n\t\ttimeout := syscall.NsecToTimeval(p.timeout.Nanoseconds())\n\t\ttv = &timeout\n\t}\n\tif _, err = syscall.Select(fd+1, &rfds, nil, nil, tv); err != nil {\n\t\terr = fmt.Errorf(\"serial: could not select: %v\", err)\n\t\treturn\n\t}\n\tif !fdIsSet(fd, &rfds) {\n\t\t// Timeout\n\t\terr = ErrTimeout\n\t\treturn\n\t}\n\tn, err = p.file.Read(b)\n\treturn\n}",
"func (t *timedConn) Read(p []byte) (int, error) {\n\tstart := t.now()\n\tn, err := t.ConnectorConn.Read(p)\n\tt.delay(start, n)\n\treturn n, err\n}",
"func channelReadString(channel chan string, timeout time.Duration) (string, bool) {\n\tfor {\n\t\tselect {\n\t\tcase result := <-channel:\n\t\t\treturn result, false // read successfull\n\n\t\tcase <-time.After(timeout * time.Second):\n\t\t\treturn \"\", true // read was blocked\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write a string to the file descriptor, return the number of bytes written.
|
// puts writes the string s to the file descriptor fd and returns the
// number of bytes written. It panics on a write error.
func puts(fd int, s string) int {
	nbytes, err := syscall.Write(fd, []byte(s))
	if err == nil {
		return nbytes
	}
	panic(fmt.Sprintf("puts error %s\n", err))
}
|
[
"func (fs *Fs) WriteString(file *os.File, string string) (int, error) {\n\treturn file.WriteString(string) // #nosec G304\n}",
"func (a ReverseHttpFile) WriteString(s string) (int, error) {\n\treturn 0, syscall.EPERM\n}",
"func (f *MockFile) WriteString(s string) (int, error) {\n\tif f.fs.WithWriteError {\n\t\treturn 0, errors.New(\"mock write error\")\n\t}\n\tif f.closed {\n\t\treturn 0, errors.New(\"mock file is closed\")\n\t}\n\treturn f.Buffer.WriteString(s)\n}",
"func writeString(w io.Writer, s string) (n int, err error) {\n\ttype stringWriter interface {\n\t\tWriteString(string) (n int, err error)\n\t}\n\tif sw, ok := w.(stringWriter); ok {\n\t\treturn sw.WriteString(s) // Avoid copy string\n\t}\n\treturn w.Write([]byte(s)) // Using temporary copy\n}",
"func FileWriteString(f *os.File, s string) (int, error)",
"func (b *Bytes) WriteString(s string) (int, error) {\n\tn := b.Len()\n\tb.grow(n + len(s))\n\tcopy((*b.p)[n:], s)\n\treturn len(s), nil\n}",
"func (b *defaultByteBuffer) WriteString(s string) (n int, err error) {\n\tif b.status&BitWritable == 0 {\n\t\treturn -1, errors.New(\"unwritable buffer, cannot support WriteString\")\n\t}\n\tn = len(s)\n\tb.ensureWritable(n)\n\tcopy(b.buff[b.writeIdx:b.writeIdx+n], s)\n\tb.writeIdx += n\n\treturn\n}",
"func (b *Buffer) WriteString(s string) (n int, err error) {\n\treturn b.Write([]byte(s))\n}",
"func WriteString(w Writer, s string) (int, error) {\n\tif w == nil {\n\t\treturn 0, ErrMissingWriter\n\t}\n\treturn io.WriteString(w, s)\n}",
"func (m *MockNetpollWriter) WriteString(s string) (n int, err error) {\n\treturn\n}",
"func (c Channel) WriteString(name, value string) error {\n\tcName := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cName))\n\tcValue := C.CString(value)\n\tdefer C.free(unsafe.Pointer(cValue))\n\n\terrno := C.iio_channel_attr_write(c.handle, cName, cValue)\n\tif errno < 0 {\n\t\treturn syscall.Errno(-errno)\n\t}\n\t// otherwise it's the number of bytes, which we're not interested in\n\t// at this time\n\treturn nil\n}",
"func write(fd uintptr, p unsafe.Pointer, n int32) int32 {\n\tif fd == 2 { // stderr\n\t\t// Convert to a string, because we know that p won't change during the\n\t\t// call to printstring.\n\t\t// TODO: use unsafe.String instead once we require Go 1.20.\n\t\ts := _string{\n\t\t\tptr: (*byte)(p),\n\t\t\tlength: uintptr(n),\n\t\t}\n\t\tstr := *(*string)(unsafe.Pointer(&s))\n\t\tprintstring(str)\n\t\treturn n\n\t}\n\treturn 0\n}",
"func (s *String) Write(p []byte) (n int, err error) {\n\ts.value += string(p)\n\treturn len(p), nil\n}",
"func (h *Hash) WriteString(s string) (int, error) {\n\t// WriteString mirrors Write. See Write for comments.\n\tsize := len(s)\n\tif h.n > 0 && h.n <= bufSize {\n\t\tk := copy(h.buf[h.n:], s)\n\t\th.n += k\n\t\tif h.n < bufSize {\n\t\t\treturn size, nil\n\t\t}\n\t\ts = s[k:]\n\t\th.flush()\n\t}\n\tif len(s) > bufSize {\n\t\th.initSeed()\n\t\tfor len(s) > bufSize {\n\t\t\tptr := (*byte)((*unsafeheader.String)(unsafe.Pointer(&s)).Data)\n\t\t\th.state.s = rthash(ptr, bufSize, h.state.s)\n\t\t\ts = s[bufSize:]\n\t\t}\n\t}\n\tcopy(h.buf[:], s)\n\th.n = len(s)\n\treturn size, nil\n}",
"func (req *Request) WriteString(s string) (int, error) {\n\treturn req.res.Write([]byte(s))\n}",
"func (t *TestHelper) Write(p []byte) (int, error) {\n\tt.Received = string(p)\n\treturn len(p), nil\n}",
"func WriteString(path, s string) error {\n\treturn WriteFile(path, []byte(s+\"\\n\"), 0o644)\n}",
"func go_write(fd C.int, buf unsafe.Pointer, count C.size_t) C.ssize_t {\n\tC.init_lib_wrapper(C.CString(\"Go Write\"))\n\n\treturn C.true_write(fd, buf, count)\n\n}",
"func (recv *IOChannel) Write(buf string, count uint64, bytesWritten uint64) IOError {\n\tc_buf := C.CString(buf)\n\tdefer C.free(unsafe.Pointer(c_buf))\n\n\tc_count := (C.gsize)(count)\n\n\tc_bytes_written := (C.gsize)(bytesWritten)\n\n\tretC := C.g_io_channel_write((*C.GIOChannel)(recv.native), c_buf, c_count, &c_bytes_written)\n\tretGo := (IOError)(retC)\n\n\treturn retGo\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the number of columns for the terminal. Assume defaultCols if it fails.
|
// getColumns returns the number of columns of the terminal.
// It first tries the TIOCGWINSZ ioctl; if that fails it falls back to
// querying the terminal directly: record the cursor position, move the
// cursor to the right margin, read the position back, then restore the
// cursor. Returns defaultCols if both methods fail.
func getColumns(ifd, ofd int) int {
	// try using the ioctl to get the number of cols
	var winsize [4]uint16
	// NOTE(review): the ioctl is issued on syscall.Stdout rather than the
	// ofd parameter — presumably equivalent in practice, but confirm.
	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdout), syscall.TIOCGWINSZ, uintptr(unsafe.Pointer(&winsize)))
	if err == 0 {
		// winsize layout is {rows, cols, xpixel, ypixel}; index 1 is columns
		return int(winsize[1])
	}
	// the ioctl failed - try using the terminal itself
	start := getCursorPosition(ifd, ofd)
	if start < 0 {
		return defaultCols
	}
	// Go to right margin and get position
	// "\x1b[999C" moves the cursor 999 columns right (clamped at the margin);
	// the write must emit all 6 bytes of the escape sequence to be valid.
	if puts(ofd, "\x1b[999C") != 6 {
		return defaultCols
	}
	cols := getCursorPosition(ifd, ofd)
	if cols < 0 {
		return defaultCols
	}
	// restore the position
	if cols > start {
		// move the cursor back left by the number of columns we travelled
		puts(ofd, fmt.Sprintf("\x1b[%dD", cols-start))
	}
	return cols
}
|
[
"func GetColumns() uint {\n\tws := &winsize{}\n\tretCode, _, errno := syscall.Syscall(syscall.SYS_IOCTL,\n\t\tuintptr(syscall.Stdin),\n\t\tuintptr(syscall.TIOCGWINSZ),\n\t\tuintptr(unsafe.Pointer(ws)))\n\n\tif int(retCode) == -1 {\n\t\treturn 80\n\t}\n\treturn uint(ws.Col)\n}",
"func (ch *CommonHandle) NumCols() int {\n\treturn len(ch.colEndOffsets)\n}",
"func (b Board) NumCols() int {\n\treturn b.ncols\n}",
"func (c *Chunk) NumCols() int {\n\treturn len(c.columns)\n}",
"func (c *Chart) NumCols() int {\n\treturn c.numCols\n}",
"func (v Chunk) NCols() int {\n\treturn len(v.buf.Columns)\n}",
"func (l *lexer) columnNum() int {\n\tif lf := strings.LastIndex(l.input[:l.pos], \"\\n\"); lf != -1 {\n\t\treturn len(l.input[lf+1 : l.pos])\n\t}\n\treturn len(l.input[:l.pos])\n}",
"func (fw *Writer) NumColumns() int { return fw.Schema.NumColumns() }",
"func (reader *Reader) GetNColumnIn() int {\n\treturn len(reader.InputMetadata)\n}",
"func (s *Simplex) getColumnsLength() int {\n\tcount := 1 // one for RH\n\tcount += len(s.LP.ObjectiveFunction.Variables) //one for each variable\n\tfor _, c := range s.LP.Constraints {\n\t\tswitch c.Operator {\n\t\tcase \"<=\", \"=\":\n\t\t\tcount++ //one artificial\n\t\tcase \">=\":\n\t\t\tcount += 2 //one slack, one artificial\n\t\t}\n\t}\n\treturn count\n}",
"func (r ResultTable) GetColumnCount() int {\n\treturn len(r.DataSchema.ColumnNames)\n}",
"func (t *Table) Cols() int {\n\treturn len(t.ColDefs)\n}",
"func getConsoleSize() (int, int) {\n cols, rows, err := terminal.GetSize(0)\n if err != nil {\n rows = 24\n cols = 80\n }\n return rows, cols\n}",
"func countColumns(node *blackfriday.Node) int {\n\tvar columns int\n\n\tnode.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {\n\t\tswitch node.Type {\n\t\tcase blackfriday.TableRow:\n\t\t\tif !entering {\n\t\t\t\treturn blackfriday.Terminate\n\t\t\t}\n\t\tcase blackfriday.TableCell:\n\t\t\tif entering {\n\t\t\t\tcolumns++\n\t\t\t}\n\t\tdefault:\n\t\t}\n\t\treturn blackfriday.GoToNext\n\t})\n\treturn columns\n}",
"func GetMaxColumns() int {\r\n\treturn converter.StrToInt(SysString(MaxColumns))\r\n}",
"func TableGetColumnCount() int {\n\treturn int(C.iggTableGetColumnCount())\n}",
"func (r *Rows) ColCount() int {\n\treturn len(r.Columns)\n}",
"func terminalWidth() (int, error) {\n\treturn 0, errors.New(\"Not supported\")\n}",
"func (ref *UIElement) ColumnCount() int64 {\n\tret, _ := ref.Int64Attr(ColumnCountAttribute)\n\treturn ret\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return true if we know we don't support this terminal.
|
// unsupportedTerm reports whether the current TERM environment variable
// names a terminal known not to be supported.
func unsupportedTerm() bool {
	term := os.Getenv("TERM")
	_, found := unsupported[term]
	return found
}
|
[
"func TerminalSupported() bool {\r\n\treturn false\r\n}",
"func IsTerminal(_ int) bool {\n\treturn true\n}",
"func IsTerminal(fd uintptr) bool {\r\n\treturn false\r\n}",
"func IsCygwinTerminal(fd uintptr) bool {\n\treturn false\n}",
"func isTerminal(f *os.File) bool {\n\tlog.Fatalf(\"hyperkit: Function not supported on your OS\")\n\treturn false\n}",
"func terminalIsDumb() bool {\n var term = os.Getenv(\"TERM\")\n\n if term == \"\" || term == \"dumb\" {\n return true\n }\n\n return false\n}",
"func IsTerminal() bool {\n\treturn (os.Getenv(\"TERM\") != \"\" && os.Getenv(\"TERM\") != \"dumb\") || os.Getenv(\"ConEmuANSI\") == \"ON\"\n}",
"func IsTerminal() bool {\n\tfd := syscall.Stderr\n\tvar st uint32\n\tr, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)\n\treturn r != 0 && e == 0\n}",
"func IsTerminal(w io.Writer) bool {\n\tfw, ok := w.(fder)\n\tif !ok {\n\t\treturn false\n\t}\n\tvar st uint32\n\tr, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fw.Fd(), uintptr(unsafe.Pointer(&st)), 0)\n\treturn r != 0 && e == 0\n}",
"func isWindowsTerminal(fd uintptr) bool {\n\tvar st uint32\n\tr, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)\n\treturn r != 0 && e == 0\n}",
"func isTerminal() bool {\n\treturn terminal.IsTerminal(syscall.Stdin)\n}",
"func IsCygwinPty(fd uintptr) bool {\n\treturn false\n}",
"func isCmdNotSupportedErr(err error) bool {\n\tif err.Error() == StatusNotSupported {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func inTerminal() bool {\n\treturn term.IsTerminal(int(os.Stdin.Fd()))\n}",
"func AllowsColorOutput(w io.Writer) bool {\n\tif !IsTerminal(w) {\n\t\treturn false\n\t}\n\n\t// https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals\n\tif os.Getenv(\"TERM\") == \"dumb\" {\n\t\treturn false\n\t}\n\n\t// https://no-color.org/\n\tif _, nocolor := os.LookupEnv(\"NO_COLOR\"); nocolor {\n\t\treturn false\n\t}\n\n\t// On Windows WT_SESSION is set by the modern terminal component.\n\t// Older terminals have poor support for UTF-8, VT escape codes, etc.\n\tif runtime.GOOS == \"windows\" && os.Getenv(\"WT_SESSION\") == \"\" {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func Isatty() bool {\n\treturn CapTTY.Isatty()\n}",
"func IsSupportColor() bool {\n\t// \"TERM=xterm\" support color\n\t// \"TERM=xterm-vt220\" support color\n\t// \"TERM=xterm-256color\" support color\n\t// \"TERM=cygwin\" don't support color\n\tif strings.Contains(os.Getenv(\"TERM\"), \"xterm\") {\n\t\treturn true\n\t}\n\n\t// like on ConEmu software, e.g \"ConEmuANSI=ON\"\n\tif os.Getenv(\"ConEmuANSI\") == \"ON\" {\n\t\treturn true\n\t}\n\n\t// like on ConEmu software, e.g \"ANSICON=189x2000 (189x43)\"\n\tif os.Getenv(\"ANSICON\") != \"\" {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func isTty() bool {\n\t_, isStdinTerminal := term.GetFdInfo(os.Stdin)\n\t_, isStdoutTerminal := term.GetFdInfo(os.Stdout)\n\treturn isStdinTerminal && isStdoutTerminal\n}",
"func (v defaultTTYImpl) Isatty() bool {\n\tif config.MockNoTTY() {\n\t\treturn false\n\t}\n\tif isatty.IsTerminal(os.Stdin.Fd()) &&\n\t\tisatty.IsTerminal(os.Stdout.Fd()) {\n\t\treturn true\n\t} else if isatty.IsCygwinTerminal(os.Stdin.Fd()) &&\n\t\tisatty.IsCygwinTerminal(os.Stdout.Fd()) {\n\t\treturn true\n\t}\n\n\treturn false\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
refresh the edit line
|
// refreshLine redraws the edit line, dispatching to the multiline or
// single-line renderer depending on the terminal's multiline mode.
func (ls *linestate) refreshLine() {
	if ls.ts.mlmode {
		ls.refreshMultiline()
		return
	}
	ls.refreshSingleline()
}
|
[
"func Edit(defval, prompt string, refresh func(int, int)) string {\n\treturn EditDynamicWithCallback(defval, prompt, refresh, nil)\n}",
"func edit(c *cli.Context) {\n\tlines := content()\n\n\targLen := len(c.Args())\n\n\tvar ind int\n\n\tswitch argLen {\n\tcase 0:\n\t\tind = 0\n\tcase 1:\n\t\tind, _ = strconv.Atoi(c.Args()[0])\n\tdefault:\n\t\tpanic(1)\n\t}\n\n\tselectedLine := lines[ind]\n\tlineArr := strings.Split(selectedLine, \" \")\n\n\tenv := os.Environ()\n\tvimBin, err := exec.LookPath(\"vim\")\n\tcheck(err)\n\n\tplusCmd := fmt.Sprint(\"+\", lineArr[0])\n\tplussCmd := []string{\"vim\", lineArr[1], plusCmd}\n\n\tdebug(\"Whole cmd: %v Index: %v\", plussCmd, c.Args()[0])\n\n\tif true {\n\t\texecErr := syscall.Exec(vimBin, plussCmd, env)\n\t\tcheck(execErr)\n\t}\n}",
"func (l *Linenoise) edit(ifd, ofd int, prompt, init string) (string, error) {\n\t// create the line state\n\tls := newLineState(ifd, ofd, prompt, l)\n\t// set and output the initial line\n\tls.editSet(init)\n\t// The latest history entry is always our current buffer\n\tl.HistoryAdd(ls.String())\n\n\tu := utf8{}\n\n\tfor {\n\t\tr := u.getRune(syscall.Stdin, nil)\n\t\tif r == KeycodeNull {\n\t\t\tcontinue\n\t\t}\n\t\t// Autocomplete when the callback is set.\n\t\t// It returns the character to be handled next.\n\t\tif r == KeycodeTAB && l.completionCallback != nil {\n\t\t\tr = ls.completeLine()\n\t\t\tif r == KeycodeNull {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif r == KeycodeCR || r == l.hotkey {\n\t\t\tl.historyPop(-1)\n\t\t\tif l.hintsCallback != nil {\n\t\t\t\t// Refresh the line without hints to leave the\n\t\t\t\t// line as the user typed it after the newline.\n\t\t\t\thcb := l.hintsCallback\n\t\t\t\tl.hintsCallback = nil\n\t\t\t\tls.refreshLine()\n\t\t\t\tl.hintsCallback = hcb\n\t\t\t}\n\t\t\ts := ls.String()\n\t\t\tif r == l.hotkey {\n\t\t\t\treturn s + string(l.hotkey), nil\n\t\t\t}\n\t\t\treturn s, nil\n\t\t} else if r == KeycodeBS {\n\t\t\t// backspace: remove the character to the left of the cursor\n\t\t\tls.editBackspace()\n\n\t\t} else if r == KeycodeESC {\n\t\t\tif wouldBlock(ifd, &timeout20ms) {\n\t\t\t\t// looks like a single escape- abandon the line\n\t\t\t\tl.historyPop(-1)\n\t\t\t\treturn \"\", nil\n\t\t\t}\n\t\t\t// escape sequence\n\t\t\ts0 := u.getRune(ifd, &timeout20ms)\n\t\t\ts1 := u.getRune(ifd, &timeout20ms)\n\t\t\tif s0 == '[' {\n\t\t\t\t// ESC [ sequence\n\t\t\t\tif s1 >= '0' && s1 <= '9' {\n\t\t\t\t\t// Extended escape, read additional byte.\n\t\t\t\t\ts2 := u.getRune(ifd, &timeout20ms)\n\t\t\t\t\tif s2 == '~' {\n\t\t\t\t\t\tif s1 == '3' {\n\t\t\t\t\t\t\t// delete\n\t\t\t\t\t\t\tls.editDelete()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif s1 == 'A' {\n\t\t\t\t\t\t// cursor 
up\n\t\t\t\t\t\tls.editSet(l.historyPrev(ls))\n\t\t\t\t\t} else if s1 == 'B' {\n\t\t\t\t\t\t// cursor down\n\t\t\t\t\t\tls.editSet(l.historyNext(ls))\n\t\t\t\t\t} else if s1 == 'C' {\n\t\t\t\t\t\t// cursor right\n\t\t\t\t\t\tls.editMoveRight()\n\t\t\t\t\t} else if s1 == 'D' {\n\t\t\t\t\t\t// cursor left\n\t\t\t\t\t\tls.editMoveLeft()\n\t\t\t\t\t} else if s1 == 'H' {\n\t\t\t\t\t\t// cursor home\n\t\t\t\t\t\tls.editMoveHome()\n\t\t\t\t\t} else if s1 == 'F' {\n\t\t\t\t\t\t// cursor end\n\t\t\t\t\t\tls.editMoveEnd()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if s0 == '0' {\n\t\t\t\t// ESC 0 sequence\n\t\t\t\tif s1 == 'H' {\n\t\t\t\t\t// cursor home\n\t\t\t\t\tls.editMoveHome()\n\t\t\t\t} else if s1 == 'F' {\n\t\t\t\t\t// cursor end\n\t\t\t\t\tls.editMoveEnd()\n\t\t\t\t}\n\t\t\t}\n\t\t} else if r == KeycodeCtrlA {\n\t\t\t// go to the start of the line\n\t\t\tls.editMoveHome()\n\t\t} else if r == KeycodeCtrlB {\n\t\t\t// cursor left\n\t\t\tls.editMoveLeft()\n\t\t} else if r == KeycodeCtrlC {\n\t\t\t// return QUIT\n\t\t\treturn \"\", ErrQuit\n\t\t} else if r == KeycodeCtrlD {\n\t\t\tif len(ls.buf) > 0 {\n\t\t\t\t// delete: remove the character to the right of the cursor.\n\t\t\t\tls.editDelete()\n\t\t\t} else {\n\t\t\t\t// nothing to delete - QUIT\n\t\t\t\tl.historyPop(-1)\n\t\t\t\treturn \"\", ErrQuit\n\t\t\t}\n\t\t} else if r == KeycodeCtrlE {\n\t\t\t// go to the end of the line\n\t\t\tls.editMoveEnd()\n\t\t} else if r == KeycodeCtrlF {\n\t\t\t// cursor right\n\t\t\tls.editMoveRight()\n\t\t} else if r == KeycodeCtrlH {\n\t\t\t// backspace: remove the character to the left of the cursor\n\t\t\tls.editBackspace()\n\t\t} else if r == KeycodeCtrlK {\n\t\t\t// delete to the end of the line\n\t\t\tls.deleteToEnd()\n\t\t} else if r == KeycodeCtrlL {\n\t\t\t// clear screen\n\t\t\tclearScreen()\n\t\t\tls.refreshLine()\n\t\t} else if r == KeycodeCtrlN {\n\t\t\t// next history item\n\t\t\tls.editSet(l.historyNext(ls))\n\t\t} else if r == KeycodeCtrlP {\n\t\t\t// previous history 
item\n\t\t\tls.editSet(l.historyPrev(ls))\n\t\t} else if r == KeycodeCtrlT {\n\t\t\t// swap current character with the previous\n\t\t\tls.editSwap()\n\t\t} else if r == KeycodeCtrlU {\n\t\t\t// delete the whole line\n\t\t\tls.deleteLine()\n\t\t} else if r == KeycodeCtrlW {\n\t\t\t// delete previous word\n\t\t\tls.deletePrevWord()\n\t\t} else {\n\t\t\t// insert the character into the line buffer\n\t\t\tls.editInsert(r)\n\t\t}\n\t}\n}",
"func (c *Console) EditRow(id Row, text string) <-chan struct{} {\n\tch := make(chan struct{})\n\tc.jobs <- func() {\n\t\tdiff := c.rowCount - int(id)\n\t\tfmt.Fprintf(c.File, \"%c[%dA\", 27, diff)\n\t\tfmt.Fprintf(c.File, \"\\r%c[2K\", 27)\n\t\tfmt.Fprintf(c.File, \"%s\\n\", strings.TrimSpace(text))\n\t\tfmt.Fprintf(c.File, \"%c[%dB\", 27, diff)\n\t\tclose(ch)\n\t}\n\treturn ch\n}",
"func Edit(g *types.Cmd) {\n\tg.AddOptions(\"--edit\")\n}",
"func (ls *linestate) editSwap() {\n\tif ls.pos > 0 && ls.pos < len(ls.buf) {\n\t\ttmp := ls.buf[ls.pos-1]\n\t\tls.buf[ls.pos-1] = ls.buf[ls.pos]\n\t\tls.buf[ls.pos] = tmp\n\t\tif ls.pos != len(ls.buf)-1 {\n\t\t\tls.pos++\n\t\t}\n\t\tls.refreshLine()\n\t}\n}",
"func (tb *TextBuf) EditDone() {\n\ttb.AutoSaveDelete()\n\ttb.ClearChanged()\n\ttb.LinesToBytes()\n\ttb.TextBufSig.Emit(tb.This(), int64(TextBufDone), tb.Txt)\n}",
"func (e *EditCore) Edit(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) {\n\tswitch {\n\tcase ch != 0 && mod == 0:\n\t\tv.EditWrite(ch)\n\tcase key == gocui.KeySpace:\n\t\tv.EditWrite(' ')\n\tcase key == gocui.KeyBackspace || key == gocui.KeyBackspace2:\n\t\tv.EditDelete(true)\n\tcase key == gocui.KeyDelete:\n\t\tv.EditDelete(false)\n\tcase key == gocui.KeyInsert:\n\t\tv.Overwrite = !v.Overwrite\n\tcase key == gocui.KeyArrowDown:\n\t\tv.MoveCursor(0, 1, false)\n\tcase key == gocui.KeyArrowUp:\n\t\tv.MoveCursor(0, -1, false)\n\tcase key == gocui.KeyArrowLeft:\n\t\tv.MoveCursor(-1, 0, false)\n\tcase key == gocui.KeyArrowRight:\n\t\tv.MoveCursor(1, 0, false)\n\t}\n}",
"func (mv *MainView) Edit(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) {\n\tmv.editQuery(v, key, ch, mod)\n\treturn\n}",
"func EditCommand(c *cli.Context, i storage.Impl) (n storage.Note, err error) {\n\tnName, err := NoteName(c)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tn, err = i.LoadNote(nName)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tif err := writer.WriteNote(&n); err != nil {\n\t\treturn n, err\n\t}\n\n\tif err := i.SaveNote(&n); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n\n}",
"func (mv *MainView) Edit(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) {\n\treturn\n}",
"func (rec *Record) Edit(editNotes bool) error {\n\n\tline := liner.NewLiner()\n\tdefer line.Close()\n\tline.SetCtrlCAborts(true)\n\n\tpos := -1\n\n\tvar err error\n\tvar editedValue string\n\n\taborted := fmt.Errorf(\"Aborted\")\n\n\tif editedValue, err = line.PromptWithSuggestion(config.TitleLabel, rec.Title, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Title = editedValue\n\n\tif editedValue, err = line.PromptWithSuggestion(config.AccountLabel, rec.Account, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Account = editedValue\n\n\tif editedValue, err = line.PromptWithSuggestion(config.PasswordLabel, rec.Password, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Password = editedValue\n\n\ttagsString := strings.Join(rec.Tags, \", \")\n\n\tif editedValue, err = line.PromptWithSuggestion(config.TagsLabel, tagsString, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Tags = tagsStringToArray(editedValue)\n\n\tif editedValue, err = line.PromptWithSuggestion(config.URLLabel, rec.Url, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Url = editedValue\n\n\tif editNotes {\n\t\t// handle multi-line notes\n\t\tlog.Info(\"\\n%s\", config.NotesLabel)\n\n\t\tlines := strings.Split(rec.Notes, \"\\n\")\n\n\t\twriteBack := \"\"\n\t\tlineIdx := 0\n\n\t\taborted := false\n\n\t\tfor {\n\t\t\tproposal := \"\"\n\n\t\t\tif lineIdx < len(lines) {\n\t\t\t\tproposal = lines[lineIdx]\n\t\t\t}\n\n\t\t\tinput, err := line.PromptWithSuggestion(\"\", proposal, len(proposal))\n\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"Aborted? : %v\", err)\n\t\t\t\taborted = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\twriteBack += input + \"\\n\"\n\t\t\tlineIdx++\n\t\t}\n\n\t\tif !aborted {\n\t\t\trec.Notes = writeBack\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (tv *TextView) EditDone() {\n\tif tv.Buf != nil {\n\t\ttv.Buf.EditDone()\n\t}\n\ttv.ClearSelected()\n}",
"func (tpl Template) EditRow() error {\n\t_, err := DB.Exec(\n\t\t`UPDATE template SET \n\t\t name = ?,\n\t\t remark = ?,\n\t\t script = ?,\n\t\t package_id_str = ?\n\t\tWHERE\n\t\t id = ?`,\n\t\ttpl.Name,\n\t\ttpl.Remark,\n\t\ttpl.Script,\n\t\ttpl.PackageIDStr,\n\t\ttpl.ID,\n\t)\n\treturn err\n}",
"func (list *List) Edit(ix int, desc string) {\n item := &list.Items[ix]\n item.Description = desc\n}",
"func updateBatchEditOutput() {\n\tif fileReadProg != nil {\n\t\tfileReadProg.printNewLineIfNeeded()\n\t}\n\tdisplayStr := fmt.Sprintf(\"Rows inserted: %d Rows updated: %d Rows deleted: %d\",\n\t\tbatchEditStats.rowsInserted, batchEditStats.rowsUpdated, batchEditStats.rowsDeleted)\n\tbatchEditStats.displayStrLen = cli.DeleteAndPrint(batchEditStats.displayStrLen, displayStr)\n\tbatchEditStats.unprintedEdits = 0\n}",
"func (d *Debug) Refresh(contents string) {\n\td.app.QueueUpdateDraw(func() {\n\t\td.Clear()\n\t\td.SetText(contents)\n\t})\n}",
"func (v *View) Refresh() {\n\ttermui.Render(\n\t\tv.Input,\n\t\tv.Chat,\n\t\tv.Channels,\n\t\tv.Mode,\n\t)\n}",
"func (r *FPGAInfoResource) Edit(id string, item FPGAInfoConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+FPGAInfoEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Swap current character with the previous character.
|
// editSwap exchanges the character under the cursor with the one before it
// (the classic Ctrl-T transpose), then advances the cursor unless it is
// already on the last character, and redraws the line. A cursor at the very
// start or past the end of the buffer is a no-op.
func (ls *linestate) editSwap() {
	if ls.pos <= 0 || ls.pos >= len(ls.buf) {
		return
	}
	ls.buf[ls.pos-1], ls.buf[ls.pos] = ls.buf[ls.pos], ls.buf[ls.pos-1]
	if ls.pos != len(ls.buf)-1 {
		ls.pos++
	}
	ls.refreshLine()
}
|
[
"func (c *Cursor) Previous() {\n\tc.pos--\n}",
"func swap(a, b rune, plugboard string) string {\n\tfor i, letter := range plugboard {\n\t\tif letter == a {\n\t\t\tplugboard = plugboard[0:i] + string(b) + plugboard[i+1:]\n\t\t} else if letter == b {\n\t\t\tplugboard = plugboard[0:i] + string(a) + plugboard[i+1:]\n\t\t}\n\t}\n\n\treturn plugboard\n}",
"func (c *Cursor) prev() []byte {\n\tdata := c.clips[c.i].data[:c.o]\n\tr, n := utf8.DecodeLastRune(data)\n\tif n == 0 {\n\t\tpanic(\"data empty\")\n\t}\n\tif n == -1 {\n\t\tpanic(\"rune error\")\n\t}\n\tif r == '\\n' {\n\t\tr, _ := utf8.DecodeLastRune(data[:len(data)-n])\n\t\tif r == '\\r' {\n\t\t\tn++\n\t\t}\n\t}\n\treturn data[len(data)-n:]\n}",
"func (k * Keyspace) Previous() ([]byte, error) {\n\terr := k.decrementString(k.length-1)\n\treturn k.toBytes(), err\n}",
"func Utf8PrevChar(p string) string {\n\tc_p := C.CString(p)\n\tdefer C.free(unsafe.Pointer(c_p))\n\n\tretC := C.g_utf8_prev_char(c_p)\n\tretGo := C.GoString(retC)\n\tdefer C.free(unsafe.Pointer(retC))\n\n\treturn retGo\n}",
"func (p Pair) Swap() Pair {\n\tp.Base, p.Quote = p.Quote, p.Base\n\treturn p\n}",
"func (c *CompletionManager) Previous() {\n\tif c.verticalScroll == c.selected && c.selected > 0 {\n\t\tc.verticalScroll--\n\t}\n\tc.selected--\n\tc.update()\n}",
"func (s *Scanner) prev() {\n\ts.end -= s.width\n}",
"func (m mark) deleteCharForward() {\n\tb := m.buf\n\tvar deleted rune // for undo info\n\n\tif m.atLineEnd() {\n\t\tif m.atLastLine() {\n\t\t\treturn\n\t\t}\n\t\tm.joinLineBelow()\n\t\tdeleted = '\\n'\n\t} else {\n\t\tdeleted = m.char()\n\t\tb.text[m.line] = append(b.text[m.line][:m.pos], b.text[m.line][m.pos+1:]...)\n\t}\n\n\t// add undo info\n\tb.lastInsert.oldText.appendChar(deleted)\n}",
"func (lx *lexer) backup() {\n\tif lx.atEOF {\n\t\tlx.atEOF = false\n\t\treturn\n\t}\n\tif lx.nprev < 1 {\n\t\tpanic(\"backed up too far\")\n\t}\n\tw := lx.prevWidths[0]\n\tlx.prevWidths[0] = lx.prevWidths[1]\n\tlx.prevWidths[1] = lx.prevWidths[2]\n\tlx.nprev--\n\tlx.pos -= w\n\tif lx.pos < len(lx.input) && lx.input[lx.pos] == '\\n' {\n\t\tlx.line--\n\t\tlx.currLines[0] = lx.currLines[1]\n\t\tlx.currLines[1] = lx.currLines[2]\n\t\tlx.currLines[2] = 0\n\t}\n}",
"func (c *Cursor) Prev() (k []byte, v []byte) {\n\treturn c.PrevFn()\n}",
"func (i *Input) Backspace() {\n\tif i.Pos > 0 {\n\t\ti.Buffer.RemoveRune(i.Pos - 1)\n\t\ti.Pos--\n\t}\n}",
"func (m mark) deleteCharBackward() mark {\n\tb := m.buf\n\tvar deleted rune // for undo info\n\n\tif m.atLineStart() {\n\t\tif m.atFirstLine() {\n\t\t\treturn m\n\t\t}\n\t\tm.line -= 1\n\t\tm.pos = m.lastCharPos() + 1\n\t\tm.joinLineBelow()\n\t\tdeleted = '\\n'\n\t} else {\n\t\tm.pos -= 1\n\t\tdeleted = m.char()\n\t\tb.text[m.line] = append(b.text[m.line][:m.pos], b.text[m.line][m.pos+1:]...)\n\t}\n\n\t// add undo info\n\tb.lastInsert.oldText.prependChar(deleted)\n\tif m.isBefore(*b.lastInsert.start) {\n\t\tb.lastInsert.start = &m\n\t}\n\n\treturn m\n}",
"func (l *StringLexer) PrevByte() byte {\n\treturn l.input[l.pos-1]\n}",
"func reverseComplement(sequence string) (out string) {\n for i := len(sequence)-1; i >= 0; i-- {\n\n switch sequence[i] {\n\n case 65:\n out += \"T\"\n break\n case 84:\n out += \"A\"\n break\n case 71:\n out += \"C\"\n break\n case 67:\n out += \"G\"\n break\n default:\n fmt.Println(\"Error -- Encountered non-ATGC char in sequence\")\n }\n\n }\n return\n}",
"func (l *Lexer) current() rune {\n\tif l.pos < l.width {\n\t\treturn ' '\n\t}\n\tr, _ := utf8.DecodeRuneInString(l.input[l.pos-l.width:])\n\treturn r\n}",
"func (h *History) Back() string {\n\tvar result string\n\th.commandPointer = Min(len(h.commandLines)-1, Max(-1, h.commandPointer))\n\tif h.commandPointer >= 0 {\n\t\tresult = h.commandLines[h.commandPointer]\n\t\th.commandPointer = h.commandPointer - 1\n\t}\n\treturn result\n}",
"func (p Pair) Swap() Pair {\n\treturn Pair{Base: p.Quote, Quote: p.Base}\n}",
"func (hist *history) previous() (prev string) {\n\n\thist.index -= 1\n\tif hist.index < 0 {\n\t\thist.index = 0\n\t}\n\n\tif hist.index < len(hist.commandHistory) {\n\t\tprev = hist.commandHistory[hist.index]\n\t}\n\treturn prev\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Move to the end of the line buffer.
|
// editMoveEnd places the cursor just past the last character of the line
// buffer and redraws. Nothing happens if the cursor is already there.
func (ls *linestate) editMoveEnd() {
	if ls.pos == len(ls.buf) {
		return
	}
	ls.pos = len(ls.buf)
	ls.refreshLine()
}
|
[
"func (ls *linestate) deleteToEnd() {\n\tls.buf = ls.buf[:ls.pos]\n\tls.refreshLine()\n}",
"func (to *textObject) nextLine() {\n\tto.moveTo(0, -to.state.tl)\n}",
"func (buf *lineBuffer) AdvanceLine() error {\n\t//fmt.Printf(\"===> advance line..\")\n\tbuf.Cursor = 0\n\tbuf.ByteCursor = 0\n\t// iterate over the lines of the input document until valid line found or EOF\n\tif buf.isEof == 1 {\n\t\tbuf.isEof = 2\n\t\t//fmt.Printf(\"..1->2\")\n\t\treturn errAtEof\n\t}\n\t//fmt.Printf(\"..ok\\n\")\n\tfor buf.isEof == 0 {\n\t\tbuf.CurrentLine++\n\t\t//fmt.Printf(\"===> reading line #%d\\n\", buf.CurrentLine)\n\t\tif !buf.Input.Scan() { // could not read a new line: either I/O-error or EOF\n\t\t\tif err := buf.Input.Err(); err != nil {\n\t\t\t\treturn WrapError(ErrCodeIO, \"I/O error while reading input\", err)\n\t\t\t}\n\t\t\t//fmt.Println(\"===> EOF !\")\n\t\t\tbuf.isEof = 1\n\t\t\tbuf.Line = strings.NewReader(\"\")\n\t\t\treturn errAtEof\n\t\t}\n\t\tbuf.Text = buf.Input.Text()\n\t\t//fmt.Printf(\"===> %q\\n\", buf.Text)\n\t\tif !buf.IsIgnoredLine() {\n\t\t\tbuf.Line = strings.NewReader(buf.Text)\n\t\t\tbreak\n\t\t}\n\t}\n\tbuf.Line = strings.NewReader(buf.Text)\n\treturn buf.AdvanceCursor()\n}",
"func (m mark) deleteLine() {\n\tb := m.buf\n\tif len(b.text) == 1 {\n\t\tb.text[0] = newLine()\n\t\treturn\n\t}\n\tb.text = append(b.text[:m.line], b.text[m.line+1:]...)\n}",
"func (edit Editor) NavigateLineEnd() {\n\tedit.Call(\"navigateLineEnd\")\n}",
"func (buf *realLineBuffer) Reset() {\n\tbuf.b.Reset()\n\tbuf.lines = 0\n}",
"func ClearLinePartialForward() {\n\temitEscape(\"K\")\n}",
"func (x *Reader) BackToLastCompleteLine() error {\n\tif x.File == nil {\n\t\treturn nil\n\t}\n\treturn x.SeekOffset(x.Offset)\n}",
"func (edit Editor) RemoveToLineEnd() {\n\tedit.Call(\"removeToLineEnd\")\n}",
"func (b *Buffer) MoveLinesUp(start int, end int) {\n\t// 0 < start < end <= len(b.lines)\n\tif start < 1 || start >= end || end > len(b.lines) {\n\t\treturn // what to do? FIXME\n\t}\n\tif end == len(b.lines) {\n\t\tb.Insert(\n\t\t\tLoc{\n\t\t\t\tutf8.RuneCount(b.lines[end-1].data),\n\t\t\t\tend - 1,\n\t\t\t},\n\t\t\t\"\\n\"+b.Line(start-1),\n\t\t)\n\t} else {\n\t\tb.Insert(\n\t\t\tLoc{0, end},\n\t\t\tb.Line(start-1)+\"\\n\",\n\t\t)\n\t}\n\tb.Remove(\n\t\tLoc{0, start - 1},\n\t\tLoc{0, start},\n\t)\n}",
"func (b *Buffer) MoveLinesDown(start int, end int) {\n\t// 0 <= start < end < len(b.lines)\n\t// if end == len(b.lines), we can't do anything here because the\n\t// last line is unaccessible, FIXME\n\tif start < 0 || start >= end || end >= len(b.lines)-1 {\n\t\treturn // what to do? FIXME\n\t}\n\tb.Insert(\n\t\tLoc{0, start},\n\t\tb.Line(end)+\"\\n\",\n\t)\n\tend++\n\tb.Remove(\n\t\tLoc{0, end},\n\t\tLoc{0, end + 1},\n\t)\n}",
"func (h *BufPane) SelectToEndOfLine() bool {\n\tif !h.Cursor.HasSelection() {\n\t\th.Cursor.OrigSelection[0] = h.Cursor.Loc\n\t}\n\th.Cursor.End()\n\th.Cursor.SelectTo(h.Cursor.Loc)\n\th.Relocate()\n\treturn true\n}",
"func (buf *discardLineBuffer) Reset() {\n\tbuf.lines = 0\n}",
"func (b *Buffer) WriteNewLine() error {\n\tb.allocate()\n\t_, err := b.buf.Write(eol)\n\treturn err\n}",
"func (h *BufPane) EndOfLine() bool {\n\th.Cursor.Deselect(true)\n\th.Cursor.End()\n\th.Relocate()\n\treturn true\n}",
"func (text *Text) NextLine() {\n\twriteCommand(&text.buf, \"T*\")\n\ttext.x = 0\n\ttext.y -= text.currLeading\n}",
"func (cc *Reader) ReadLine() ([]byte, error) {\n\tfor {\n\t\t// try to find a terminated line in the buffered data already read\n\t\tnlidx := bytes.IndexByte(cc.buf[cc.searchFrom:cc.end], '\\n')\n\t\tif nlidx != -1 {\n\t\t\t// got a complete line\n\t\t\tline := cc.buf[cc.start : cc.searchFrom+nlidx]\n\t\t\tcc.start = cc.searchFrom + nlidx + 1\n\t\t\tcc.searchFrom = cc.start\n\t\t\t// treat \\r\\n as the line terminator if it was present\n\t\t\tif 0 < len(line) && line[len(line)-1] == '\\r' {\n\t\t\t\tline = line[:len(line)-1]\n\t\t\t}\n\t\t\treturn line, nil\n\t\t}\n\n\t\t// are we out of space? we can read more if any of these are true:\n\t\t// 1. cc.start != 0, so we can slide the existing data back\n\t\t// 2. cc.end < len(cc.buf), so we can read data into the end of the buffer\n\t\t// 3. len(cc.buf) < cc.maxSize, so we can grow the buffer\n\t\tif cc.start == 0 && cc.end == len(cc.buf) && len(cc.buf) == cc.maxSize {\n\t\t\treturn nil, ErrReadQ\n\t\t}\n\n\t\tif cc.eof {\n\t\t\treturn nil, io.EOF\n\t\t}\n\n\t\tif len(cc.buf) < cc.maxSize && (len(cc.buf)-(cc.end-cc.start) < cc.initialSize/2) {\n\t\t\t// allocate a new buffer, copy any remaining data\n\t\t\tnewLen := roundUpToPowerOfTwo(len(cc.buf) + 1)\n\t\t\tif newLen > cc.maxSize {\n\t\t\t\tnewLen = cc.maxSize\n\t\t\t} else if newLen < cc.initialSize {\n\t\t\t\tnewLen = cc.initialSize\n\t\t\t}\n\t\t\tnewBuf := make([]byte, newLen)\n\t\t\tcopy(newBuf, cc.buf[cc.start:cc.end])\n\t\t\tcc.buf = newBuf\n\t\t} else if cc.start != 0 {\n\t\t\t// slide remaining data back to the front of the buffer\n\t\t\tcopy(cc.buf, cc.buf[cc.start:cc.end])\n\t\t}\n\t\tcc.end = cc.end - cc.start\n\t\tcc.start = 0\n\n\t\tcc.searchFrom = cc.end\n\t\tn, err := cc.conn.Read(cc.buf[cc.end:])\n\t\tcc.end += n\n\t\tif n != 0 && err == io.EOF {\n\t\t\t// we may have received new \\n-terminated lines, try to parse them\n\t\t\tcc.eof = true\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}",
"func (buf *lineBuffer) AdvanceCursor() error {\n\tif buf.isEof > 2 {\n\t\treturn errAtEof\n\t}\n\tif buf.ByteCursor >= buf.Line.Size() { // at end of line, set lookahead to eolMarker\n\t\tbuf.Lookahead = eolMarker\n\t} else {\n\t\tr, err := buf.readRune()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbuf.Lookahead = r\n\t}\n\treturn nil\n}",
"func (tv *TextView) CursorEndLine() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tpos := tv.CursorPos\n\n\tgotwrap := false\n\tif wln := tv.WrappedLines(pos.Ln); wln > 1 {\n\t\tsi, ri, _ := tv.WrappedLineNo(pos)\n\t\tri = len(tv.Renders[pos.Ln].Spans[si].Text) - 1\n\t\tnwc, _ := tv.Renders[pos.Ln].SpanPosToRuneIdx(si, ri)\n\t\tif si == len(tv.Renders[pos.Ln].Spans)-1 { // last span\n\t\t\tri++\n\t\t\tnwc++\n\t\t}\n\t\ttv.CursorCol = ri\n\t\tpos.Ch = nwc\n\t\ttv.CursorPos = pos\n\t\tgotwrap = true\n\t}\n\tif !gotwrap {\n\t\ttv.CursorPos.Ch = tv.Buf.LineLen(tv.CursorPos.Ln)\n\t\ttv.CursorCol = tv.CursorPos.Ch\n\t}\n\ttv.SetCursor(tv.CursorPos)\n\ttv.ScrollCursorToRight()\n\ttv.RenderCursor(true)\n\ttv.CursorSelect(org)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete from the current cursor position to the end of the line.
|
// deleteToEnd truncates the line buffer at the cursor, discarding everything
// from the cursor position to the end of the line, and redraws.
func (ls *linestate) deleteToEnd() {
	kept := ls.buf[:ls.pos]
	ls.buf = kept
	ls.refreshLine()
}
|
[
"func (m mark) deleteLine() {\n\tb := m.buf\n\tif len(b.text) == 1 {\n\t\tb.text[0] = newLine()\n\t\treturn\n\t}\n\tb.text = append(b.text[:m.line], b.text[m.line+1:]...)\n}",
"func (edit Editor) RemoveToLineEnd() {\n\tedit.Call(\"removeToLineEnd\")\n}",
"func (e *Editor) DelChar() {\n\t// if the cursor is at the line beyond the end of text\n\t// then move it to the last line\n\tif e.Cursor.Row == len(e.Rows) {\n\t\tif len(e.Rows) == 0 {\n\t\t\treturn\n\t\t}\n\t\te.Cursor.Row = len(e.Rows) - 1\n\t\te.Cursor.Col = len(e.Rows[e.Cursor.Row])\n\t}\n\n\t// if at the beginning of the text, then do nothing\n\tif e.Cursor.Col == 0 && e.Cursor.Row == 0 {\n\t\treturn\n\t}\n\n\t// different handling for at the beginning of the line or middle of line\n\tif e.Cursor.Col > 0 {\n\t\trow := e.Rows[e.Cursor.Row]\n\t\tcopy(row[e.Cursor.Col-1:], row[e.Cursor.Col:])\n\t\trow = row[:len(row)-1]\n\t\te.Rows[e.Cursor.Row] = row\n\t\te.Cursor.Col--\n\t} else {\n\t\te.Cursor.Col = len(e.Rows[e.Cursor.Row-1])\n\t\te.Rows[e.Cursor.Row-1] = append(e.Rows[e.Cursor.Row-1], e.Rows[e.Cursor.Row]...)\n\t\te.DelRow(e.Cursor.Row)\n\t\te.Cursor.Row--\n\t}\n\te.Dirty = true\n}",
"func (b *Batch) DeleteCurrentLine() {\n\tb.call(\"nvim_del_current_line\", nil)\n}",
"func (s *Store) DeleteLine(line int) (string, error) {\n\tif line < 0 || line >= len(s.lines) {\n\t\treturn \"\", fmt.Errorf(\"newLine: Invalid line %v\", line)\n\t}\n\toriginal := s.lines[line].String()\n\tif line < len(s.lines)-1 {\n\t\tcopy(s.lines[line:], s.lines[line+1:])\n\t}\n\ts.lines[len(s.lines)-1] = nil // or the zero value of T\n\ts.lines = s.lines[:len(s.lines)-1]\n\tcs := s.undoFac()\n\tcs.ChangeLine(line, original, \"\")\n\tcs.RemoveLine(line)\n\ts.AddUndoSet(cs)\n\treturn original, nil\n}",
"func (e *LineEditor) DelChar() {\n\n\t// different handling for at the beginning of the line or middle of line\n\tif e.Cx > 0 {\n\t\trow := e.Row\n\t\tcopy(row[e.Cx-1:], row[e.Cx:])\n\t\trow = row[:len(row)-1]\n\t\te.Row = row\n\t\te.Cx--\n\t}\n}",
"func (v *Nvim) DeleteCurrentLine() error {\n\treturn v.call(\"nvim_del_current_line\", nil)\n}",
"func (edit *TextEditor) Delete(where int, len int) {\n\t/* delete characters while updating undo */\n\tedit.makeundoDelete(where, len)\n\n\tedit.Buffer = strDeleteText(edit.Buffer, where, len)\n\tedit.HasPreferredX = false\n}",
"func (tv *TextView) CursorDelete(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\torg := tv.CursorPos\n\ttv.CursorForward(steps)\n\ttv.Buf.DeleteText(org, tv.CursorPos, true, true)\n\ttv.SetCursorShow(org)\n}",
"func ClearLinePartialForward() {\n\temitEscape(\"K\")\n}",
"func (hw *HighlightedWriter) EraseLine() {\n\thw.delegate.EraseLine()\n}",
"func (edit Editor) RemoveToLineStart() {\n\tedit.Call(\"removeToLineStart\")\n}",
"func (m *Model) deleteAfterCursor() bool {\n\tm.value = m.value[:m.pos]\n\treturn m.setCursor(len(m.value))\n}",
"func (w *VT100Writer) EraseLine() {\n\tw.WriteRaw([]byte{0x1b, '[', '2', 'K'})\n}",
"func (r region) delete() mark {\n\tvar fr, to = orderMarks(r.start, r.end)\n\tb := fr.buf\n\tb.text[fr.line] = append(b.text[fr.line][:fr.pos], b.text[to.line][to.pos:]...)\n\tif to.line > fr.line {\n\t\tto.line -= b.deleteLines(mark{fr.line + 1, 0, b}, to)\n\t\tif fr.atEmptyLine() && fr.maxLine() > 0 {\n\t\t\tfr.deleteLine()\n\t\t}\n\t}\n\tfr.fixPos()\n\treturn fr\n}",
"func ClearLine() {\n\tfmt.Print(CSI + EraseEntireLineSeq)\n}",
"func (m mark) deleteCharForward() {\n\tb := m.buf\n\tvar deleted rune // for undo info\n\n\tif m.atLineEnd() {\n\t\tif m.atLastLine() {\n\t\t\treturn\n\t\t}\n\t\tm.joinLineBelow()\n\t\tdeleted = '\\n'\n\t} else {\n\t\tdeleted = m.char()\n\t\tb.text[m.line] = append(b.text[m.line][:m.pos], b.text[m.line][m.pos+1:]...)\n\t}\n\n\t// add undo info\n\tb.lastInsert.oldText.appendChar(deleted)\n}",
"func (edit Editor) RemoveToLineRight() {\n\tedit.Call(\"removeToLineRight\")\n}",
"func ClearLine() {\n\temitEscape(\"K\", 2)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete the previous space delimited word.
|
// deletePrevWord removes the space-delimited word immediately before the
// cursor (the classic Ctrl-W behavior): any run of spaces to the left of the
// cursor is skipped first, then the word itself is consumed, and the text
// from the original cursor position onward is spliced back in.
func (ls *linestate) deletePrevWord() {
	end := ls.pos
	// step back over any spaces directly before the cursor
	for ls.pos > 0 && ls.buf[ls.pos-1] == ' ' {
		ls.pos--
	}
	// step back over the word characters themselves
	for ls.pos > 0 && ls.buf[ls.pos-1] != ' ' {
		ls.pos--
	}
	ls.buf = append(ls.buf[:ls.pos], ls.buf[end:]...)
	ls.refreshLine()
}
|
[
"func (m *Model) deleteWordLeft() bool {\n\tif m.pos == 0 || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.deleteBeforeCursor()\n\t}\n\n\t// Linter note: it's critical that we acquire the initial cursor position\n\t// here prior to altering it via SetCursor() below. As such, moving this\n\t// call into the corresponding if clause does not apply here.\n\toldPos := m.pos //nolint:ifshort\n\n\tblink := m.setCursor(m.pos - 1)\n\tfor unicode.IsSpace(m.value[m.pos]) {\n\t\tif m.pos <= 0 {\n\t\t\tbreak\n\t\t}\n\t\t// ignore series of whitespace before cursor\n\t\tblink = m.setCursor(m.pos - 1)\n\t}\n\n\tfor m.pos > 0 {\n\t\tif !unicode.IsSpace(m.value[m.pos]) {\n\t\t\tblink = m.setCursor(m.pos - 1)\n\t\t} else {\n\t\t\tif m.pos > 0 {\n\t\t\t\t// keep the previous space\n\t\t\t\tblink = m.setCursor(m.pos + 1)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif oldPos > len(m.value) {\n\t\tm.value = m.value[:m.pos]\n\t} else {\n\t\tm.value = append(m.value[:m.pos], m.value[oldPos:]...)\n\t}\n\n\treturn blink\n}",
"func (e *T) eraseWord() {\n\tif e.widx <= 0 {\n\t\treturn\n\t}\n\n\t// number of boundary transitions\n\tn := 2\n\n\te.widx--\n\tif isWordRune(rune(e.buf[e.widx])) {\n\t\tn--\n\t}\n\te.buf[e.widx] = 0\n\n\tfor e.widx > 0 {\n\t\te.widx--\n\t\tisword := isWordRune(rune(e.buf[e.widx]))\n\t\tif n == 2 && isword {\n\t\t\tn--\n\t\t} else if n == 1 && !isword {\n\t\t\te.widx++\n\t\t\tbreak\n\t\t}\n\t\te.buf[e.widx] = 0\n\t}\n}",
"func (m *Model) deleteWordRight() bool {\n\tif m.pos >= len(m.value) || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.deleteAfterCursor()\n\t}\n\n\toldPos := m.pos\n\tm.setCursor(m.pos + 1)\n\tfor unicode.IsSpace(m.value[m.pos]) {\n\t\t// ignore series of whitespace after cursor\n\t\tm.setCursor(m.pos + 1)\n\n\t\tif m.pos >= len(m.value) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor m.pos < len(m.value) {\n\t\tif !unicode.IsSpace(m.value[m.pos]) {\n\t\t\tm.setCursor(m.pos + 1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif m.pos > len(m.value) {\n\t\tm.value = m.value[:oldPos]\n\t} else {\n\t\tm.value = append(m.value[:oldPos], m.value[m.pos:]...)\n\t}\n\n\treturn m.setCursor(oldPos)\n}",
"func (m mark) deleteCharForward() {\n\tb := m.buf\n\tvar deleted rune // for undo info\n\n\tif m.atLineEnd() {\n\t\tif m.atLastLine() {\n\t\t\treturn\n\t\t}\n\t\tm.joinLineBelow()\n\t\tdeleted = '\\n'\n\t} else {\n\t\tdeleted = m.char()\n\t\tb.text[m.line] = append(b.text[m.line][:m.pos], b.text[m.line][m.pos+1:]...)\n\t}\n\n\t// add undo info\n\tb.lastInsert.oldText.appendChar(deleted)\n}",
"func (tv *TextView) CursorDeleteWord(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\torg := tv.CursorPos\n\ttv.CursorForwardWord(steps)\n\ttv.Buf.DeleteText(org, tv.CursorPos, true, true)\n\ttv.SetCursorShow(org)\n}",
"func (h *BufPane) DeleteWordLeft() bool {\n\th.SelectWordLeft()\n\tif h.Cursor.HasSelection() {\n\t\th.Cursor.DeleteSelection()\n\t\th.Cursor.ResetSelection()\n\t}\n\th.Relocate()\n\treturn true\n}",
"func DeleteExtraSpace(src string) string {\n\t//删除字符串中的多余空格,有多个空格时,仅保留一个空格\n\tnewSrc := strings.Replace(src, \" \", \" \", -1) //替换tab为空格\n\tregStr := \"\\\\s{2,}\" //两个及两个以上空格的正则表达式\n\treg, _ := regexp.Compile(regStr) //编译正则表达式\n\tres := make([]byte, len(newSrc)) //定义字符数组切片\n\tcopy(res, newSrc) //将字符串复制到切片\n\tspcIndex := reg.FindStringIndex(string(res)) //在字符串中搜索\n\tfor len(spcIndex) > 0 { //找到适配项\n\t\tres = append(res[:spcIndex[0]+1], res[spcIndex[1]:]...) //删除多余空格\n\t\tspcIndex = reg.FindStringIndex(string(res)) //继续在字符串中搜索\n\t}\n\treturn string(res)\n}",
"func (e *Editor) deleteWord(distance int) {\n\tif distance == 0 {\n\t\treturn\n\t}\n\n\te.makeValid()\n\n\tif e.caret.start.ofs != e.caret.end.ofs {\n\t\te.Delete(1)\n\t\tdistance -= sign(distance)\n\t}\n\tif distance == 0 {\n\t\treturn\n\t}\n\n\t// split the distance information into constituent parts to be\n\t// used independently.\n\twords, direction := distance, 1\n\tif distance < 0 {\n\t\twords, direction = distance*-1, -1\n\t}\n\t// atEnd if offset is at or beyond either side of the buffer.\n\tatEnd := func(offset int) bool {\n\t\tidx := e.caret.start.ofs + offset*direction\n\t\treturn idx <= 0 || idx >= e.editBuffer.len()\n\t}\n\t// next returns the appropriate rune given the direction and offset.\n\tnext := func(offset int) (r rune) {\n\t\tidx := e.caret.start.ofs + offset*direction\n\t\tif idx < 0 {\n\t\t\tidx = 0\n\t\t} else if idx > e.editBuffer.len() {\n\t\t\tidx = e.editBuffer.len()\n\t\t}\n\t\tif direction < 0 {\n\t\t\tr, _ = e.editBuffer.runeBefore(idx)\n\t\t} else {\n\t\t\tr, _ = e.editBuffer.runeAt(idx)\n\t\t}\n\t\treturn r\n\t}\n\tvar runes = 1\n\tfor ii := 0; ii < words; ii++ {\n\t\tif r := next(runes); unicode.IsSpace(r) {\n\t\t\tfor r := next(runes); unicode.IsSpace(r) && !atEnd(runes); r = next(runes) {\n\t\t\t\trunes += 1\n\t\t\t}\n\t\t} else {\n\t\t\tfor r := next(runes); !unicode.IsSpace(r) && !atEnd(runes); r = next(runes) {\n\t\t\t\trunes += 1\n\t\t\t}\n\t\t}\n\t}\n\te.Delete(runes * direction)\n}",
"func (t *Trie) Delete(word string) {\n\tn := t.find(word)\n\tif n.end == true {\n\t\tn.end = false\n\t}\n}",
"func (m mark) deleteCharBackward() mark {\n\tb := m.buf\n\tvar deleted rune // for undo info\n\n\tif m.atLineStart() {\n\t\tif m.atFirstLine() {\n\t\t\treturn m\n\t\t}\n\t\tm.line -= 1\n\t\tm.pos = m.lastCharPos() + 1\n\t\tm.joinLineBelow()\n\t\tdeleted = '\\n'\n\t} else {\n\t\tm.pos -= 1\n\t\tdeleted = m.char()\n\t\tb.text[m.line] = append(b.text[m.line][:m.pos], b.text[m.line][m.pos+1:]...)\n\t}\n\n\t// add undo info\n\tb.lastInsert.oldText.prependChar(deleted)\n\tif m.isBefore(*b.lastInsert.start) {\n\t\tb.lastInsert.start = &m\n\t}\n\n\treturn m\n}",
"func (i *Input) Backspace() {\n\tif i.Pos > 0 {\n\t\ti.Buffer.RemoveRune(i.Pos - 1)\n\t\ti.Pos--\n\t}\n}",
"func cleanUpTheWord(word string) string {\r\n\tword = strings.ToLower(word)\r\n\tword = strings.TrimSpace(word)\r\n\treturn word\r\n}",
"func (tv *TextView) CursorBackspaceWord(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\ttv.SetCursorShow(org)\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\ttv.CursorBackwardWord(steps)\n\ttv.ScrollCursorToCenterIfHidden()\n\ttv.RenderCursor(true)\n\ttv.Buf.DeleteText(tv.CursorPos, org, true, true)\n}",
"func (edit *TextEditor) Delete(where int, len int) {\n\t/* delete characters while updating undo */\n\tedit.makeundoDelete(where, len)\n\n\tedit.Buffer = strDeleteText(edit.Buffer, where, len)\n\tedit.HasPreferredX = false\n}",
"func deleted(s split, save func(string)) {\n\tif s.R != \"\" {\n\t\tsave(s.L + s.R[1:])\n\t}\n}",
"func (s StringSet) Del(x string) { delete(s, x) }",
"func deleteChar(input string, index int) string {\n\treturn input[:index] + input[index+1:]\n}",
"func TrimThe(s string) string {\n\tconst short = 2\n\ta := strings.Split(s, space)\n\tif len(a) < short {\n\t\treturn s\n\t}\n\tl := a[len(a)-1]\n\tif strings.EqualFold(a[0], \"the\") && (l == \"BBS\" || l == \"FTP\") {\n\t\treturn strings.Join(a[1:], space) // drop \"the\" prefix\n\t}\n\treturn s\n}",
"func DeleteWhitespace(s string) string {\n\tif len(s) == 0 {\n\t\treturn s\n\t}\n\n\tvar hasSpace bool\n\tvar buf bytes.Buffer\n\tfor _, r := range s {\n\t\tif unicode.IsSpace(r) {\n\t\t\thasSpace = true\n\t\t} else {\n\t\t\tbuf.WriteRune(r)\n\t\t}\n\t}\n\n\tif !hasSpace {\n\t\treturn s\n\t}\n\treturn buf.String()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show completions for the current line.
|
// completeLine runs the registered completion callback on the current line
// and lets the user cycle through the returned candidates with TAB. The line
// buffer is updated to the selected candidate (or left unchanged on a lone
// ESC / read error), and the last rune read from the terminal is returned so
// the caller can continue processing it.
func (ls *linestate) completeLine() rune {
	// get a list of line completions
	lc := ls.ts.completionCallback(ls.String())
	if len(lc) == 0 {
		// no line completions
		beep()
		return KeycodeNull
	}
	// navigate and display the line completions
	stop := false
	// idx == len(lc) is a valid state meaning "show the original buffer"
	idx := 0
	u := utf8{}
	var r rune
	for !stop {
		if idx < len(lc) {
			// save the line buffer
			savedBuf := ls.buf
			savedPos := ls.pos
			// show the completion
			ls.buf = []rune(lc[idx])
			ls.pos = len(ls.buf)
			ls.refreshLine()
			// restore the line buffer
			ls.buf = savedBuf
			ls.pos = savedPos
		} else {
			// show the original buffer
			ls.refreshLine()
		}
		// navigate through the completions
		r = u.getRune(ls.ifd, nil)
		if r == KeycodeNull {
			// error on read
			stop = true
		} else if r == KeycodeTAB {
			// loop through the completions
			// (modulo len(lc)+1 so the cycle includes the original buffer)
			idx = (idx + 1) % (len(lc) + 1)
			if idx == len(lc) {
				beep()
			}
		} else if r == KeycodeESC {
			// could be an escape, could be an escape sequence
			if wouldBlock(ls.ifd, &timeout20ms) {
				// nothing more to read, looks like a single escape
				// re-show the original buffer
				if idx < len(lc) {
					ls.refreshLine()
				}
				// don't pass the escape key back
				r = KeycodeNull
			} else {
				// probably an escape sequence
				// update the buffer and return
				if idx < len(lc) {
					ls.buf = []rune(lc[idx])
					ls.pos = len(ls.buf)
				}
			}
			stop = true
		} else {
			// update the buffer and return
			if idx < len(lc) {
				ls.buf = []rune(lc[idx])
				ls.pos = len(ls.buf)
			}
			stop = true
		}
	}
	// return the last rune read
	return r
}
|
[
"func (s *SoracomCompleter) Complete(d prompt.Document) []prompt.Suggest {\n\tline := d.CurrentLine()\n\n\t// return from hard corded Commands as atm don't have a way to find top-level commands from API definition\n\tif isFirstCommand(line) {\n\t\ts := filterFunc(Commands, line, prompt.FilterFuzzy)\n\t\tsort.Slice(s, func(i, j int) bool {\n\t\t\treturn s[i].Text < s[j].Text\n\t\t})\n\t\treturn s\n\t}\n\n\tif endsWithPipeOrRedirect(line) {\n\t\treturn []prompt.Suggest{}\n\t}\n\n\treturn s.findSuggestions(line)\n}",
"func autoComplete(line string, pos int, key rune) (string, int, bool) {\n\tswitch key {\n\tcase 0x01: // Ctrl-A\n\t\treturn line, 0, true\n\tcase 0x03: // Ctrl-C\n\t\texit()\n\tcase 0x05: // Ctrl-E\n\t\treturn line, len(line), true\n\t// case 0x09: // Ctrl-I (Tab)\n\t// \t_, _, chain, args := cli.FindCommand(string(line))\n\t// \tline := commandChain(chain, args)\n\t// \treturn line, len(line), true\n\tcase 0x15: // Ctrl-U\n\t\treturn \"\", 0, true\n\tcase 0x1a: // Ctrl-Z\n\t\tsuspend()\n\t\t// case '?':\n\t\t// \tcmd, subcmds, chain, args := cli.FindCommand(string(line[0:pos]))\n\t\t// \tif cmd == nil {\n\t\t// \t\tterm.Write([]byte(prompt))\n\t\t// \t\tterm.Write([]byte(line))\n\t\t// \t\tterm.Write([]byte(\"?\\n\"))\n\t\t// \t}\n\t\t// \tif subcmds != nil {\n\t\t// \t\tfor _, c := range *subcmds {\n\t\t// \t\t\tterm.Write([]byte(\" \" + c.Command))\n\t\t// \t\t\tterm.Write([]byte(\"\\n\"))\n\t\t// \t\t}\n\t\t// \t} else if cmd == nil {\n\t\t// \t\tterm.Write([]byte(\"Unknown command.\\n\"))\n\t\t// \t}\n\t\t//\n\t\t// \tline := commandChain(chain, args)\n\t\t// \treturn line, len(line), true\n\t}\n\treturn \"\", 0, false\n}",
"func acceptCompletion(ed *Editor) {\n\tc := ed.completion\n\tif 0 <= c.selected && c.selected < len(c.candidates) {\n\t\ted.line, ed.dot = c.apply(ed.line, ed.dot)\n\t}\n\ted.mode = &ed.insert\n}",
"func (o *ViewOptions) Complete(cmdline cmdline.Cmdline, args []string) (err error) {\n\treturn\n}",
"func printCompletion() int {\n\tinfo := genUsage()\n\n\tswitch options.GetS(OPT_COMPLETION) {\n\tcase \"bash\":\n\t\tfmt.Printf(bash.Generate(info, \"rbinstall-clone\"))\n\tcase \"fish\":\n\t\tfmt.Printf(fish.Generate(info, \"rbinstall-clone\"))\n\tcase \"zsh\":\n\t\tfmt.Printf(zsh.Generate(info, optMap, \"rbinstall-clone\"))\n\tdefault:\n\t\treturn 1\n\t}\n\n\treturn 0\n}",
"func TabCompleter(line []rune, pos int) (lastWord string, completions []*readline.CompletionGroup) {\n\n\t// Format and sanitize input\n\targs, last, lastWord := FormatInput(line)\n\n\t// Detect base command automatically\n\tvar command = detectedCommand(args, \"\") // add *commands.Context.Menu in the string here\n\n\t// Propose commands\n\tif noCommandOrEmpty(args, last, command) {\n\t\treturn CompleteMenuCommands(last, pos)\n\t}\n\n\t// Check environment variables\n\tif envVarAsked(args, last) {\n\n\t}\n\n\t// Base command has been identified\n\tif commandFound(command) {\n\n\t\t// If user asks for completions with \"-\" / \"--\", show command options\n\t\tif optionsAsked(args, last, command) {\n\n\t\t}\n\n\t\t// Check environment variables again\n\t\tif envVarAsked(args, last) {\n\n\t\t}\n\n\t\t// Propose argument completion before anything, and if needed\n\t\tif _, yes := argumentRequired(last, args, \"\", command, false); yes { // add *commands.Context.Menu in the string here\n\n\t\t}\n\n\t\t// Then propose subcommands\n\t\tif hasSubCommands(command, args) {\n\n\t\t}\n\n\t\t// Handle subcommand if found (maybe we should rewrite this function and use it also for base command)\n\t\tif _, ok := subCommandFound(last, args, command); ok {\n\n\t\t}\n\t}\n\n\t// -------------------- IMPORTANT ------------------------\n\t// WE NEED TO PASS A DEEP COPY OF THE OBJECTS: OTHERWISE THE COMPLETION SEARCH FUNCTION WILL MESS UP WITH THEM.\n\n\treturn\n}",
"func (comp *completion) apply(line string, dot int) (string, int) {\n\ttext := comp.selectedCandidate().text\n\treturn line[:comp.begin] + text + line[comp.end:], comp.begin + len(text)\n}",
"func (c *Completer) Complete(in prompt.Document) (result []prompt.Suggest) {\n\tp := in.TextBeforeCursor()\n\tif isCommand(p) {\n\t\tresult = c.commandSuggestions(in.GetWordBeforeCursor())\n\t} else if isCommandArgument(p) && c.pathCompletionToggle {\n\t\tcur := in.GetWordBeforeCursor()\n\t\tif isAbsolutePath(cur) {\n\t\t\tresult = c.absolutePathSuggestions(cur)\n\t\t} else {\n\t\t\tresult = c.relativePathSuggestions(cur)\n\t\t}\n\t}\n\n\treturn result\n}",
"func printCompletion() int {\n\tinfo := genUsage()\n\n\tswitch options.GetS(OPT_COMPLETION) {\n\tcase \"bash\":\n\t\tfmt.Printf(bash.Generate(info, \"init-exporter\"))\n\tcase \"fish\":\n\t\tfmt.Printf(fish.Generate(info, \"init-exporter\"))\n\tcase \"zsh\":\n\t\tfmt.Printf(zsh.Generate(info, optMap, \"init-exporter\"))\n\tdefault:\n\t\treturn 1\n\t}\n\n\treturn 0\n}",
"func ShowCursor() {\n\tfmt.Printf(CSI + ShowCursorSeq)\n}",
"func (c *Completer) Complete(d prompt.Document) []prompt.Suggest {\n\t// shell lib can request duplicate Complete request with empty strings as text\n\t// skipping to avoid cache reset\n\tif d.Text == \"\" {\n\t\treturn nil\n\t}\n\n\tmeta := extractMeta(c.ctx)\n\n\targsBeforeCursor := meta.CliConfig.Alias.ResolveAliases(strings.Split(d.TextBeforeCursor(), \" \"))\n\targsAfterCursor := meta.CliConfig.Alias.ResolveAliases(strings.Split(d.TextAfterCursor(), \" \"))\n\tcurrentArg := lastArg(argsBeforeCursor) + firstArg(argsAfterCursor)\n\n\t// leftArgs contains all arguments before the one with the cursor\n\tleftArgs := trimLastArg(argsBeforeCursor)\n\t// rightWords contains all words after the selected one\n\trightWords := trimFirstArg(argsAfterCursor)\n\n\tleftWords := append([]string{\"scw\"}, leftArgs...)\n\n\tacr := AutoComplete(c.ctx, leftWords, currentArg, rightWords)\n\n\tsuggestions := []prompt.Suggest(nil)\n\trawSuggestions := []string(acr.Suggestions)\n\n\t// if first suggestion is an option, all suggestions should be options\n\t// we sort them\n\tif len(rawSuggestions) > 0 && argIsOption(rawSuggestions[0]) {\n\t\trawSuggestions = sortOptions(meta, leftArgs, rawSuggestions[0], rawSuggestions)\n\t}\n\n\tfor _, suggest := range rawSuggestions {\n\t\tsuggestions = append(suggestions, prompt.Suggest{\n\t\t\tText: suggest,\n\t\t\tDescription: getSuggestDescription(meta, leftArgs, suggest),\n\t\t})\n\t}\n\n\treturn prompt.FilterHasPrefix(suggestions, currentArg, true)\n}",
"func (n *Node) CompLineComplete(compLine string) []string {\n\t// TODO: This split might not consider files that have spaces in them.\n\tcompLineParts := strings.Split(compLine, \" \")\n\t// return compLineParts\n\tif len(compLineParts) == 0 || compLineParts[0] == \"\" {\n\t\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s > %v - Empty compLineParts\\n\", n.Name, compLine, []string{})\n\t\treturn []string{}\n\t}\n\n\t// Drop the executable or command\n\tcompLineParts = compLineParts[1:]\n\n\t// We have a possibly partial request\n\tif len(compLineParts) >= 1 {\n\t\tcurrent := compLineParts[0]\n\n\t\tcc := n.Completions(current)\n\t\tif len(compLineParts) == 1 && len(cc) > 1 {\n\t\t\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s > %v - Multiple completions for this compLine\\n\", n.Name, compLine, cc)\n\t\t\treturn cc\n\t\t}\n\t\t// Check if the current fully matches a command (child node)\n\t\tchild := n.GetChildByName(current)\n\t\tif child.Name == current && child.Kind == StringNode {\n\t\t\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s - Recursing into command %s\\n\", n.Name, compLine, current)\n\t\t\t// Recurse into the child node's completion\n\t\t\treturn child.CompLineComplete(strings.Join(compLineParts, \" \"))\n\t\t}\n\t\t// Check if the current fully matches an option\n\t\tlist := n.GetChildrenByKind(OptionsNode)\n\t\tlist = append(list, n.GetChildrenByKind(CustomNode)...)\n\t\tfor _, child := range list {\n\t\t\tfor _, e := range child.Entries {\n\t\t\t\tif current == e {\n\t\t\t\t\tif len(compLineParts) == 1 {\n\t\t\t\t\t\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s > %v - Fully Matched Option/Custom\\n\", n.Name, compLine, current)\n\t\t\t\t\t\treturn []string{current}\n\t\t\t\t\t}\n\t\t\t\t\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s - Fully matched Option/Custom %s, recursing to self\\n\", n.Name, compLine, current)\n\t\t\t\t\t// Recurse into the node self completion\n\t\t\t\t\treturn 
n.CompLineComplete(strings.Join(compLineParts, \" \"))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Get FileList completions after all other completions\n\t\tfor _, child := range n.GetChildrenByKind(FileListNode) {\n\t\t\tcc := child.SelfCompletions(current)\n\t\t\tfor _, e := range cc {\n\t\t\t\tif current == e {\n\t\t\t\t\tif len(compLineParts) == 1 {\n\t\t\t\t\t\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s > %v - Fully matched File\\n\", n.Name, compLine, current)\n\t\t\t\t\t\treturn []string{current}\n\t\t\t\t\t}\n\t\t\t\t\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s - Fully matched File %s, recursing to self\\n\", n.Name, compLine, current)\n\t\t\t\t\t// Recurse into the node self completion\n\t\t\t\t\treturn n.CompLineComplete(strings.Join(compLineParts, \" \"))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Return a partial match\n\t\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s - Partial match %s\\n\", n.Name, compLine, current)\n\t\treturn n.Completions(current)\n\t}\n\n\tDebug.Printf(\"CompLineComplete - node: %s, compLine %s > [] - Return all results\\n\", n.Name, compLine)\n\t// No partial request, return all results\n\treturn n.Completions(\"\")\n}",
"func AutoCompleteHandler(m *Message) {\n defer wg.Done()\n\n var code string = strings.Replace(m.Info.Code, \"\\\\n\", \"\\n\", -1)\n var pos *Pos = m.Info.Pos\n var path string = m.Info.Path\n var splitCode []string = strings.Split(strings.Replace(code, \"\\\\n\", \"\\n\", -1), \"\\n\")\n var byteCount int = 0\n var in *strings.Reader\n var out bytes.Buffer\n var i Info\n var unprocessed_hints []string\n var hints []string\n\n for lineNum, line := range splitCode {\n if lineNum == pos.Line {\n byteCount = byteCount + pos.Ch\n break\n }\n byteCount = byteCount + len(line) + 1 // +1 for the newline that strings.Split removed\n }\n\n // If the incoming source code is unicode, should be c%d below.\n cmd := exec.Command(\"gocode\", \"-f=csv\", \"autocomplete\", path, fmt.Sprintf(\"%d\", byteCount))\n\n in = strings.NewReader(code)\n cmd.Stdin = in\n cmd.Stdout = &out\n\n err := cmd.Run()\n if err != nil {\n log.Println(\"[AutoCompleteHandler] Couldn't exec gocode.\")\n return\n }\n\n i.Result = m.Info.Code\n i.Pos = m.Info.Pos\n\n // Parse the autocomplete results\n unprocessed_hints = strings.Split(out.String(), \"\\n\")\n hints = make([]string, len(unprocessed_hints))\n\n for i, hint := range unprocessed_hints {\n splitHints := strings.Split(hint, \",\")\n if len(splitHints) >= 5 {\n hints[i] = splitHints[2] //splitHints[4] has the function signature data, but light table can't use that yet.\n }\n }\n\n i.Hints = hints\n\n Send(m.Cid, \"editor.go.hints.result\", i)\n}",
"func (c *CommandCompleter) hintCompleter(line []rune, pos int) (hint []rune) {\n\n\t// Format and sanitize input\n\t// @args => All items of the input line\n\t// @last => The last word detected in input line as []rune\n\t// @lastWord => The last word detected in input as string\n\targs, last, lastWord := formatInput(line)\n\n\t// Detect base command automatically\n\tvar command = c.detectedCommand(args)\n\n\t// Menu hints (command line is empty, or nothing recognized)\n\tif noCommandOrEmpty(args, last, command) {\n\t\thint = menuHint(args, last)\n\t}\n\n\t// Check environment variables\n\tif yes, _, _ := c.envVarAsked(args, lastWord); yes {\n\t\treturn c.envVarHint(args, last)\n\t}\n\n\t// Command Hint\n\tif commandFound(command) {\n\n\t\t// Command hint by default (no space between cursor and last command character)\n\t\thint = c.commandHint(command)\n\n\t\t// Check environment variables\n\t\tif yes, _, _ := c.envVarAsked(args, lastWord); yes {\n\t\t\treturn c.envVarHint(args, last)\n\t\t}\n\n\t\t// If options are asked for root command, return commpletions.\n\t\tif len(command.Groups()) > 0 {\n\t\t\tvar groups = command.Groups()\n\t\t\tgroups = append(groups, c.console.CommandParser().Groups()...)\n\t\t\tfor _, grp := range groups {\n\t\t\t\tif opt, yes := optionArgRequired(args, last, grp); yes {\n\t\t\t\t\thint = c.optionArgumentHint(args, last, opt)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If user asks for completions with \"-\" or \"--\".\n\t\t// (Note: This takes precedence on any argument hints, as it is evaluated after them)\n\t\tif commandOptionsAsked(args, string(last), command) {\n\t\t\treturn optionHints(args, last, command)\n\t\t}\n\n\t\t// If command has args, hint for args\n\t\tif arg, yes := commandArgumentRequired(lastWord, args, command); yes {\n\t\t\thint = []rune(c.commandArgumentHints(args, last, command, arg))\n\t\t}\n\n\t\t// Handle subcommand if found\n\t\tif sub, ok := subCommandFound(lastWord, args, command); ok {\n\t\t\treturn 
c.handleSubcommandHints(args[1:], last, command, sub)\n\t\t}\n\n\t}\n\n\t// Handle system binaries, shell commands, etc...\n\tif commandFoundInPath(args[0]) {\n\t\t// hint = []rune(exeHint + util.ParseSummary(util.GetManPages(args[0])))\n\t}\n\n\treturn\n}",
"func (o *ViewOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) {\n\tcfg, err := config.NewLocalConfigInfo(o.contextDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\to.lci = cfg\n\treturn\n}",
"func Display(possible []string) {\n\t// Seed random integer\n\tr := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n\ti := r.Intn(len(possible))\n\tcmd := possible[i]\n\tif hints, ok := commandHints[cmd]; ok {\n\t\t// Display random hint from chosen command's list\n\t\titem := r.Intn(len(hints))\n\t\tif len(hints) > item {\n\t\t\tui.Hint(hints[item], false)\n\t\t}\n\t}\n}",
"func CursorNextLine(n int) {\n\tfmt.Printf(CSI+CursorNextLineSeq, n)\n}",
"func (p *InteractiveMultiselectPrinter) Show(text ...string) ([]string, error) {\n\t// should be the first defer statement to make sure it is executed last\n\t// and all the needed cleanup can be done before\n\tcancel, exit := internal.NewCancelationSignal(p.OnInterruptFunc)\n\tdefer exit()\n\n\tif len(text) == 0 || Sprint(text[0]) == \"\" {\n\t\ttext = []string{p.DefaultText}\n\t}\n\n\tp.text = p.TextStyle.Sprint(text[0])\n\tp.fuzzySearchMatches = append([]string{}, p.Options...)\n\n\tif p.MaxHeight == 0 {\n\t\tp.MaxHeight = DefaultInteractiveMultiselect.MaxHeight\n\t}\n\n\tmaxHeight := p.MaxHeight\n\tif maxHeight > len(p.fuzzySearchMatches) {\n\t\tmaxHeight = len(p.fuzzySearchMatches)\n\t}\n\n\tif len(p.Options) == 0 {\n\t\treturn nil, fmt.Errorf(\"no options provided\")\n\t}\n\n\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[:maxHeight]...)\n\tp.displayedOptionsStart = 0\n\tp.displayedOptionsEnd = maxHeight\n\n\tfor _, option := range p.DefaultOptions {\n\t\tp.selectOption(option)\n\t}\n\n\tarea, err := DefaultArea.Start(p.renderSelectMenu())\n\tdefer area.Stop()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not start area: %w\", err)\n\t}\n\n\tif p.Filter && (p.KeyConfirm == keys.Space || p.KeySelect == keys.Space) {\n\t\treturn nil, fmt.Errorf(\"if filter/search is active, keys.Space can not be used for KeySelect or KeyConfirm\")\n\t}\n\n\tarea.Update(p.renderSelectMenu())\n\n\tcursor.Hide()\n\tdefer cursor.Show()\n\terr = keyboard.Listen(func(keyInfo keys.Key) (stop bool, err error) {\n\t\tkey := keyInfo.Code\n\n\t\tif p.MaxHeight > len(p.fuzzySearchMatches) {\n\t\t\tmaxHeight = len(p.fuzzySearchMatches)\n\t\t} else {\n\t\t\tmaxHeight = p.MaxHeight\n\t\t}\n\n\t\tswitch key {\n\t\tcase p.KeyConfirm:\n\t\t\tif len(p.fuzzySearchMatches) == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tarea.Update(p.renderFinishedMenu())\n\t\t\treturn true, nil\n\t\tcase p.KeySelect:\n\t\t\tif len(p.fuzzySearchMatches) > 0 {\n\t\t\t\t// Select option if 
not already selected\n\t\t\t\tp.selectOption(p.fuzzySearchMatches[p.selectedOption])\n\t\t\t}\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.RuneKey:\n\t\t\tif p.Filter {\n\t\t\t\t// Fuzzy search for options\n\t\t\t\t// append to fuzzy search string\n\t\t\t\tp.fuzzySearchString += keyInfo.String()\n\t\t\t\tp.selectedOption = 0\n\t\t\t\tp.displayedOptionsStart = 0\n\t\t\t\tp.displayedOptionsEnd = maxHeight\n\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[:maxHeight]...)\n\t\t\t}\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Space:\n\t\t\tif p.Filter {\n\t\t\t\tp.fuzzySearchString += \" \"\n\t\t\t\tp.selectedOption = 0\n\t\t\t\tarea.Update(p.renderSelectMenu())\n\t\t\t}\n\t\tcase keys.Backspace:\n\t\t\t// Remove last character from fuzzy search string\n\t\t\tif len(p.fuzzySearchString) > 0 {\n\t\t\t\t// Handle UTF-8 characters\n\t\t\t\tp.fuzzySearchString = string([]rune(p.fuzzySearchString)[:len([]rune(p.fuzzySearchString))-1])\n\t\t\t}\n\n\t\t\tif p.fuzzySearchString == \"\" {\n\t\t\t\tp.fuzzySearchMatches = append([]string{}, p.Options...)\n\t\t\t}\n\n\t\t\tp.renderSelectMenu()\n\n\t\t\tif len(p.fuzzySearchMatches) > p.MaxHeight {\n\t\t\t\tmaxHeight = p.MaxHeight\n\t\t\t} else {\n\t\t\t\tmaxHeight = len(p.fuzzySearchMatches)\n\t\t\t}\n\n\t\t\tp.selectedOption = 0\n\t\t\tp.displayedOptionsStart = 0\n\t\t\tp.displayedOptionsEnd = maxHeight\n\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Left:\n\t\t\t// Unselect all options\n\t\t\tp.selectedOptions = []int{}\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Right:\n\t\t\t// Select all options\n\t\t\tp.selectedOptions = []int{}\n\t\t\tfor i := 0; i < len(p.Options); i++ {\n\t\t\t\tp.selectedOptions = append(p.selectedOptions, i)\n\t\t\t}\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Up:\n\t\t\tif len(p.fuzzySearchMatches) == 0 
{\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tif p.selectedOption > 0 {\n\t\t\t\tp.selectedOption--\n\t\t\t\tif p.selectedOption < p.displayedOptionsStart {\n\t\t\t\t\tp.displayedOptionsStart--\n\t\t\t\t\tp.displayedOptionsEnd--\n\t\t\t\t\tif p.displayedOptionsStart < 0 {\n\t\t\t\t\t\tp.displayedOptionsStart = 0\n\t\t\t\t\t\tp.displayedOptionsEnd = maxHeight\n\t\t\t\t\t}\n\t\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.selectedOption = len(p.fuzzySearchMatches) - 1\n\t\t\t\tp.displayedOptionsStart = len(p.fuzzySearchMatches) - maxHeight\n\t\t\t\tp.displayedOptionsEnd = len(p.fuzzySearchMatches)\n\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\t\t\t}\n\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Down:\n\t\t\tif len(p.fuzzySearchMatches) == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tp.displayedOptions = p.fuzzySearchMatches[:maxHeight]\n\t\t\tif p.selectedOption < len(p.fuzzySearchMatches)-1 {\n\t\t\t\tp.selectedOption++\n\t\t\t\tif p.selectedOption >= p.displayedOptionsEnd {\n\t\t\t\t\tp.displayedOptionsStart++\n\t\t\t\t\tp.displayedOptionsEnd++\n\t\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.selectedOption = 0\n\t\t\t\tp.displayedOptionsStart = 0\n\t\t\t\tp.displayedOptionsEnd = maxHeight\n\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\t\t\t}\n\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.CtrlC:\n\t\t\tcancel()\n\t\t\treturn true, nil\n\t\t}\n\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tError.Println(err)\n\t\treturn nil, fmt.Errorf(\"failed to start keyboard listener: %w\", err)\n\t}\n\n\tvar result []string\n\tfor _, selectedOption := range p.selectedOptions 
{\n\t\tresult = append(result, p.Options[selectedOption])\n\t}\n\n\treturn result, nil\n}",
"func Complete() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"complete\",\n\t\tAliases: []string{\"completion\"},\n\t\tHidden: true,\n\t\tShort: \"Generate script for auto-completion\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tif name, _ := cmd.Flags().GetString(\"executable\"); name != \"\" {\n\t\t\t\tcmd.Root().Use = name\n\t\t\t}\n\t\t\tswitch shell, _ := cmd.Flags().GetString(\"shell\"); shell {\n\t\t\tcase \"bash\":\n\t\t\t\treturn cmd.Root().GenBashCompletion(os.Stdout)\n\t\t\tcase \"zsh\":\n\t\t\t\treturn cmd.Root().GenZshCompletion(os.Stdout)\n\t\t\tcase \"fish\":\n\t\t\t\t// Fish does not accept `-` in variable names\n\t\t\t\tbuf := new(bytes.Buffer)\n\t\t\t\tif err := cmd.Root().GenFishCompletion(buf, true); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tscript := strings.Replace(buf.String(), \"__ttn-lw-\", \"__ttn_lw_\", -1)\n\t\t\t\t_, err := fmt.Print(script)\n\t\t\t\treturn err\n\t\t\tcase \"powershell\":\n\t\t\t\treturn cmd.Root().GenPowerShellCompletion(os.Stdout)\n\t\t\tdefault:\n\t\t\t\treturn errInvalidShell.WithAttributes(\"shell\", shell)\n\t\t\t}\n\t\t},\n\t}\n\tcmd.Flags().String(\"shell\", \"bash\", \"bash|zsh|fish|powershell\")\n\tcmd.Flags().String(\"executable\", \"\", \"Executable name to create generate auto completion script for\")\n\treturn cmd\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a string for the current line buffer.
|
// String returns the current contents of the line buffer as a string.
func (ls *linestate) String() string {
	return string(ls.buf)
}
|
[
"func Buffer() string {\n\treturn C.GoString(C.rl_line_buffer)\n}",
"func (buf *realLineBuffer) String() string {\n\treturn buf.b.String()\n}",
"func (m *Model) CurrentLine() string {\n\treturn string(m.value[m.row])\n}",
"func (buffer LwBuffer) String() string {\n cmd, err := buffer.Command()\n if err != nil {\n fmt.Println(err);\n return fmt.Sprint(\"LwBuffer::String: ERROR: \", err)\n }\n return cmd.String()\n}",
"func (cl *charLine) string() string {\n\treturn string(*cl)\n}",
"func (c *Console) Buffer() string {\n\tc.bufLock.Lock()\n\tdefer c.bufLock.Unlock()\n\treturn strings.Join(c.buffer, \"\\n\")\n}",
"func (s *Buffer) String() string {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.buffer.String()\n}",
"func (b *Buffer) Line(n int) string {\n\tif n >= len(b.lines) {\n\t\treturn \"\"\n\t}\n\treturn string(b.lines[n].data)\n}",
"func (h *LineHist) LineString(lineno int) string",
"func (v *Nvim) CurrentBuffer() (buffer Buffer, err error) {\n\terr = v.call(\"nvim_get_current_buf\", &buffer)\n\treturn buffer, err\n}",
"func (p *parser) line() string {\n\tif !p.valid() {\n\t\treturn \"\"\n\t}\n\tl := p.lines[p.idx]\n\tp.idx++\n\treturn l\n}",
"func (b *Buffer) Line(n int) (string, error) {\n\t// TODO: this is inefficient because we are splitting the contents of\n\t// the buffer again... even thought this may already have been done\n\t// in the content converter, b.cc\n\tlines := bytes.Split(b.Contents(), []byte(\"\\n\"))\n\tif n >= len(lines) {\n\t\treturn \"\", fmt.Errorf(\"line %v is beyond the end of the buffer (no. of lines %v)\", n, len(lines))\n\t}\n\treturn string(lines[n-1]), nil\n}",
"func (w *Wrap) String() string {\n\treturn w.buf.String()\n}",
"func (l *Line) String() string {\n\treturn fmt.Sprintf(\"Line: %s\", l.Name)\n}",
"func (b *SafeBuffer) String() string {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.String()\n}",
"func (l *StringLexer) BufferString() string {\n\treturn l.input[l.start:l.pos]\n}",
"func (b *Batch) CurrentLine(line *[]byte) {\n\tb.call(\"nvim_get_current_line\", line)\n}",
"func (eb *errBuffer) String() string {\n\teb.mu.Lock()\n\tdefer eb.mu.Unlock()\n\n\treturn string(eb.buf)\n}",
"func (w *Reflow) String() string {\n\treturn w.buf.String()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
edit a line in raw mode
|
// edit reads and edits a single line in raw mode.
// Input is read from the file descriptor ifd and the line is echoed to ofd.
// The prompt is displayed and the line buffer is pre-loaded with init.
// Emacs-style control keys, cursor keys, history navigation, completion and
// hints are handled until the line is accepted (CR or the hotkey), abandoned
// (lone ESC) or quit (ctrl-C / ctrl-D on an empty line, returning ErrQuit).
func (l *Linenoise) edit(ifd, ofd int, prompt, init string) (string, error) {
	// create the line state
	ls := newLineState(ifd, ofd, prompt, l)
	// set and output the initial line
	ls.editSet(init)
	// The latest history entry is always our current buffer
	l.HistoryAdd(ls.String())
	u := utf8{}
	for {
		// Read from ifd (the original read from syscall.Stdin, ignoring
		// the ifd parameter used everywhere else in this function).
		r := u.getRune(ifd, nil)
		if r == KeycodeNull {
			continue
		}
		// Autocomplete when the callback is set.
		// It returns the character to be handled next.
		if r == KeycodeTAB && l.completionCallback != nil {
			r = ls.completeLine()
			if r == KeycodeNull {
				continue
			}
		}
		switch r {
		case KeycodeCR, l.hotkey:
			// line accepted (or the hotkey was pressed)
			l.historyPop(-1)
			if l.hintsCallback != nil {
				// Refresh the line without hints to leave the
				// line as the user typed it after the newline.
				hcb := l.hintsCallback
				l.hintsCallback = nil
				ls.refreshLine()
				l.hintsCallback = hcb
			}
			s := ls.String()
			if r == l.hotkey {
				return s + string(l.hotkey), nil
			}
			return s, nil
		case KeycodeBS:
			// backspace: remove the character to the left of the cursor
			ls.editBackspace()
		case KeycodeESC:
			if wouldBlock(ifd, &timeout20ms) {
				// looks like a single escape- abandon the line
				l.historyPop(-1)
				return "", nil
			}
			// escape sequence
			s0 := u.getRune(ifd, &timeout20ms)
			s1 := u.getRune(ifd, &timeout20ms)
			switch s0 {
			case '[':
				// ESC [ sequence
				if s1 >= '0' && s1 <= '9' {
					// Extended escape, read additional byte.
					if s2 := u.getRune(ifd, &timeout20ms); s2 == '~' && s1 == '3' {
						// delete key
						ls.editDelete()
					}
				} else {
					switch s1 {
					case 'A': // cursor up
						ls.editSet(l.historyPrev(ls))
					case 'B': // cursor down
						ls.editSet(l.historyNext(ls))
					case 'C': // cursor right
						ls.editMoveRight()
					case 'D': // cursor left
						ls.editMoveLeft()
					case 'H': // cursor home
						ls.editMoveHome()
					case 'F': // cursor end
						ls.editMoveEnd()
					}
				}
			case 'O':
				// ESC O (SS3) sequence - some terminals send this for
				// home/end. The original compared against the digit '0',
				// which is not a sequence terminals emit.
				switch s1 {
				case 'H': // cursor home
					ls.editMoveHome()
				case 'F': // cursor end
					ls.editMoveEnd()
				}
			}
		case KeycodeCtrlA:
			// go to the start of the line
			ls.editMoveHome()
		case KeycodeCtrlB:
			// cursor left
			ls.editMoveLeft()
		case KeycodeCtrlC:
			// return QUIT
			return "", ErrQuit
		case KeycodeCtrlD:
			if len(ls.buf) > 0 {
				// delete: remove the character to the right of the cursor.
				ls.editDelete()
			} else {
				// nothing to delete - QUIT
				l.historyPop(-1)
				return "", ErrQuit
			}
		case KeycodeCtrlE:
			// go to the end of the line
			ls.editMoveEnd()
		case KeycodeCtrlF:
			// cursor right
			ls.editMoveRight()
		case KeycodeCtrlH:
			// backspace: remove the character to the left of the cursor
			ls.editBackspace()
		case KeycodeCtrlK:
			// delete to the end of the line
			ls.deleteToEnd()
		case KeycodeCtrlL:
			// clear screen
			clearScreen()
			ls.refreshLine()
		case KeycodeCtrlN:
			// next history item
			ls.editSet(l.historyNext(ls))
		case KeycodeCtrlP:
			// previous history item
			ls.editSet(l.historyPrev(ls))
		case KeycodeCtrlT:
			// swap current character with the previous
			ls.editSwap()
		case KeycodeCtrlU:
			// delete the whole line
			ls.deleteLine()
		case KeycodeCtrlW:
			// delete previous word
			ls.deletePrevWord()
		default:
			// insert the character into the line buffer
			ls.editInsert(r)
		}
	}
}
|
[
"func edit(c *cli.Context) {\n\tlines := content()\n\n\targLen := len(c.Args())\n\n\tvar ind int\n\n\tswitch argLen {\n\tcase 0:\n\t\tind = 0\n\tcase 1:\n\t\tind, _ = strconv.Atoi(c.Args()[0])\n\tdefault:\n\t\tpanic(1)\n\t}\n\n\tselectedLine := lines[ind]\n\tlineArr := strings.Split(selectedLine, \" \")\n\n\tenv := os.Environ()\n\tvimBin, err := exec.LookPath(\"vim\")\n\tcheck(err)\n\n\tplusCmd := fmt.Sprint(\"+\", lineArr[0])\n\tplussCmd := []string{\"vim\", lineArr[1], plusCmd}\n\n\tdebug(\"Whole cmd: %v Index: %v\", plussCmd, c.Args()[0])\n\n\tif true {\n\t\texecErr := syscall.Exec(vimBin, plussCmd, env)\n\t\tcheck(execErr)\n\t}\n}",
"func (l *LineReader) raw() {\n\t// STD_OUTPUT_HANDLE\n\th, errno := syscall.GetStdHandle(-11)\n\tt.h = uintptr(h)\n\tif int32(t.h) == -1 {\n\t\terr := os.Errno(errno)\n\t\tpanic(err)\n\t}\n\tok, _, e := syscall.Syscall(procGetConsoleMode, 2,\n\t\tt.h, uintptr(unsafe.Pointer(&t.origTerm)), 0)\n\tif ok == 0 {\n\t\terr := os.NewSyscallError(\"GetConsoleMode\", int(e))\n\t\tpanic(err)\n\t}\n\n\traw := t.origTerm\n\traw &^= _ENABLE_LINE_INPUT | _ENABLE_ECHO_INPUT | _ENABLE_PROCESSED_INPUT | _ENABLE_WINDOW_INPUT\n\tok, _, e = syscall.Syscall(procSetConsoleMode, 2, t.h, uintptr(raw), 0)\n\tif ok == 0 {\n\t\terr := os.NewSyscallError(\"SetConsoleMode\", int(e))\n\t\tpanic(err)\n\t}\n\n\twin := t.getConsoleInfo()\n\tt.cols = int(win.dwSize.x)\n\tt.rows = int(win.dwSize.y)\n\n\tt.buf = new(buffer)\n}",
"func EditRawOvf(r io.Reader, scheme EditScheme) (*bytes.Buffer, error) {\n\traw, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = xmlutil.ValidateFormatting(raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscanner := bufio.NewScanner(bytes.NewReader(raw))\n\n\tendOfLineChars := lfEol\n\tlenRaw := len(raw)\n\tif lenRaw > 1 && raw[lenRaw-2] == '\\r' {\n\t\tendOfLineChars = crLfEol\n\t}\n\n\tnewData := bytes.NewBuffer(nil)\n\n\tfor scanner.Scan() {\n\t\terr := processNextToken(scanner, endOfLineChars, newData, scheme)\n\t\tif err != nil {\n\t\t\treturn newData, err\n\t\t}\n\t}\n\n\terr = scanner.Err()\n\tif err != nil {\n\t\treturn newData, err\n\t}\n\n\treturn newData, nil\n}",
"func (l *Linenoise) readRaw(prompt, init string) (string, error) {\n\t// set rawmode for stdin\n\tl.enableRawMode(syscall.Stdin)\n\tdefer l.disableRawMode(syscall.Stdin)\n\t// edit the line\n\ts, err := l.edit(syscall.Stdin, syscall.Stdout, prompt, init)\n\tfmt.Printf(\"\\r\\n\")\n\treturn s, err\n}",
"func (c *Console) EditRow(id Row, text string) <-chan struct{} {\n\tch := make(chan struct{})\n\tc.jobs <- func() {\n\t\tdiff := c.rowCount - int(id)\n\t\tfmt.Fprintf(c.File, \"%c[%dA\", 27, diff)\n\t\tfmt.Fprintf(c.File, \"\\r%c[2K\", 27)\n\t\tfmt.Fprintf(c.File, \"%s\\n\", strings.TrimSpace(text))\n\t\tfmt.Fprintf(c.File, \"%c[%dB\", 27, diff)\n\t\tclose(ch)\n\t}\n\treturn ch\n}",
"func (conn *Conn) Raw(rawline string) {\n\t// Avoid command injection by enforcing one command per line.\n\tconn.out <- cutNewLines(rawline)\n}",
"func (ls *linestate) editSwap() {\n\tif ls.pos > 0 && ls.pos < len(ls.buf) {\n\t\ttmp := ls.buf[ls.pos-1]\n\t\tls.buf[ls.pos-1] = ls.buf[ls.pos]\n\t\tls.buf[ls.pos] = tmp\n\t\tif ls.pos != len(ls.buf)-1 {\n\t\t\tls.pos++\n\t\t}\n\t\tls.refreshLine()\n\t}\n}",
"func (c *client) Edit(filename string, update bool, filter Filter) error {\n\tif filename == \"-\" {\n\t\treturn EditStream(c.o.InStream, c.o.OutStream, filename, filter)\n\t}\n\n\tif update {\n\t\treturn UpdateFile(filename, filter)\n\t}\n\n\treturn ReadFile(filename, c.o.OutStream, filter)\n}",
"func EditCommand(c *cli.Context, i storage.Impl) (n storage.Note, err error) {\n\tnName, err := NoteName(c)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tn, err = i.LoadNote(nName)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tif err := writer.WriteNote(&n); err != nil {\n\t\treturn n, err\n\t}\n\n\tif err := i.SaveNote(&n); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n\n}",
"func (e *Editor) Line() (string, error) {\n\tif err := e.editReset(); err != nil {\n\t\treturn string(e.Buffer), err\n\t}\nline:\n\tfor {\n\t\tr, _, err := e.In.ReadRune()\n\t\tif err != nil {\n\t\t\treturn string(e.Buffer), err\n\t\t}\n\n\t\tswitch r {\n\t\tcase enter:\n\t\t\tbreak line\n\t\tcase ctrlC:\n\t\t\treturn string(e.Buffer), errors.New(\"try again\")\n\t\tcase backspace, ctrlH:\n\t\t\tif err := e.editBackspace(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlD:\n\t\t\tif len(e.Buffer) == 0 {\n\t\t\t\treturn string(e.Buffer), io.EOF\n\t\t\t}\n\n\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlT:\n\t\t\tif err := e.editSwap(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlB:\n\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlF:\n\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlP:\n\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlN:\n\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlU:\n\t\t\tif err := e.editReset(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlK:\n\t\t\tif err := e.editKillForward(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlA:\n\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlE:\n\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlL:\n\t\t\tif err := e.clearScreen(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tif err := e.refreshLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlW:\n\t\t\tif err := e.editDeletePrevWord(); err != nil {\n\t\t\t\treturn 
string(e.Buffer), err\n\t\t\t}\n\t\tcase esc:\n\t\t\tr, _, err := e.In.ReadRune()\n\t\t\tif err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tswitch r {\n\t\t\tcase '[':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase '0', '1', '2', '4', '5', '6', '7', '8', '9':\n\t\t\t\t\t_, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase '3':\n\t\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\n\t\t\t\t\tswitch r {\n\t\t\t\t\tcase '~':\n\t\t\t\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase 'A':\n\t\t\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'B':\n\t\t\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'C':\n\t\t\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'D':\n\t\t\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase 'O':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), 
err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase tab:\n\t\t\tif err := e.completeLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := e.editInsert(r); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(e.Buffer), nil\n}",
"func Edit(g *types.Cmd) {\n\tg.AddOptions(\"--edit\")\n}",
"func (i *UI) rawReadline(f *os.File) (string, error) {\n\tvar resultBuf []byte\n\tfor {\n\t\tvar buf [1]byte\n\t\tn, err := f.Read(buf[:])\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif n == 0 || buf[0] == '\\n' || buf[0] == '\\r' {\n\t\t\tbreak\n\t\t}\n\n\t\tif buf[0] == 3 {\n\t\t\treturn \"\", ErrInterrupted\n\t\t}\n\n\t\tif i.mask {\n\t\t\tfmt.Fprintf(i.Writer, i.maskVal)\n\t\t}\n\n\t\tresultBuf = append(resultBuf, buf[0])\n\t}\n\n\tfmt.Fprintf(i.Writer, \"\\n\")\n\treturn string(resultBuf), nil\n}",
"func (b *bufferedInterpreter) interpretLine(line string, out io.Writer) {\n\tb.buffer += line\n\tevaluated, err := b.interpreter.Interpret(\"(irb)\", b.preserveLineCount(b.buffer))\n\tif err != nil {\n\t\tif isEOFError(err) {\n\t\t\tb.buffer += \"\\n\"\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(out, \"%s\\n\", err.Error())\n\t\tb.buffer = \"\"\n\t\treturn\n\t}\n\n\tif evaluated != nil {\n\t\tfmt.Fprintf(out, \"=> %s\\n\", evaluated.Inspect())\n\t}\n\tb.buffer = \"\"\n}",
"func (rec *Record) Edit(editNotes bool) error {\n\n\tline := liner.NewLiner()\n\tdefer line.Close()\n\tline.SetCtrlCAborts(true)\n\n\tpos := -1\n\n\tvar err error\n\tvar editedValue string\n\n\taborted := fmt.Errorf(\"Aborted\")\n\n\tif editedValue, err = line.PromptWithSuggestion(config.TitleLabel, rec.Title, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Title = editedValue\n\n\tif editedValue, err = line.PromptWithSuggestion(config.AccountLabel, rec.Account, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Account = editedValue\n\n\tif editedValue, err = line.PromptWithSuggestion(config.PasswordLabel, rec.Password, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Password = editedValue\n\n\ttagsString := strings.Join(rec.Tags, \", \")\n\n\tif editedValue, err = line.PromptWithSuggestion(config.TagsLabel, tagsString, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Tags = tagsStringToArray(editedValue)\n\n\tif editedValue, err = line.PromptWithSuggestion(config.URLLabel, rec.Url, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Url = editedValue\n\n\tif editNotes {\n\t\t// handle multi-line notes\n\t\tlog.Info(\"\\n%s\", config.NotesLabel)\n\n\t\tlines := strings.Split(rec.Notes, \"\\n\")\n\n\t\twriteBack := \"\"\n\t\tlineIdx := 0\n\n\t\taborted := false\n\n\t\tfor {\n\t\t\tproposal := \"\"\n\n\t\t\tif lineIdx < len(lines) {\n\t\t\t\tproposal = lines[lineIdx]\n\t\t\t}\n\n\t\t\tinput, err := line.PromptWithSuggestion(\"\", proposal, len(proposal))\n\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"Aborted? : %v\", err)\n\t\t\t\taborted = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\twriteBack += input + \"\\n\"\n\t\t\tlineIdx++\n\t\t}\n\n\t\tif !aborted {\n\t\t\trec.Notes = writeBack\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (w *Writer) UpdateLine(fd *os.File, cursorPos int64, newValue string) error {\n\terr := writeFileAt(fd, newValue, cursorPos)\n\tif err != nil {\n\t\tcore.PrintAndExit(err)\n\t}\n\treturn nil\n}",
"func (ui *ReplApp) evalLine(line string) (quit bool) {\n\t// these vars are used in many places below\n\tengine := ui.engine\n\tcmds := ui.commands\n\toutput := ui.output\n\tmeProfileFile := ui.meProfileFile\n\tcontactsFile := ui.contactsFile\n\tprivateKeyFile := ui.privateKeyFile\n\n\t// parse raw line into command struct\n\tcmd := cmds.parse(line)\n\tif cmd.err != nil {\n\t\tif cmd.cmd != \"\" { // ignore blank lines\n\t\t\tlog.Printf(\"Error: %s\\n\", cmd.err)\n\t\t}\n\t\treturn\n\t}\n\n\t// process specific command\n\t// each block could really be in it's own function\n\t// or a function in the command definitions\n\tswitch cmd.cmd {\n\tcase \"exit\":\n\t\treturn true\n\n\tcase \"help\":\n\t\tfmt.Fprintln(output, cmds.help()) // uses commanddefs\n\n\tcase \"ip\":\n\t\tfmt.Fprintln(output, \"getting external ip...\")\n\t\tip, err := GetIP()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(output, \"external IP address:\\t%s\\nlistening on port:\\t%s\\n\", ip, engine.Me.Port)\n\n\tcase \"me\":\n\t\tswitch cmd = *cmd.leaf(); cmd.cmd {\n\t\tcase \"show\":\n\t\t\tfmt.Fprintf(output, \"I am \\\"%s\\\"\\nPubKey: %s\\nPrivKey: %s\\n\",\n\t\t\t\tengine.Me,\n\t\t\t\tbase64.RawStdEncoding.EncodeToString(engine.Me.PublicSigningKey),\n\t\t\t\tbase64.RawStdEncoding.EncodeToString(engine.PrivSignKey))\n\n\t\tcase \"edit\":\n\t\t\tp, err := ParseProfile(cmd.args[0])\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tp.PublicSigningKey = engine.Me.PublicSigningKey // preserve key\n\t\t\terr = WriteProfile(p, meProfileFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tengine.Me = p\n\n\t\t\terr = WritePrivateKey(engine.PrivSignKey, privateKeyFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\tcase \"contacts\":\n\n\t\tswitch cmd = *cmd.leaf(); cmd.cmd {\n\t\tcase \"list\":\n\t\t\tfor i, c := range engine.Contacts {\n\t\t\t\tif c != nil 
{\n\t\t\t\t\tfmt.Fprintf(output, \"%d\\t%s\\t%s\\n\", i, c,\n\t\t\t\t\t\tbase64.RawStdEncoding.EncodeToString(c.PublicSigningKey))\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"add\":\n\t\t\tvar p *Profile\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err == nil {\n\t\t\t\tif sess, ok := engine.GetSession(n); ok {\n\t\t\t\t\tp = sess.Other\n\t\t\t\t\tif p == nil {\n\t\t\t\t\t\tlog.Printf(\"session %d had a nil Other\", n)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tp, err = ParseProfile(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// overwrite contact if existing Equal() one found\n\t\t\t// TODO: do i really want to overwrite? what about having 2\n\t\t\t// contacts with different names but the same address?\n\t\t\t// i guess the question boils down to the definition of Profile\n\t\t\tif index := engine.FindContact(p); index >= 0 {\n\t\t\t\told := engine.Contacts[index]\n\t\t\t\tengine.Contacts[index] = p\n\t\t\t\tlog.Printf(\"overwrote #%d '%s' with '%s'\\n\", index, old, p)\n\t\t\t} else {\n\t\t\t\tengine.AddContact(p)\n\t\t\t\tlog.Printf(\"added %s\\n\", p)\n\t\t\t}\n\n\t\t\terr = WriteContacts(engine.Contacts, contactsFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tlog.Println(\"did not save changes to disk\")\n\t\t\t}\n\n\t\tcase \"delete\":\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tremoved := engine.Contacts[n]\n\t\t\tif engine.RemoveContact(n) {\n\t\t\t\tlog.Printf(\"deleted %s\\n\", removed)\n\n\t\t\t\terr = WriteContacts(engine.Contacts, contactsFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tlog.Println(\"did not save changes to disk\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t}\n\t\t}\n\n\tcase 
\"requests\":\n\t\tswitch cmd = *cmd.leaf(); cmd.cmd {\n\t\tcase \"list\":\n\t\t\tfor i, r := range engine.Requests {\n\t\t\t\tif r != nil {\n\t\t\t\t\tfmt.Fprintf(output, \"%d\\t%s at %s (%s ago)\\n\", i,\n\t\t\t\t\t\tr.Profile,\n\t\t\t\t\t\tr.Time().Format(time.Kitchen),\n\t\t\t\t\t\ttime.Since(r.Time()))\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"accept\":\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := engine.GetRequest(n); !ok {\n\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = engine.AcceptRequest(engine.Requests[n])\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"request accepted\")\n\n\t\tcase \"reject\":\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif engine.RemoveRequest(n) {\n\t\t\t\tlog.Println(\"removed request\")\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t}\n\t\t}\n\n\tcase \"sessions\":\n\t\tswitch cmd = *cmd.leaf(); cmd.cmd {\n\t\tcase \"list\":\n\t\t\tfor i, s := range engine.Sessions {\n\t\t\t\tif s != nil {\n\t\t\t\t\tfmt.Fprintf(output, \"%d\\t%s\\n\", i, s)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"start\":\n\t\t\tvar p *Profile\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err == nil {\n\t\t\t\tif p, _ = engine.GetContact(n); p == nil {\n\t\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp, err = ParseProfile(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif i := engine.FindContact(p); i >= 0 {\n\t\t\t\t\tp = engine.Contacts[i] // use profile from contacts if available\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\terr = engine.SendRequest(p)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"request 
sent\")\n\n\t\tcase \"drop\":\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif engine.RemoveSession(n) {\n\t\t\t\tlog.Println(\"dropped session\")\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t}\n\t\t}\n\n\tcase \"msg\":\n\t\tn, err := strconv.Atoi(cmd.args[0])\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tif _, ok := engine.GetSession(n); !ok {\n\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\treturn\n\t\t}\n\n\t\terr = engine.Sessions[n].SendText(cmd.args[1])\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"sent\")\n\n\tcase \"show\":\n\t\tn, err := strconv.Atoi(cmd.args[0])\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\ts, ok := engine.GetSession(n)\n\t\tif !ok {\n\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\treturn\n\t\t}\n\n\t\tconst num = 5\n\t\tstart := len(s.Msgs) - num\n\t\tif start < 0 {\n\t\t\tstart = 0\n\t\t} // clamp\n\t\tshow := s.Msgs[start:]\n\t\tfor i, t := range show {\n\t\t\tfmt.Fprintf(output, \"%d %s\\t| %s > %s\\n\", i,\n\t\t\t\tt.From().Name,\n\t\t\t\tt.TimeStamp.Time().Format(time.Kitchen),\n\t\t\t\tt.Message)\n\t\t}\n\n\t}\n\n\treturn\n}",
"func (e *EditCore) Edit(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) {\n\tswitch {\n\tcase ch != 0 && mod == 0:\n\t\tv.EditWrite(ch)\n\tcase key == gocui.KeySpace:\n\t\tv.EditWrite(' ')\n\tcase key == gocui.KeyBackspace || key == gocui.KeyBackspace2:\n\t\tv.EditDelete(true)\n\tcase key == gocui.KeyDelete:\n\t\tv.EditDelete(false)\n\tcase key == gocui.KeyInsert:\n\t\tv.Overwrite = !v.Overwrite\n\tcase key == gocui.KeyArrowDown:\n\t\tv.MoveCursor(0, 1, false)\n\tcase key == gocui.KeyArrowUp:\n\t\tv.MoveCursor(0, -1, false)\n\tcase key == gocui.KeyArrowLeft:\n\t\tv.MoveCursor(-1, 0, false)\n\tcase key == gocui.KeyArrowRight:\n\t\tv.MoveCursor(1, 0, false)\n\t}\n}",
"func BasicLineEdit(i int, b byte) Op {\n\tswitch b {\n\tcase '\\b', 0x7f: // ^H, ^?\n\t\treturn Erase\n\tcase 0x17: // ^W\n\t\treturn EraseWord\n\tcase 0x15: // ^U\n\t\treturn Kill\n\tcase 0x04: // ^D\n\t\tif i == 0 {\n\t\t\treturn Close\n\t\t}\n\t\treturn Flush\n\tcase '\\n':\n\t\treturn Append | Flush\n\tdefault:\n\t\treturn Append\n\t}\n}",
"func (r *rawMode) enter() (err error) {\n\tr.state, err = readline.MakeRaw(r.StdinFd)\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read a line from stdin in raw mode.
|
// readRaw reads a line from stdin in raw mode.
// The terminal is switched into raw mode for the duration of the edit and
// restored before returning. It returns the edited line, or an error if raw
// mode could not be enabled or the edit was aborted (e.g. ErrQuit).
func (l *Linenoise) readRaw(prompt, init string) (string, error) {
	// set rawmode for stdin
	// The error was previously ignored; without raw mode the editor
	// cannot function, so fail early (consistent with Loop).
	if err := l.enableRawMode(syscall.Stdin); err != nil {
		return "", err
	}
	defer l.disableRawMode(syscall.Stdin)
	// edit the line
	s, err := l.edit(syscall.Stdin, syscall.Stdout, prompt, init)
	fmt.Printf("\r\n")
	return s, err
}
|
[
"func (c *Config) readLineRaw(prompt string) (string, error) {\n\t_, err := c.stdout.Write([]byte(prompt))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif c.bufioReader == nil {\n\t\tc.bufioReader = bufio.NewReader(c.stdin)\n\t}\n\tline, err := c.bufioReader.ReadString('\\n')\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(line), nil\n}",
"func (l *LineReader) raw() {\n\t// STD_OUTPUT_HANDLE\n\th, errno := syscall.GetStdHandle(-11)\n\tt.h = uintptr(h)\n\tif int32(t.h) == -1 {\n\t\terr := os.Errno(errno)\n\t\tpanic(err)\n\t}\n\tok, _, e := syscall.Syscall(procGetConsoleMode, 2,\n\t\tt.h, uintptr(unsafe.Pointer(&t.origTerm)), 0)\n\tif ok == 0 {\n\t\terr := os.NewSyscallError(\"GetConsoleMode\", int(e))\n\t\tpanic(err)\n\t}\n\n\traw := t.origTerm\n\traw &^= _ENABLE_LINE_INPUT | _ENABLE_ECHO_INPUT | _ENABLE_PROCESSED_INPUT | _ENABLE_WINDOW_INPUT\n\tok, _, e = syscall.Syscall(procSetConsoleMode, 2, t.h, uintptr(raw), 0)\n\tif ok == 0 {\n\t\terr := os.NewSyscallError(\"SetConsoleMode\", int(e))\n\t\tpanic(err)\n\t}\n\n\twin := t.getConsoleInfo()\n\tt.cols = int(win.dwSize.x)\n\tt.rows = int(win.dwSize.y)\n\n\tt.buf = new(buffer)\n}",
"func (i *UI) rawReadline(f *os.File) (string, error) {\n\tvar resultBuf []byte\n\tfor {\n\t\tvar buf [1]byte\n\t\tn, err := f.Read(buf[:])\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif n == 0 || buf[0] == '\\n' || buf[0] == '\\r' {\n\t\t\tbreak\n\t\t}\n\n\t\tif buf[0] == 3 {\n\t\t\treturn \"\", ErrInterrupted\n\t\t}\n\n\t\tif i.mask {\n\t\t\tfmt.Fprintf(i.Writer, i.maskVal)\n\t\t}\n\n\t\tresultBuf = append(resultBuf, buf[0])\n\t}\n\n\tfmt.Fprintf(i.Writer, \"\\n\")\n\treturn string(resultBuf), nil\n}",
"func ReadLine(prompt string) (line string, err error) {\n\n\tfmt.Print(prompt)\n\n\tin := bufio.NewReader(os.Stdin)\n\n\tline, err = in.ReadString('\\n')\n\tif err != nil {\n\t\terr = ErrInterrupted\n\t} else if len(line) > 0 {\n\t\t// need to take the end of line back off\n\t\t// using scanner didn't register ctrl-c properly\n\t\tline = strings.TrimRight(line, \"\\n\\r\")\n\t}\n\treturn\n\n}",
"func (r repl) ReadLine() (string, error) {\n\tfd := int(os.Stdin.Fd())\n\toldState, err := term.MakeRaw(fd)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\terr := term.Restore(fd, oldState)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn r.term.ReadLine()\n}",
"func StdinReader(input *ringbuffer.RingBuffer) {\n\tin := bufio.NewReader(os.Stdin)\n\tfor {\n\t\tby, err := in.ReadByte()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tinput.Write([]byte{by})\n\t}\n}",
"func (r *rawMode) enter() (err error) {\n\tr.state, err = readline.MakeRaw(r.StdinFd)\n\treturn err\n}",
"func StdinPipe() io.Reader {\n\tstat, _ := os.Stdin.Stat()\n\tif (stat.Mode() & os.ModeCharDevice) == 0 {\n\t\treturn os.Stdin\n\t}\n\treturn EmptyReader()\n}",
"func (fs *FakeStdin) Readln() string {\n\treturn fs.nextLineToRead\n}",
"func (conn *Conn) Raw(rawline string) {\n\t// Avoid command injection by enforcing one command per line.\n\tconn.out <- cutNewLines(rawline)\n}",
"func ReadRaw(in io.Reader) (Data, error) {\n\treturn read(in)\n}",
"func readstdin(reader *bufio.Reader) string {\n\ttext, err := reader.ReadString('\\n')\n\tif err != nil {\n\t\tlog.Fatalf(\"Error when reading input: %v\", err)\n\t}\n\treturn strings.TrimSpace(text)\n}",
"func Readline(prompt string) (string, error) {\n\tprompt = \"\\x1b[0m\" + prompt // Prepend a 'reset' ANSI escape sequence\n\tprompt = escapeSeq.ReplaceAllString(prompt, promptStartIgnore+\"$0\"+promptEndIgnore)\n\tp := C.CString(prompt)\n\trp := C.readline(p)\n\ts := C.GoString(rp)\n\tC.free(unsafe.Pointer(p))\n\tif rp != nil {\n\t\tC.free(unsafe.Pointer(rp))\n\t\treturn s, nil\n\t}\n\treturn s, io.EOF\n}",
"func (cmd Cmd) Read(stdin io.Reader) Cmd {\n\tcmd.Stdin = stdin\n\treturn cmd\n}",
"func Readline(prompt string) (string, error) {\n\tcprompt := C.CString(prompt)\n\tdefer C.free(unsafe.Pointer(cprompt))\n\tvar sig C.int\n\tcline := C._creadline_readline(cprompt, &sig)\n\tvar line string\n\tif cline != nil {\n\t\tline = C.GoString(cline)\n\t\tdefer C.free(unsafe.Pointer(cline))\n\t}\n\tif sig != 0 {\n\t\treturn line, Interrupt\n\t}\n\treturn line, nil\n}",
"func (c *Cmd) Read(p []byte) (n int, err error) {\n\treturn c.pty.Read(p)\n}",
"func readline() (value string, err error) {\n\tvar valb []byte\n\tvar n int\n\tb := make([]byte, 1)\n\tfor {\n\t\t// read one byte at a time so we don't accidentally read extra bytes\n\t\tn, err = os.Stdin.Read(b)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif n == 0 || b[0] == '\\n' {\n\t\t\tbreak\n\t\t}\n\t\tvalb = append(valb, b[0])\n\t}\n\n\treturn strings.TrimSuffix(string(valb), \"\\r\"), nil\n}",
"func ReadLine(prompt string) (string, bool) {\n\tvar cprompt *C.char\n\tif len(prompt) != 0 {\n\t\tcprompt = C.CString(prompt)\n\t}\n\tcline := C.readline(cprompt)\n\tif cprompt != nil {\n\t\tC.free(unsafe.Pointer(cprompt))\n\t}\n\tif cline == nil {\n\t\treturn \"\", true\n\t}\n\tline := C.GoString(cline)\n\tC.free(unsafe.Pointer(cline))\n\treturn line, false\n}",
"func ReadIO(in io.Reader) (l *Line, err error) {\n\n s := bufio.NewScanner(in)\n\n // scan once to get the first line\n if s.Scan() == false || s.Err() != nil {\n return nil, s.Err()\n }\n\n l = &Line{data: s.Text()}\n scanIO(l, s)\n\n return l, s.Err()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loop calls the provided function in a loop. Exit when the function returns true or when the exit key is pressed. Returns true when the loop function completes, false for early exit.
|
// Loop repeatedly invokes fn with the terminal in raw mode.
// It returns true when fn reports completion (returns true), and false when
// the exitKey is pressed first or raw mode cannot be enabled. The terminal
// mode for stdin is restored before returning.
func (l *Linenoise) Loop(fn func() bool, exitKey rune) bool {
	// stdin must be in raw mode so key presses are seen immediately
	if err := l.enableRawMode(syscall.Stdin); err != nil {
		log.Printf("enable rawmode error %s\n", err)
		return false
	}
	// restore the terminal mode for stdin on any exit path
	defer l.disableRawMode(syscall.Stdin)
	u := utf8{}
	for {
		// poll for a key press
		if u.getRune(syscall.Stdin, &timeoutZero) == exitKey {
			// the loop has been cancelled
			return false
		}
		if fn() {
			// the loop function has completed
			return true
		}
	}
}
|
[
"func loop() bool {\n\tfmt.Printf(\"loop index %d/%d\\r\\n\", loopIndex, maxLoops)\n\ttime.Sleep(500 * time.Millisecond)\n\tloopIndex++\n\treturn loopIndex > maxLoops\n}",
"func (c *Coordinator) loop(fn loopFunc, interval time.Duration, reason string) chan struct{} {\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tticker := c.ticker(interval)\n\t\tdefer close(done)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\t// taker or renew old leases\n\t\t\tcase <-ticker():\n\t\t\t\tif err := fn(); err != nil {\n\t\t\t\t\tc.Logger.WithError(err).Errorf(\"Worker %s failed to %s\", c.WorkerId, reason)\n\t\t\t\t}\n\t\t\t// someone called stop and we need to exit.\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn done\n}",
"func (r *Repl) Loop() error {\n\tdefer func() {\n\t\tif recover() != nil {\n\t\t\t// interpreter signal or other badness, just abort.\n\t\t\tos.Exit(0)\n\t\t}\n\t}()\n\n\tvar line string\n\tfor {\n\t\ttmp, err := r.readline.Readline()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\n\t\tif err != nil && err.Error() == \"Interrupt\" {\n\t\t\tfmt.Println(\"You can press ^D or type \\\"quit\\\", \\\"exit\\\" to exit the shell\")\n\t\t\tline = \"\"\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"+++ Error %#v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tline += tmp\n\n\t\tswitch strings.TrimSpace(line) {\n\t\tcase \"quit\":\n\t\t\tfallthrough\n\t\tcase \"exit\":\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tval, err := r.builder.Run(line)\n\t\tline = \"\"\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"+++ Error: %v\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Println(val)\n\t}\n}",
"func Loop(job floc.Job) floc.Job {\n\treturn func(ctx floc.Context, ctrl floc.Control) error {\n\t\tfor {\n\t\t\t// Do not start the job if the execution is finished\n\t\t\tif ctrl.IsFinished() {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Do the job\n\t\t\terr := job(ctx, ctrl)\n\t\t\tif handledErr := handleResult(ctrl, err); handledErr != nil {\n\t\t\t\treturn handledErr\n\t\t\t}\n\t\t}\n\t}\n}",
"func (ui *ReplApp) loop() {\n\tsig := make(chan os.Signal)\n\tsignal.Notify(sig, os.Interrupt, syscall.SIGTERM)\n\n\tfor {\n\t\t// get first input from sig, console, or bot\n\t\tvar line string\n\t\tselect {\n\t\tcase <-sig:\n\t\t\treturn\n\n\t\tcase line = <-ui.console.Read():\n\t\t\tif ui.evalLine(line) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\t}\n}",
"func Timerloop() bool {\n\tticker := time.NewTicker(5 * time.Second)\n\tc := make(chan struct{})\n\tfor {\n\t\tselect {\n\t\tcase <-c:\n\t\t\treturn false\n\t\tcase t := <-ticker.C:\n\t\t\tfmt.Println(\"tick\", t)\n\t\t\t//timer is on\n\t\t\treturn true\n\t\t}\n\t}\n\n\ttime.Sleep(3 * time.Millisecond)\n\tticker.Stop()\n\t// fmt.Println(\"Ticker stopped\")\n\treturn false\n}",
"func EnterLoop() {\n\tC.native_loop()\n}",
"func (c *Canvas) Run(f func(e interface{}) bool) {\n\tif f == nil {\n\t\tf = func(e interface{}) bool {\n\t\t\tswitch e := e.(type) {\n\t\t\tcase paint.Event:\n\t\t\t\tc.Paint()\n\t\t\tcase key.Event:\n\t\t\t\tswitch e.Code {\n\t\t\t\tcase key.CodeEscape, key.CodeQ:\n\t\t\t\t\tif e.Direction == key.DirPress {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\n\t\t}\n\t}\n\tfor {\n\t\te := c.win.NextEvent()\n\t\tif !f(e) {\n\t\t\treturn\n\t\t}\n\t}\n}",
"func checkLoop() {\n\n\ttimeout := make(chan bool, 1)\n\tgo func() {\n\t\ttime.Sleep(timer * time.Second)\n\t\ttimeout <- true\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\tclearTerm()\n\t\t\tgo makeDing(25)\n\t\t\tfor i := 0; i < 25; i++ {\n\t\t\t\tfmt.Println(\"You are not worthy :(\")\n\t\t\t\ttime.Sleep(150 * time.Millisecond)\n\t\t\t}\n\t\t\treadInput(\"Press enter to continue...\")\n\t\t\tclearTerm()\n\t\t\treturn\n\t\tdefault:\n\t\t\tif win := readAttempt(); win {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}",
"func RepeatFnBool(\n\tctx context.Context,\n\tfn func() bool,\n) <-chan bool {\n\tch := make(chan bool)\n\n\tgo func() {\n\t\tdefer close(ch)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase ch <- fn():\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}",
"func LoopSync(tk *time.Ticker, f func()) {\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)\n\n\tfor {\n\t\tselect {\n\t\tcase <-tk.C:\n\t\t\tf()\n\t\tcase <-sigChan:\n\t\t\tlog.Println(\"shutdown received, stopping loop\")\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (a *App) Loop() {\n\tif err := a.gui.MainLoop(); err != nil && err != gocui.ErrQuit {\n\t\tlog.Panicln(err)\n\t}\n}",
"func ExitLoop() {\n\t//void IupExitLoop (void);\n\tC.IupExitLoop()\n}",
"func (w *WebShell) Loop(ctx context.Context, cancel context.CancelFunc, exitCh chan struct{}) {\n\tinBuf := bufio.NewReader(os.Stdin)\n\tprompt := \"go-forward-shell$ \"\n\n\tfor {\n\t\tfmt.Print(prompt)\n\t\tcmd, err := inBuf.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error reading in the command: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tcmd = strings.TrimSuffix(cmd, \"\\n\")\n\n\t\tswitch {\n\t\tcase cmd == \"upgrade\":\n\t\t\tprompt = \"\"\n\t\t\tw.UpgradeShell()\n\t\tcase cmd == \"exit\":\n\t\t\tcancel()\n\t\t\tfmt.Println(\"Exiting\")\n\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t\texitCh <- struct{}{}\n\n\t\t\treturn\n\t\tdefault:\n\t\t\tw.WriteCmd(cmd)\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tfmt.Println(\"Exiting\")\n\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t\texitCh <- struct{}{}\n\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t}\n}",
"func (s *statefulScheduler) loop() {\n\tfor {\n\t\ts.step()\n\t\ttime.Sleep(250 * time.Millisecond)\n\t}\n}",
"func (w *watcher) checkLoop(ctx context.Context) {\n\tfor atomic.LoadInt32(&w.state) == isRunning {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tw.Close()\n\t\t\tw.dispose()\n\t\t\treturn\n\t\tdefault:\n\t\t\tw.check(ctx)\n\t\t\ttime.Sleep(w.interval)\n\t\t}\n\t}\n}",
"func DoLoop() {\n\tMatch('p')\n\tl1 := NewLabel()\n\tl2 := NewLabel()\n\tPostLabel(l1)\n\tBlock(l2)\n\tMatch('e')\n\tEmitLn(\"BRA \" + l1)\n\tPostLabel(l2)\n}",
"func (o *runner) loop() {\n\tlog.WithField(\"name\", o.tickable.GetName()).Infof(\"Starting loop\")\n\tdefer o.wg.Done()\n\tdefer log.WithField(\"name\", o.tickable.GetName()).Infof(\"Exiting loop\")\n\n\t// Ensure running flag is set\n\to.running = true\n\n\t// Start the tickableIFC before entering the loop\n\tif err := o.tickable.start(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tticker := time.NewTicker(o.tickable.getPeriod())\n\tvar tickerErr error = nil\n\n\tfor o.running {\n\t\tticker = time.NewTicker(o.tickable.getPeriod())\n\t\tselect {\n\t\tcase o.running = <-o.ch:\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"name\": o.tickable.GetName(),\n\t\t\t\t\"period\": o.tickable.getPeriod(),\n\t\t\t}).Debug(\"idle\")\n\t\tcase <-ticker.C:\n\t\t\t//log.WithFields(log.Fields{\n\t\t\t//\t\"name\": o.tickableIFC.GetName(),\n\t\t\t//\t\"period\": o.tickableIFC.getPeriod(),\n\t\t\t//}).Debugf(\"tick\")\n\t\t\ttickerErr = o.tickable.tick()\n\t\t\tif tickerErr != nil {\n\t\t\t\tlog.Error(tickerErr)\n\t\t\t\to.running = false\n\t\t\t}\n\t\t}\n\t}\n\n\t// Stop the tickableIFC before exiting\n\tif tickerErr == nil {\n\t\tif err := o.tickable.stop(); err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n\n\t// Ensure running flag is reset\n\to.running = false\n}",
"func remoteCmdLoop(ctx *context.T, stdin io.Reader) func() {\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tscanner := bufio.NewScanner(stdin)\n\t\tfor scanner.Scan() {\n\t\t\tif scanner.Text() == \"close\" {\n\t\t\t\tclose(done)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn func() { <-done }\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Key Code Debugging: PrintKeycodes prints scan codes on the screen for debugging/development purposes.
|
// PrintKeycodes runs an interactive debugging mode that echoes the scan
// code of every keypress for debugging/development purposes. Typing the
// literal string "quit" exits the mode.
func (l *Linenoise) PrintKeycodes() {
	fmt.Printf("Linenoise key codes debugging mode.\n")
	fmt.Printf("Press keys to see scan codes. Type 'quit' at any time to exit.\n")
	// Raw mode is required so keypresses arrive unbuffered.
	if err := l.enableRawMode(syscall.Stdin); err != nil {
		log.Printf("enable rawmode error %s\n", err)
		return
	}
	// Restore the terminal mode for stdin on the way out.
	defer l.disableRawMode(syscall.Stdin)

	// Display names for the non-printable runes we recognize.
	special := map[rune]string{
		KeycodeCR:  "\\r",
		KeycodeTAB: "\\t",
		KeycodeESC: "ESC",
		KeycodeLF:  "\\n",
		KeycodeBS:  "BS",
	}

	u := utf8{}
	// Sliding window of the last four runes, used to detect "quit".
	var last [4]rune
	for {
		r := u.getRune(syscall.Stdin, nil)
		if r == KeycodeNull {
			continue
		}
		// Pick a printable representation of the rune.
		s := "?"
		if unicode.IsPrint(r) {
			s = string(r)
		} else if name, ok := special[r]; ok {
			s = name
		}
		fmt.Printf("'%s' 0x%x (%d)\r\n", s, int32(r), int32(r))
		// Shift the window and check for the quit command.
		copy(last[:], last[1:])
		last[3] = r
		if string(last[:]) == "quit" {
			return
		}
	}
}
|
[
"func printCharCode(code rune) string {\n\t// print as ASCII for printable range\n\tif code >= 0x0020 {\n\t\treturn fmt.Sprintf(`%c`, code)\n\t}\n\t// Otherwise print the escaped form. e.g. `\"\\\\u0007\"`\n\treturn fmt.Sprintf(`\\u%04X`, code)\n}",
"func (scode Scancode) Code() Code {\n\treturn Code(C.SDL_GetKeyFromScancode(C.SDL_Scancode(scode)))\n}",
"func inputCodes(seqs ...[]uint8) {\n\targs := []string{\"controlvm\", *name, \"keyboardputscancode\"}\n\targs = append(args, codes(seqs...)...)\n\tvbox(args...)\n}",
"func wdekeyFromCode(e *sdl.KeyboardEvent) string {\n\t//TODO Implement this\n\treturn \"\"\n}",
"func loadPrintable(keys map[rune]kb.Key, keycodeConverterMap, domKeyMap map[string][]string, layoutBuf []byte, scanCodeMap map[string][]int64) error {\n\tbuf := extract(layoutBuf, \"kPrintableCodeMap\")\n\n\tmatches := printableKeyRE.FindAllStringSubmatch(string(buf), -1)\n\tfor _, m := range matches {\n\t\tdomCode := m[1]\n\n\t\t// ignore domCodes that are duplicates of other unicode characters\n\t\tif domCode == \"INTL_BACKSLASH\" || domCode == \"INTL_HASH\" || strings.HasPrefix(domCode, \"NUMPAD\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tkc, ok := keycodeConverterMap[domCode]\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"could not find key %s in keycode map\", domCode))\n\t\t}\n\n\t\tcode := getCode(kc[5])\n\t\tr1, r2 := decodeRune(m[2]), decodeRune(m[3])\n\t\taddKey(keys, r1, kb.Key{\n\t\t\tCode: code,\n\t\t\tKey: string(r1),\n\t\t\tText: string(r1),\n\t\t\tUnmodified: string(r1),\n\t\t\tPrint: true,\n\t\t}, scanCodeMap, true)\n\n\t\t// shifted value is same as non-shifted, so skip\n\t\tif r2 == r1 {\n\t\t\tcontinue\n\t\t}\n\t\t// skip for duplicate keys\n\t\tif r2 == '|' && domCode != \"BACKSLASH\" {\n\t\t\tcontinue\n\t\t}\n\n\t\taddKey(keys, r2, kb.Key{\n\t\t\tCode: code,\n\t\t\tKey: string(r2),\n\t\t\tText: string(r2),\n\t\t\tUnmodified: string(r1),\n\t\t\tShift: true,\n\t\t\tPrint: true,\n\t\t}, scanCodeMap, true)\n\t}\n\n\treturn nil\n}",
"func PrintKey() {\n\tpub, priv, err := GenerateKey()\n\tPanicError(err, \"\")\n\tPrint(\"private key: \", EncodeKey(pub))\n\tPrint(\"public key: \", EncodeKey(priv))\n}",
"func Debug(symbol rune, fmtString string, args ...interface{}) {\n\tif DebugIsEnabled(symbol) {\n\t\tfmt.Printf(fmtString, args...)\n\t}\n}",
"func cocoaKeyCode(vkcode uint16) key.Code {\n\tswitch vkcode {\n\tcase C.kVK_ANSI_A:\n\t\treturn key.CodeA\n\tcase C.kVK_ANSI_B:\n\t\treturn key.CodeB\n\tcase C.kVK_ANSI_C:\n\t\treturn key.CodeC\n\tcase C.kVK_ANSI_D:\n\t\treturn key.CodeD\n\tcase C.kVK_ANSI_E:\n\t\treturn key.CodeE\n\tcase C.kVK_ANSI_F:\n\t\treturn key.CodeF\n\tcase C.kVK_ANSI_G:\n\t\treturn key.CodeG\n\tcase C.kVK_ANSI_H:\n\t\treturn key.CodeH\n\tcase C.kVK_ANSI_I:\n\t\treturn key.CodeI\n\tcase C.kVK_ANSI_J:\n\t\treturn key.CodeJ\n\tcase C.kVK_ANSI_K:\n\t\treturn key.CodeK\n\tcase C.kVK_ANSI_L:\n\t\treturn key.CodeL\n\tcase C.kVK_ANSI_M:\n\t\treturn key.CodeM\n\tcase C.kVK_ANSI_N:\n\t\treturn key.CodeN\n\tcase C.kVK_ANSI_O:\n\t\treturn key.CodeO\n\tcase C.kVK_ANSI_P:\n\t\treturn key.CodeP\n\tcase C.kVK_ANSI_Q:\n\t\treturn key.CodeQ\n\tcase C.kVK_ANSI_R:\n\t\treturn key.CodeR\n\tcase C.kVK_ANSI_S:\n\t\treturn key.CodeS\n\tcase C.kVK_ANSI_T:\n\t\treturn key.CodeT\n\tcase C.kVK_ANSI_U:\n\t\treturn key.CodeU\n\tcase C.kVK_ANSI_V:\n\t\treturn key.CodeV\n\tcase C.kVK_ANSI_W:\n\t\treturn key.CodeW\n\tcase C.kVK_ANSI_X:\n\t\treturn key.CodeX\n\tcase C.kVK_ANSI_Y:\n\t\treturn key.CodeY\n\tcase C.kVK_ANSI_Z:\n\t\treturn key.CodeZ\n\tcase C.kVK_ANSI_1:\n\t\treturn key.Code1\n\tcase C.kVK_ANSI_2:\n\t\treturn key.Code2\n\tcase C.kVK_ANSI_3:\n\t\treturn key.Code3\n\tcase C.kVK_ANSI_4:\n\t\treturn key.Code4\n\tcase C.kVK_ANSI_5:\n\t\treturn key.Code5\n\tcase C.kVK_ANSI_6:\n\t\treturn key.Code6\n\tcase C.kVK_ANSI_7:\n\t\treturn key.Code7\n\tcase C.kVK_ANSI_8:\n\t\treturn key.Code8\n\tcase C.kVK_ANSI_9:\n\t\treturn key.Code9\n\tcase C.kVK_ANSI_0:\n\t\treturn key.Code0\n\t// TODO: move the rest of these codes to constants in key.go\n\t// if we are happy with them.\n\tcase C.kVK_Return:\n\t\treturn key.CodeReturnEnter\n\tcase C.kVK_Escape:\n\t\treturn key.CodeEscape\n\tcase C.kVK_Delete:\n\t\treturn key.CodeDeleteBackspace\n\tcase C.kVK_Tab:\n\t\treturn key.CodeTab\n\tcase C.kVK_Space:\n\t\treturn 
key.CodeSpacebar\n\tcase C.kVK_ANSI_Minus:\n\t\treturn key.CodeHyphenMinus\n\tcase C.kVK_ANSI_Equal:\n\t\treturn key.CodeEqualSign\n\tcase C.kVK_ANSI_LeftBracket:\n\t\treturn key.CodeLeftSquareBracket\n\tcase C.kVK_ANSI_RightBracket:\n\t\treturn key.CodeRightSquareBracket\n\tcase C.kVK_ANSI_Backslash:\n\t\treturn key.CodeBackslash\n\t// 50: Keyboard Non-US \"#\" and ~\n\tcase C.kVK_ANSI_Semicolon:\n\t\treturn key.CodeSemicolon\n\tcase C.kVK_ANSI_Quote:\n\t\treturn key.CodeApostrophe\n\tcase C.kVK_ANSI_Grave:\n\t\treturn key.CodeGraveAccent\n\tcase C.kVK_ANSI_Comma:\n\t\treturn key.CodeComma\n\tcase C.kVK_ANSI_Period:\n\t\treturn key.CodeFullStop\n\tcase C.kVK_ANSI_Slash:\n\t\treturn key.CodeSlash\n\tcase C.kVK_CapsLock:\n\t\treturn key.CodeCapsLock\n\tcase C.kVK_F1:\n\t\treturn key.CodeF1\n\tcase C.kVK_F2:\n\t\treturn key.CodeF2\n\tcase C.kVK_F3:\n\t\treturn key.CodeF3\n\tcase C.kVK_F4:\n\t\treturn key.CodeF4\n\tcase C.kVK_F5:\n\t\treturn key.CodeF5\n\tcase C.kVK_F6:\n\t\treturn key.CodeF6\n\tcase C.kVK_F7:\n\t\treturn key.CodeF7\n\tcase C.kVK_F8:\n\t\treturn key.CodeF8\n\tcase C.kVK_F9:\n\t\treturn key.CodeF9\n\tcase C.kVK_F10:\n\t\treturn key.CodeF10\n\tcase C.kVK_F11:\n\t\treturn key.CodeF11\n\tcase C.kVK_F12:\n\t\treturn key.CodeF12\n\t// 70: PrintScreen\n\t// 71: Scroll Lock\n\t// 72: Pause\n\t// 73: Insert\n\tcase C.kVK_Home:\n\t\treturn key.CodeHome\n\tcase C.kVK_PageUp:\n\t\treturn key.CodePageUp\n\tcase C.kVK_ForwardDelete:\n\t\treturn key.CodeDeleteForward\n\tcase C.kVK_End:\n\t\treturn key.CodeEnd\n\tcase C.kVK_PageDown:\n\t\treturn key.CodePageDown\n\tcase C.kVK_RightArrow:\n\t\treturn key.CodeRightArrow\n\tcase C.kVK_LeftArrow:\n\t\treturn key.CodeLeftArrow\n\tcase C.kVK_DownArrow:\n\t\treturn key.CodeDownArrow\n\tcase C.kVK_UpArrow:\n\t\treturn key.CodeUpArrow\n\tcase C.kVK_ANSI_KeypadClear:\n\t\treturn key.CodeKeypadNumLock\n\tcase C.kVK_ANSI_KeypadDivide:\n\t\treturn key.CodeKeypadSlash\n\tcase C.kVK_ANSI_KeypadMultiply:\n\t\treturn 
key.CodeKeypadAsterisk\n\tcase C.kVK_ANSI_KeypadMinus:\n\t\treturn key.CodeKeypadHyphenMinus\n\tcase C.kVK_ANSI_KeypadPlus:\n\t\treturn key.CodeKeypadPlusSign\n\tcase C.kVK_ANSI_KeypadEnter:\n\t\treturn key.CodeKeypadEnter\n\tcase C.kVK_ANSI_Keypad1:\n\t\treturn key.CodeKeypad1\n\tcase C.kVK_ANSI_Keypad2:\n\t\treturn key.CodeKeypad2\n\tcase C.kVK_ANSI_Keypad3:\n\t\treturn key.CodeKeypad3\n\tcase C.kVK_ANSI_Keypad4:\n\t\treturn key.CodeKeypad4\n\tcase C.kVK_ANSI_Keypad5:\n\t\treturn key.CodeKeypad5\n\tcase C.kVK_ANSI_Keypad6:\n\t\treturn key.CodeKeypad6\n\tcase C.kVK_ANSI_Keypad7:\n\t\treturn key.CodeKeypad7\n\tcase C.kVK_ANSI_Keypad8:\n\t\treturn key.CodeKeypad8\n\tcase C.kVK_ANSI_Keypad9:\n\t\treturn key.CodeKeypad9\n\tcase C.kVK_ANSI_Keypad0:\n\t\treturn key.CodeKeypad0\n\tcase C.kVK_ANSI_KeypadDecimal:\n\t\treturn key.CodeKeypadFullStop\n\tcase C.kVK_ANSI_KeypadEquals:\n\t\treturn key.CodeKeypadEqualSign\n\tcase C.kVK_F13:\n\t\treturn key.CodeF13\n\tcase C.kVK_F14:\n\t\treturn key.CodeF14\n\tcase C.kVK_F15:\n\t\treturn key.CodeF15\n\tcase C.kVK_F16:\n\t\treturn key.CodeF16\n\tcase C.kVK_F17:\n\t\treturn key.CodeF17\n\tcase C.kVK_F18:\n\t\treturn key.CodeF18\n\tcase C.kVK_F19:\n\t\treturn key.CodeF19\n\tcase C.kVK_F20:\n\t\treturn key.CodeF20\n\t// 116: Keyboard Execute\n\tcase C.kVK_Help:\n\t\treturn key.CodeHelp\n\t// 118: Keyboard Menu\n\t// 119: Keyboard Select\n\t// 120: Keyboard Stop\n\t// 121: Keyboard Again\n\t// 122: Keyboard Undo\n\t// 123: Keyboard Cut\n\t// 124: Keyboard Copy\n\t// 125: Keyboard Paste\n\t// 126: Keyboard Find\n\tcase C.kVK_Mute:\n\t\treturn key.CodeMute\n\tcase C.kVK_VolumeUp:\n\t\treturn key.CodeVolumeUp\n\tcase C.kVK_VolumeDown:\n\t\treturn key.CodeVolumeDown\n\t// 130: Keyboard Locking Caps Lock\n\t// 131: Keyboard Locking Num Lock\n\t// 132: Keyboard Locking Scroll Lock\n\t// 133: Keyboard Comma\n\t// 134: Keyboard Equal Sign\n\t// ...: Bunch of stuff\n\tcase C.kVK_Control:\n\t\treturn key.CodeLeftControl\n\tcase 
C.kVK_Shift:\n\t\treturn key.CodeLeftShift\n\tcase C.kVK_Option:\n\t\treturn key.CodeLeftAlt\n\tcase C.kVK_Command:\n\t\treturn key.CodeLeftGUI\n\tcase C.kVK_RightControl:\n\t\treturn key.CodeRightControl\n\tcase C.kVK_RightShift:\n\t\treturn key.CodeRightShift\n\tcase C.kVK_RightOption:\n\t\treturn key.CodeRightAlt\n\t// TODO key.CodeRightGUI\n\tdefault:\n\t\treturn key.CodeUnknown\n\t}\n}",
"func PrintDisassembled(code string) error {\n\tscript, err := hex.DecodeString(code)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tit := NewInstructionIterator(script)\n\tfor it.Next() {\n\t\tif it.Arg() != nil && 0 < len(it.Arg()) {\n\t\t\tfmt.Printf(\"%06v: %v 0x%x\\n\", it.PC(), it.Op(), it.Arg())\n\t\t} else {\n\t\t\tfmt.Printf(\"%06v: %v\\n\", it.PC(), it.Op())\n\t\t}\n\t}\n\treturn it.Error()\n}",
"func (d *KeycodeRepository) ListKeycodes() ([]repository_keycode.Detail, error) {\n\n\tkeycodes, err := d.client.ListKeycodes()\n\tif err != nil {\n\t\treturn []repository_keycode.Detail{}, err\n\t}\n\n\tdetails := make([]repository_keycode.Detail, len(keycodes))\n\tfor i, keycode := range keycodes {\n\t\tvar applyTime *time.Time\n\t\tif keycode.ApplyTime != nil {\n\t\t\tt, err := ptypes.Timestamp(keycode.ApplyTime)\n\t\t\tif err != nil {\n\t\t\t\treturn []repository_keycode.Detail{}, errors.Errorf(\"convert timestamp failed: %s\", err.Error())\n\t\t\t}\n\t\t\tapplyTime = &t\n\t\t}\n\t\tvar expireTime *time.Time\n\t\tif keycode.ExpireTime != nil {\n\t\t\tt, err := ptypes.Timestamp(keycode.ExpireTime)\n\t\t\tif err != nil {\n\t\t\t\treturn []repository_keycode.Detail{}, errors.Errorf(\"convert timestamp failed: %s\", err.Error())\n\t\t\t}\n\t\t\texpireTime = &t\n\t\t}\n\t\tdetails[i] = repository_keycode.Detail{\n\t\t\tKeycode: keycode.Keycode,\n\t\t\tKeycodeType: keycode.KeycodeType,\n\t\t\tKeycodeVersion: keycode.KeycodeVersion,\n\t\t\tApplyTime: applyTime,\n\t\t\tExpireTime: expireTime,\n\t\t\tLicenseState: keycode.LicenseState,\n\t\t\tRegistered: keycode.Registered,\n\t\t}\n\t}\n\n\treturn details, nil\n}",
"func Print(key string, val ...interface{}) {\n\tfmt.Printf(\"\\n\\n%c[%d;%d;%dm[key]%c[0m %c[%d;%d;%dm %s %c[0m \\n\",\n\t\t0x1B, 1, 40, 31, 0x1B,\n\t\t0x1B, 1, 40, 33, key, 0x1B,\n\t)\n\tfmt.Printf(\"%c[%d;%d;%dm[val]%c[0m %c[%d;%d;%dm %+v %c[0m \\n\\n\",\n\t\t0x1B, 1, 40, 31, 0x1B,\n\t\t0x1B, 1, 40, 33, val, 0x1B,\n\t)\n}",
"func (c RekeyClient) DebugShowRekeyStatus(ctx context.Context, sessionID int) (err error) {\n\t__arg := DebugShowRekeyStatusArg{SessionID: sessionID}\n\terr = c.Cli.Call(ctx, \"keybase.1.rekey.debugShowRekeyStatus\", []interface{}{__arg}, nil, 0*time.Millisecond)\n\treturn\n}",
"func debugPrint(text string) {\n\tif (debugon) {\n\t\tfmt.Println(text)\n\t}\n}",
"func ShowLanguageCodes() {\n\tstr := \"\"\n\tfor _, code := range langCodeSortSeq {\n\t\tnewstr := code + \" | \" + languageDic[code] + \"\\n\"\n\t\tstr += newstr\n\t}\n\tfmt.Println(str)\n}",
"func Display(m charset.Decoder) {\n\tspecials := map[rune]string{\n\t\t'\\n': \"LF\",\n\t\t'\\r': \"CR\",\n\t\t'\\f': \"FF\",\n\t\t' ': \"SP\",\n\t\t0x1b: \"ESC\",\n\t\t0x20ac: \" €\",\n\t}\n\tfmt.Printf(\" \")\n\tfor c := 0; c < 8; c++ {\n\t\tfmt.Printf(\"0x%d_ \", c)\n\t}\n\tfmt.Println(\"\")\n\tfor r := 0; r < 0x10; r++ {\n\t\tfmt.Printf(\"0x_%x: \", r)\n\t\tfor c := 0; c < 8; c++ {\n\t\t\tk := byte(c*0x10 + r)\n\t\t\tif v, ok := m[k]; ok {\n\t\t\t\tif s, ok := specials[v]; ok {\n\t\t\t\t\tfmt.Printf(\"%3s \", s)\n\t\t\t\t} else if v >= 0x400 {\n\t\t\t\t\tfmt.Printf(\"%04x \", v)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\" %c \", v)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\" \")\n\t\t\t}\n\t\t}\n\t\tfmt.Println()\n\t}\n}",
"func (br *blessingRoots) DebugString() string {\n\tconst format = \"%-47s %s\\n\"\n\tb := bytes.NewBufferString(fmt.Sprintf(format, \"Public key\", \"Pattern\"))\n\tvar s rootSorter\n\tbr.mu.RLock()\n\tdefer br.mu.RUnlock()\n\tfor keyBytes, patterns := range br.state {\n\t\tkey, err := security.UnmarshalPublicKey([]byte(keyBytes))\n\t\tif err != nil {\n\t\t\treturn fmt.Sprintf(\"failed to decode public key: %v\", err)\n\t\t}\n\t\ts = append(s, &root{key, fmt.Sprintf(\"%v\", patterns)})\n\t}\n\tsort.Sort(s)\n\tfor _, r := range s {\n\t\tb.WriteString(fmt.Sprintf(format, r.key, r.patterns))\n\t}\n\treturn b.String()\n}",
"func SendKeyBoardInput(key string) {\n\tfmt.Println(\"SendKeyBoardInput\")\n\tfmt.Println(\"I am going to print '\" + key + \"'\")\n\tvar i = 0\n\tn := len(key)\n\tparameter := make([]string, n+1)\n\tparameter[0] = \"key\"\n\tfor i = 0; i < n; i++ {\n\t\tswitch key[i] {\n\t\tcase 0x20:\n\t\t\tparameter[i+1] = \"space\"\n\t\tdefault:\n\t\t\tparameter[i+1] = string(key[i])\n\t\t}\n\t}\n\tfmt.Println(parameter)\n\tcmd := exec.Command(\"xdotool\", parameter...)\n\tcore.ExecuteCommand(cmd)\n}",
"func (d *Parallels9Driver) SendKeyScanCodes(vmName string, codes ...string) error {\n\tvar stdout, stderr bytes.Buffer\n\n\tif codes == nil || len(codes) == 0 {\n\t\tlog.Printf(\"No scan codes to send\")\n\t\treturn nil\n\t}\n\n\tf, err := ioutil.TempFile(\"\", \"prltype\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(f.Name())\n\n\tscript := []byte(Prltype)\n\t_, err = f.Write(script)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := prepend(vmName, codes)\n\targs = prepend(f.Name(), args)\n\tcmd := exec.Command(\"/usr/bin/python\", args...)\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr = cmd.Run()\n\n\tstdoutString := strings.TrimSpace(stdout.String())\n\tstderrString := strings.TrimSpace(stderr.String())\n\n\tif _, ok := err.(*exec.ExitError); ok {\n\t\terr = fmt.Errorf(\"prltype error: %s\", stderrString)\n\t}\n\n\tlog.Printf(\"stdout: %s\", stdoutString)\n\tlog.Printf(\"stderr: %s\", stderrString)\n\n\treturn err\n}",
"func dsaKeyPrinter(name string, val *big.Int, buf *bytes.Buffer) {\n buf.WriteString(fmt.Sprintf(\"%16s%s:\", \"\", name))\n for i, b := range val.Bytes() {\n if (i % 15) == 0 {\n buf.WriteString(fmt.Sprintf(\"\\n%20s\", \"\"))\n }\n buf.WriteString(fmt.Sprintf(\"%02x\", b)\n if i != len(val.Bytes())-1 { \n buf.WriteString(\":\")\n }\n }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetCompletionCallback sets the completion callback function.
|
// SetCompletionCallback sets the completion callback function.
// fn takes the current line and returns the candidate completions;
// presumably it is invoked on tab-completion — confirm against the
// line-editing loop.
func (l *Linenoise) SetCompletionCallback(fn func(string) []string) {
	l.completionCallback = fn
}
|
[
"func (client *SyncClient) SetCompletionListener(callback syncCompletionListener) error {\n\tif callback == nil {\n\t\tC.obx_sync_listener_complete(client.cClient, nil, nil)\n\t\tcCallbackUnregister(client.cCallbacks[cCallbackIndexCompletion])\n\t} else {\n\t\tif cbId, err := cCallbackRegister(cVoidCallback(func() {\n\t\t\tcallback()\n\t\t})); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tC.obx_sync_listener_complete(client.cClient, (*C.OBX_sync_listener_complete)(cVoidCallbackDispatchPtr), cbId.cPtr())\n\t\t\tclient.swapCallbackId(cCallbackIndexCompletion, cbId)\n\t\t}\n\t}\n\treturn nil\n}",
"func SetCompletionHandler(c CompletionHandler) {\n\tcomplHandler = c\n}",
"func (a *Audible) SetFinishedCallback(callbackFn func(*Audible)) {\n\ta.finishedCallback = callbackFn\n}",
"func (q *Queue) SetCallback(cb Callback) error {\n\tq.cb = cb\n\treturn nil\n}",
"func (cli *grpcClient) SetResponseCallback(resCb Callback) {\n\tcli.mtx.Lock()\n\tcli.resCb = resCb\n\tcli.mtx.Unlock()\n}",
"func SetCallback(handler CallbackCommand) {\n\tcallback = handler\n}",
"func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {\n\treqRes.mtx.Lock()\n\n\tif reqRes.done {\n\t\treqRes.mtx.Unlock()\n\t\tcb(reqRes.Response)\n\t\treturn\n\t}\n\n\treqRes.cb = cb\n\treqRes.mtx.Unlock()\n}",
"func (t *StreamTransport) SetCallBack(cb StreamTransportCallbacker) {\n\tt.cb = cb\n}",
"func (mb *MenuButton) SetCallBack(callback Callback) {\n\tmb.callback = callback\n}",
"func (_this *IDBTransaction) SetOnComplete(listener func(event *domcore.Event, currentTarget *IDBTransaction)) js.Func {\n\tcb := eventFuncIDBTransaction_domcore_Event(listener)\n\t_this.Value_JS.Set(\"oncomplete\", cb)\n\treturn cb\n}",
"func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {\n\t_m.Called(_a0)\n}",
"func (req *Request) SetCallback(parserName string) *Request {\n\treq.Callback = parserName\n\treturn req\n}",
"func (s *Session) SetCloseCallback(callback func(*Session)) {\n\ts.closeCallback = callback\n}",
"func (pbr *PbReader) SetCb(cb audio.OnDataCb) {\n\tpbr.Lock()\n\tdefer pbr.Unlock()\n\tpbr.callback = cb\n}",
"func (s *Strava) SetCallbackHandler(\n\tsuccess func(auth *strava.AuthorizationResponse, w http.ResponseWriter, r *http.Request),\n\tfailure func(err error, w http.ResponseWriter, r *http.Request)) {\n\tpath, _ := s.authenticator.CallbackPath()\n\thttp.HandleFunc(path, s.authenticator.HandlerFunc(success, failure))\n}",
"func SetLicenseCallback(callbackFunction func(int)) int {\n\tstatus := C.SetLicenseCallback((C.CallbackType)(unsafe.Pointer(C.licenseCallbackCgoGateway)))\n\tlicenseCallbackFuncion = callbackFunction\n\treturn int(status)\n}",
"func SetAttemptedCompletionFunction(fn func(line string, start, end int) []string) {\n\tcompletionFunction = fn\n\tif fn == nil {\n\t\tC.rl_attempted_completion_function = nil\n\t\treturn\n\t}\n\tC.rl_attempted_completion_function = (*C.rl_completion_func_t)(C._creadline_complete)\n}",
"func (s *State) SetWordsetCallback(callback func() error) {\n\ts.mx.Lock()\n\tdefer s.mx.Unlock()\n\ts.sendWordsetCallback = callback\n}",
"func (v *Vox) SetCb(cb audio.OnDataCb) {\n\tv.Lock()\n\tdefer v.Unlock()\n\tv.cb = cb\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetHintsCallback sets the hints callback function.
|
// SetHintsCallback sets the hints callback function.
// fn takes the current line and returns the hint to display (or nil
// for no hint).
func (l *Linenoise) SetHintsCallback(fn func(string) *Hint) {
	l.hintsCallback = fn
}
|
[
"func (w *Window) SetHints(hints hints) {\n\tw.hints |= hints\n}",
"func (v *TextView) SetInputHints(hints InputHints) {\n\tC.gtk_text_view_set_input_hints(v.native(), C.GtkInputHints(hints))\n}",
"func (f *Font) SetHinting(hinting int) {\n\tC.TTF_SetFontHinting(f.f, C.int(hinting))\n}",
"func SetCallback(ih Ihandle, name string, fn interface{}) uintptr {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\t//Icallback IupSetCallback (Ihandle* ih, const char *name, Icallback func);\n\t//Ihandle* IupSetCallbacks(Ihandle* ih, const char *name, Icallback func, ...);\n\n\tswitch fn.(type) {\n\tcase uintptr:\n\t\treturn uintptr(unsafe.Pointer(C.__IupSetCallback(ih.ptr(), c_name, unsafe.Pointer(fn.(uintptr)))))\n\tdefault:\n\t\treturn uintptr(unsafe.Pointer(C.__IupSetCallback(ih.ptr(), c_name, unsafe.Pointer(syscall.NewCallbackCDecl(fn)))))\n\t}\n}",
"func (jbobject *ClientConfiguration) SetSocketBufferSizeHints(a int, b int) {\n\t_, err := jbobject.CallMethod(javabind.GetEnv(), \"setSocketBufferSizeHints\", javabind.Void, a, b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}",
"func InitHint(hint Hint, value HintValue) {\n\tC.glfwInitHint(C.int(hint), C.int(value))\n}",
"func (o *FontOptions) SetHintStyle(hintStyle HintStyle) {\n\tC.cairo_font_options_set_hint_style(o.native, C.cairo_hint_style_t(hintStyle))\n}",
"func (callbacks *JvmtiCallbacks) SetCallback(eventId int, fn func(JvmtiEnv, ...JvmtiArg)) {\n\tif eventId <= JVMTI_MAX_EVENT_TYPE_VAL && eventId >= JVMTI_MIN_EVENT_TYPE_VAL {\n\t\tcallbacks.cbs[eventId-JVMTI_MIN_EVENT_TYPE_VAL] = fn\n\t\tC.EnableJvmtiCallback(unsafe.Pointer(_lib.jvmti), C.int(eventId))\n\t} else if eventId <= JVMTI_MAX_FAKE_EVENT_TYPE_VAL && eventId >= JVMTI_MIN_FAKE_EVENT_TYPE_VAL {\n\t\tC.EnableJvmtiCallback(unsafe.Pointer(_lib.jvmti), C.int(eventId))\n\t} else {\n\t\tfmt.Println(\"GO: Bad event id \", eventId)\n\t}\n}",
"func (l *Linenoise) SetCompletionCallback(fn func(string) []string) {\n\tl.completionCallback = fn\n}",
"func InitHint(hint Hint, value int) {\n\tC.glfwInitHint(C.int(hint), C.int(value))\n}",
"func (o *ReaderOptions) SetPasswordCallback(cb func() string) {\n\to.cb = cb\n}",
"func WithCursorHints(hints ...CursorHint) CursorOption {\n\treturn func(c *CursorConfig) {\n\t\tfor _, hint := range hints {\n\t\t\thint(&c.Hints)\n\t\t}\n\t}\n}",
"func (device *LaserRangeFinderBricklet) SetDistanceCallbackThreshold(option ThresholdOption, min uint16, max uint16) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, option)\n\tbinary.Write(&buf, binary.LittleEndian, min)\n\tbinary.Write(&buf, binary.LittleEndian, max)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionSetDistanceCallbackThreshold), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func SetCallback(handler CallbackCommand) {\n\tcallback = handler\n}",
"func (ao *AggregateOptions) SetHint(h interface{}) *AggregateOptions {\n\tao.Hint = h\n\treturn ao\n}",
"func WithCursorHintPredicate(f CursorPredicateFunc) CursorHint {\n\treturn func(o *CursorHints) {\n\t\to.PredicateFn = f\n\t}\n}",
"func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {\n\treqRes.mtx.Lock()\n\n\tif reqRes.done {\n\t\treqRes.mtx.Unlock()\n\t\tcb(reqRes.Response)\n\t\treturn\n\t}\n\n\treqRes.cb = cb\n\treqRes.mtx.Unlock()\n}",
"func (o *FontOptions) SetHintMetrics(hintMetrics HintMetrics) {\n\tC.cairo_font_options_set_hint_metrics(o.native, C.cairo_hint_metrics_t(hintMetrics))\n}",
"func (mn *MockNetwork) SetConnectCallback(network.ConnectCallback) {\n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetMultiline sets multiline editing mode.
|
func (l *Linenoise) SetMultiline(mode bool) {
l.mlmode = mode
}
|
[
"func (v *Label) SetSingleLineMode(mode bool) {\n\tC.gtk_label_set_single_line_mode(v.native(), gbool(mode))\n}",
"func (d *Discord) SupportsMultiline() bool {\n\treturn true\n}",
"func (v *Nvim) SetBufferLines(buffer Buffer, start int, end int, strictIndexing bool, replacement [][]byte) error {\n\treturn v.call(\"nvim_buf_set_lines\", nil, buffer, start, end, strictIndexing, replacement)\n}",
"func ValidateMultiline(multiline *Multiline) error {\n\tif multiline == nil {\n\t\treturn nil\n\t}\n\n\t// Check if valid regular expression for multiline\n\t_, err := regexp.Compile(multiline.Pattern)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check match mode\n\tmatch := multiline.Match\n\tswitch match {\n\tcase \"after\":\n\tcase \"before\":\n\tdefault:\n\t\treturn errors.New(\"Configuration: Invalid match type in multiline mode: \" + match)\n\t}\n\treturn nil\n}",
"func (f *StandardFormatter) SetAppendNewLine(appendNewLine bool) {\n\tf.mutex.Lock()\n\tdefer f.mutex.Unlock()\n\tf.appendNewLine = appendNewLine\n}",
"func (b *Batch) SetBufferLines(buffer Buffer, start int, end int, strictIndexing bool, replacement [][]byte) {\n\tb.call(\"nvim_buf_set_lines\", nil, buffer, start, end, strictIndexing, replacement)\n}",
"func (edit Editor) SetHighlightActiveLine(shouldHighlight bool) {\n\tedit.Call(\"setHighlightActiveLine\", shouldHighlight)\n}",
"func (r *Readline) SetEmacs() {\n\tif r.instance.IsVimMode() {\n\t\tr.instance.SetVimMode(false)\n\t\tprintln(\"mode changed to emacs\")\n\t} else {\n\t\tprintln(\"mode already is emacs\")\n\t}\n}",
"func (v *Label) SetLineWrapMode(wrapMode pango.WrapMode) {\n\tC.gtk_label_set_line_wrap_mode(v.native(), C.PangoWrapMode(wrapMode))\n}",
"func (f *FileTarget) SetMaxBufferLine(maxBufferLine int) {\n f.maxBufferLine = maxBufferLine\n}",
"func (ls *State) SetLine(src []rune) {\n\tls.Src = src\n\tls.Lex = nil\n\tls.Comments = nil\n\tls.Pos = 0\n}",
"func (b *Blueprint) MultiLineString(column string) *ColumnDefinition {\n\treturn b.addColumn(\"multilinestring\", column, nil)\n}",
"func (t *TextDisplay) SetLinenumberSize(s int) {\n\tC.go_fltk_TextDisplay_set_linenumber_size((*C.Fl_Text_Display)(t.ptr()), C.int(s))\n}",
"func NewMultiLineParser(flushTimeout time.Duration, parser parser.Parser, lineHandler LineHandler, lineLimit int) *MultiLineParser {\n\treturn &MultiLineParser{\n\t\tinputChan: make(chan *DecodedInput),\n\t\tbuffer: bytes.NewBuffer(nil),\n\t\tflushTimeout: flushTimeout,\n\t\tlineHandler: lineHandler,\n\t\tlineLimit: lineLimit,\n\t\tparser: parser,\n\t}\n}",
"func (s *VisvalingamSimplifier) MultiLineString(mls orb.MultiLineString) orb.MultiLineString {\n\treturn multiLineString(s, mls)\n}",
"func (t *Textarea) SetHeight(val int) {\n\tt.Call(\"setAttribute\", \"style\", \"height:\"+strconv.Itoa(val)+\"px\")\n}",
"func NewMultiLineEntry(text string, placeholder string, isReadOnly bool) (entry *widget.Entry) {\n\tentry = widget.NewMultiLineEntry()\n\tentry.SetText(text)\n\tentry.SetPlaceHolder(placeholder)\n\tentry.SetReadOnly(isReadOnly)\n\treturn\n}",
"func (edit Editor) ToggleCommentLines() {\n\tedit.Call(\"toggleCommentLines\")\n}",
"func (s *Set) SetMaxLineSize(i int) {\n\ts.MaxLineSize = i\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetHotkey sets the hotkey that causes line editing to exit. The hotkey will be appended to the line buffer but not displayed.
|
func (l *Linenoise) SetHotkey(key rune) {
l.hotkey = key
}
|
[
"func (g *Gui) SetKeybinding(viewname string, key tcell.Key, ch rune, mod tcell.ModMask, handler func(*Gui, *View) error) error {\n\t// var kb *eventBinding\n\n\t// k, ch, err := getKey(key)\n\t// if err != nil {\n\t// \treturn err\n\t// }\n\t// TODO: get rid of this ugly mess\n\t//switch key {\n\t//case termbox.MouseLeft:\n\t//\tkb = newMouseBinding(viewname, tcell.Button1, mod, handler)\n\t//case termbox.MouseMiddle:\n\t//\tkb = newMouseBinding(viewname, tcell.Button3, mod, handler)\n\t//case termbox.MouseRight:\n\t//\tkb = newMouseBinding(viewname, tcell.Button2, mod, handler)\n\t//case termbox.MouseWheelUp:\n\t//\tkb = newMouseBinding(viewname, tcell.WheelUp, mod, handler)\n\t//case termbox.MouseWheelDown:\n\t//\tkb = newMouseBinding(viewname, tcell.WheelDown, mod, handler)\n\t//default:\n\t//\tkb = newKeybinding(viewname, key, ch, mod, handler)\n\t//}\n\tkb := newKeybinding(viewname, key, ch, mod, handler)\n\tg.eventBindings = append(g.eventBindings, kb)\n\treturn nil\n}",
"func (c *Chip8) SetKeyDown(index int) {\n\tc.key[index] = 1\n}",
"func (r AKC695X) SetLine(b bool) {\n\tif b {\n\t\tr.reg[0x06] |= 1 << 1\n\t} else {\n\t\tr.reg[0x06] &^= 1 << 1\n\t}\n}",
"func (g *Gui) SetKeybinding(viewname string, key interface{}, mod Modifier, handler func(*Gui, *View) error) error {\n\tvar kb *keybinding\n\n\tk, ch, err := getKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkb = newKeybinding(viewname, k, ch, mod, handler)\n\tg.keybindings = append(g.keybindings, kb)\n\treturn nil\n}",
"func (edit Editor) SetHighlightGutterLine() {\n\tedit.Call(\"setHighlightGutterLine\")\n}",
"func (self *Key) SetCtrlKeyA(member bool) {\n self.Object.Set(\"ctrlKey\", member)\n}",
"func (edit Editor) SetKeyboardHandler(keyboardHandler string) {\n\tedit.Call(\"setKeyboardHandler\", keyboardHandler)\n}",
"func (c *Chip8) SetKeyUp(index int) {\n\tc.key[index] = 0\n}",
"func (r *Readline) SetVim() {\n\tif !r.instance.IsVimMode() {\n\t\tr.instance.SetVimMode(true)\n\t\tprintln(\"mode changed to vim\")\n\t} else {\n\t\tprintln(\"mode already is vim\")\n\t}\n}",
"func (*XMLDocument) SetOnkeypress(onkeypress func(window.Event)) {\n\tmacro.Rewrite(\"$_.onkeypress = $1\", onkeypress)\n}",
"func (env *Environment) SetFocus(module *Module) {\n\tif env != module.env {\n\t\tpanic(\"SetFocus to module from another environment\")\n\t}\n\tC.EnvFocus(env.env, module.modptr)\n}",
"func (recv *IOChannel) SetLineTerm(lineTerm string, length int32) {\n\tc_line_term := C.CString(lineTerm)\n\tdefer C.free(unsafe.Pointer(c_line_term))\n\n\tc_length := (C.gint)(length)\n\n\tC.g_io_channel_set_line_term((*C.GIOChannel)(recv.native), c_line_term, c_length)\n\n\treturn\n}",
"func (*XMLDocument) SetOnkeydown(onkeydown func(window.Event)) {\n\tmacro.Rewrite(\"$_.onkeydown = $1\", onkeydown)\n}",
"func ClearLine() {\n\temitEscape(\"K\", 2)\n}",
"func EnableFocus(w io.Writer) error {\n\t_, err := fmt.Fprint(w, \"\\x1b[?1004h\")\n\treturn err\n}",
"func (t *Terminal) handleKey(key int) (line string, ok bool) {\n\tswitch key {\n\tcase keyBackspace:\n\t\tif t.pos == 0 {\n\t\t\treturn\n\t\t}\n\t\tt.pos--\n\t\tt.moveCursorToPos(t.pos)\n\n\t\tcopy(t.line[t.pos:], t.line[1+t.pos:])\n\t\tt.line = t.line[:len(t.line)-1]\n\t\tif t.echo {\n\t\t\tt.writeLine(t.line[t.pos:])\n\t\t}\n\t\tt.queue(eraseUnderCursor)\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyAltLeft:\n\t\t// move left by a word.\n\t\tif t.pos == 0 {\n\t\t\treturn\n\t\t}\n\t\tt.pos--\n\t\tfor t.pos > 0 {\n\t\t\tif t.line[t.pos] != ' ' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.pos--\n\t\t}\n\t\tfor t.pos > 0 {\n\t\t\tif t.line[t.pos] == ' ' {\n\t\t\t\tt.pos++\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.pos--\n\t\t}\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyAltRight:\n\t\t// move right by a word.\n\t\tfor t.pos < len(t.line) {\n\t\t\tif t.line[t.pos] == ' ' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.pos++\n\t\t}\n\t\tfor t.pos < len(t.line) {\n\t\t\tif t.line[t.pos] != ' ' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.pos++\n\t\t}\n\t\tt.moveCursorToPos(t.pos)\n\tcase keyEnter:\n\t\tt.moveCursorToPos(len(t.line))\n\t\tt.queue([]byte(\"\\r\\n\"))\n\t\tline = string(t.line)\n\t\tok = true\n\t\tt.line = t.line[:0]\n\t\tt.pos = 0\n\t\tt.cursorX = 0\n\t\tt.cursorY = 0\n\t\tt.maxLine = 0\n\n\tdefault:\n\t\tif t.handleAutoComplete(key) {\n\t\t\treturn\n\t\t}\n\t\tif !isPrintable(key) {\n\t\t\treturn\n\t\t}\n\t\tif len(t.line) == maxLineLength {\n\t\t\treturn\n\t\t}\n\t\tif len(t.line) == cap(t.line) {\n\t\t\tnewLine := make([]byte, len(t.line), 2*(1+len(t.line)))\n\t\t\tcopy(newLine, t.line)\n\t\t\tt.line = newLine\n\t\t}\n\t\tt.line = t.line[:len(t.line)+1]\n\t\tcopy(t.line[t.pos+1:], t.line[t.pos:])\n\t\tt.line[t.pos] = byte(key)\n\t\tif t.echo {\n\t\t\tt.writeLine(t.line[t.pos:])\n\t\t}\n\t\tt.pos++\n\t\tt.moveCursorToPos(t.pos)\n\t}\n\treturn\n}",
"func setCursorLoc(x, y int) {\n\tfmt.Printf(\"\\033[%v;%vH\", y, x)\n}",
"func (h *BufPane) JumpLine() bool {\n\tInfoBar.Prompt(\"> \", \"goto \", \"Command\", nil, func(resp string, canceled bool) {\n\t\tif !canceled {\n\t\t\th.HandleCommand(resp)\n\t\t}\n\t})\n\treturn true\n}",
"func SetFocus(ih Ihandle) Ihandle {\n\t//Ihandle* IupSetFocus (Ihandle* ih);\n\treturn mkih(C.IupSetFocus(ih.ptr()))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Command History pop an entry from the history list
|
func (l *Linenoise) historyPop(idx int) string {
if idx < 0 {
// pop the last entry
idx = len(l.history) - 1
}
if idx >= 0 && idx < len(l.history) {
s := l.history[idx]
l.history = append(l.history[:idx], l.history[idx+1:]...)
return s
}
// nothing to pop
return ""
}
|
[
"func (hist *history) push(command string) int {\n\t// If we reused a history item\n\tif hist.index < len(hist.commandHistory) {\n\t\t// If we did not edit this command, then swap it to the end\n\t\tif hist.commandHistory[hist.index] == command {\n\t\t\thist.reuse(hist.index)\n\t\t\thist.index = len(hist.commandHistory)\n\t\t\treturn hist.index\n\t\t}\n\t}\n\n\tif hist.isFull() {\n\t\t// Create new slice\n\t\tnew := make([]string, 0, cap(hist.commandHistory))\n\t\t// Copy history into it TODO: more efficent way?\n\t\tnew = append(new, hist.commandHistory[1:]...)\n\n\t\thist.commandHistory = append(new, command)\n\t} else {\n\t\thist.commandHistory = append(hist.commandHistory, command)\n\t}\n\thist.index = len(hist.commandHistory)\n\treturn len(hist.commandHistory) - 1\n}",
"func (cl *cmdLine) pushHistory() {\n\tif len(cl.buf) > 1 {\n\t\tcl.history.pushFront(cl.buf[:len(cl.buf)-1]) // exclude the last ' '\n\t}\n}",
"func (h *LineHist) Pop(lineno int)",
"func (z *zpoolctl) History(ctx context.Context, options string, pool string) *execute {\n\targs := []string{\"history\"}\n\tif len(options) > 0 {\n\t\targs = append(args, options)\n\t}\n\tif len(pool) > 0 {\n\t\targs = append(args, pool)\n\t}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}",
"func (h *InfoPane) HistoryDown() {\n\th.DownHistory(h.History[h.PromptType])\n}",
"func (h *History) Back() string {\n\tvar result string\n\th.commandPointer = Min(len(h.commandLines)-1, Max(-1, h.commandPointer))\n\tif h.commandPointer >= 0 {\n\t\tresult = h.commandLines[h.commandPointer]\n\t\th.commandPointer = h.commandPointer - 1\n\t}\n\treturn result\n}",
"func OptionHistory(x []string) Option {\n\treturn func(p *Prompt) error {\n\t\tp.history.histories = x\n\t\tp.history.Clear()\n\t\treturn nil\n\t}\n}",
"func (app *Application) historyCmd(p Project, cmd *historyApiCommand) (*response, error) {\n\n\tresp := newResponse(\"history\")\n\n\tchannel := cmd.Channel\n\n\tbody := &HistoryBody{\n\t\tChannel: channel,\n\t}\n\n\tresp.Body = body\n\n\thistory, err := app.History(p.Name, channel)\n\tif err != nil {\n\t\tresp.Err(ErrInternalServerError)\n\t\treturn resp, nil\n\t}\n\n\tbody.Data = history\n\n\treturn resp, nil\n}",
"func (h *InfoPane) HistorySearchDown() {\n\th.SearchDownHistory(h.History[h.PromptType])\n}",
"func (self *Commands) Back() error {\n\tif _, err := self.browser.Tab().RPC(`Page`, `getNavigationHistory`, nil); err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn err\n\t}\n}",
"func (cd *circularDependency) pop() {\n\t//log.Println(\"Removing:\", cd.Dependents[len(cd.Dependents)-1])\n\tcd.dependents = cd.dependents[:len(cd.dependents)-1]\n}",
"func (h *History) Clear() {\n\th.tmp = make([]string, len(h.histories))\n\tfor i := range h.histories {\n\t\th.tmp[i] = h.histories[i]\n\t}\n\th.tmp = append(h.tmp, \"\")\n\th.selected = len(h.tmp) - 1\n}",
"func (app *application) historyCmd(p *project, cmd *historyApiCommand) (*response, error) {\n\n\tresp := newResponse(\"history\")\n\n\tchannel := cmd.Channel\n\n\tif channel == \"\" {\n\t\tlogger.ERROR.Println(\"channel required\")\n\t\treturn nil, ErrInvalidApiMessage\n\t}\n\n\tbody := map[string]interface{}{\n\t\t\"channel\": channel,\n\t}\n\n\tresp.Body = body\n\n\tchOpts, err := app.channelOpts(p.Name, channel)\n\tif err != nil {\n\t\tresp.Err(err)\n\t\treturn resp, nil\n\t}\n\n\tif chOpts.HistorySize <= 0 || chOpts.HistoryLifetime <= 0 {\n\t\tresp.Err(ErrNotAvailable)\n\t\treturn resp, nil\n\t}\n\n\thistory, err := app.history(p.Name, channel)\n\tif err != nil {\n\t\tresp.Err(ErrInternalServerError)\n\t\treturn resp, nil\n\t}\n\n\tresp.Body = map[string]interface{}{\n\t\t\"channel\": channel,\n\t\t\"data\": history,\n\t}\n\treturn resp, nil\n}",
"func (f *CommandRunnerRunCommandFunc) History() []CommandRunnerRunCommandFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]CommandRunnerRunCommandFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (ptr *terminalPrompter) AppendHistory(command string) {\n\tptr.State.AppendHistory(command)\n}",
"func (s *RemoteStack) History(ctx context.Context, pageSize, page int) ([]UpdateSummary, error) {\n\t// Note: Find a way to allow options for ShowSecrets(true) that doesn't require loading the project.\n\treturn s.stack.History(ctx, pageSize, page, opthistory.ShowSecrets(false))\n}",
"func uxHistory(cmd cli.Command) cli.Command {\n\thistoryFlags := []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-history\",\n\t\t\tUsage: \"do not create a history entry\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"history.author\",\n\t\t\tUsage: \"author value for the history entry\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"history.comment\",\n\t\t\tUsage: \"comment for the history entry\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"history.created\",\n\t\t\tUsage: \"created value for the history entry\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"history.created_by\",\n\t\t\tUsage: \"created_by value for the history entry\",\n\t\t},\n\t}\n\tcmd.Flags = append(cmd.Flags, historyFlags...)\n\n\toldBefore := cmd.Before\n\tcmd.Before = func(ctx *cli.Context) error {\n\t\t// --no-history is incompatible with other --history.* options.\n\t\tif ctx.Bool(\"no-history\") {\n\t\t\tfor _, flag := range historyFlags {\n\t\t\t\tif name := flag.GetName(); name == \"no-history\" {\n\t\t\t\t\tcontinue\n\t\t\t\t} else if ctx.IsSet(name) {\n\t\t\t\t\treturn errors.Errorf(\"--no-history and --%s may not be specified together\", name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Include any old befores set.\n\t\tif oldBefore != nil {\n\t\t\treturn oldBefore(ctx)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn cmd\n}",
"func (h *History) Last() string {\n\tvar result string\n\tif len(h.commandLines) > 0 {\n\t\th.commandPointer = len(h.commandLines) - 1\n\t\tresult = h.commandLines[h.commandPointer]\n\t}\n\treturn result\n}",
"func viewHistory(board *chess.Board, includeEval bool, resultString string) {\n\tvar input rune\n\tindex := len(board.History) - 1\n\ttempBoard := board\n\treader := bufio.NewReader(os.Stdin)\n\tfor {\n\t\t//fmt.Print(\"\\033[1;1H\")\n\t\t//fmt.Print(\"\\033[0J\")\n\t\ttempBoard.PrintBoard(false)\n\t\tif includeEval {\n\t\t\tfmt.Println(\"Basic: \", eval.EvaluateBasic(tempBoard), \" With tables: \", eval.EvaluateWithTables(tempBoard))\n\t\t}\n\t\tfmt.Println(\"Options: a: move back one ply, d: move backward one ply, q: quit\")\n\t\tif index == -1 {\n\t\t\tfmt.Println(\"Beginning of game!\")\n\t\t} else if index == len(board.History)-1 {\n\t\t\tfmt.Println(\"End of game!\")\n\t\t\tfmt.Println(resultString)\n\t\t}\n\t\tinput, _, _ = reader.ReadRune()\n\t\tif input == 'q' {\n\t\t\tquit(board)\n\t\t} else if input == 'a' {\n\t\t\tif index == -1 { //reset index so doesn't run of end of History at first of game\n\t\t\t\tindex++\n\t\t\t}\n\t\t\tindex-- //adjust index\n\t\t\tif index == -1 {\n\t\t\t\ttempBoard, _ = chess.ParseFen(\"\") //index of -1 means initial position (not recorded in History)\n\t\t\t} else {\n\t\t\t\ttempBoard, _ = chess.ParseFen(board.History[index])\n\t\t\t}\n\n\t\t} else if input == 'd' {\n\t\t\tif index == len(board.History)-1 { //reset index\n\t\t\t\tindex--\n\t\t\t}\n\t\t\tindex++ //adjust index\n\t\t\tif index == -1 {\n\t\t\t\ttempBoard, _ = chess.ParseFen(board.History[index+1])\n\t\t\t} else {\n\t\t\t\ttempBoard, _ = chess.ParseFen(board.History[index])\n\t\t\t}\n\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set a history entry by index number.
|
func (l *Linenoise) historySet(idx int, line string) {
l.history[len(l.history)-1-idx] = line
}
|
[
"func (n Nodes) SetIndex(i int, node *Node)",
"func (list *List) Set(index int, value interface{}) {\n\n\tif !list.withinRange(index) {\n\t\t// Append\n\t\tif index == list.size {\n\t\t\tlist.Add(value)\n\t\t}\n\t\treturn\n\t}\n\n\tfoundElement := list.first\n\tfor e := 0; e != index; {\n\t\te, foundElement = e+1, foundElement.next\n\t}\n\tfoundElement.value = value\n}",
"func (hat *HashedArrayTree) Set(index int, value interface{}) error {\n\tif !hat.validIndex(index) {\n\t\treturn ErrIndexOutOfRange\n\t}\n\tti, li := hat.topIndex(index), hat.leafIndex(index)\n\that.top[ti][li] = value\n\treturn nil\n}",
"func (cache *cache) SetValue(index, value int) error {\n\tbs := []byte(strconv.Itoa(value))\n\tsetItem := memcache.Item{\n\t\tKey: strconv.Itoa(index),\n\t\tValue: bs}\n\tif err := cache.client.Set(&setItem); err != nil {\n\t\treturn err\n\t}\n\tif MaxCalculatedIndex < index {\n\t\tMaxCalculatedIndex = index\n\t}\n\treturn nil\n}",
"func (e *Engine) setIndex(index int64) {\n\te.Index = index\n\te.Name = naming.Name(index)\n}",
"func (d *V8interceptor) SetByindex(index int32, object, value *V8value, exception *string) int32 {\n\texception_ := C.cef_string_userfree_alloc()\n\tsetCEFStr(*exception, exception_)\n\tdefer func() {\n\t\t*exception = cefstrToString(exception_)\n\t\tC.cef_string_userfree_free(exception_)\n\t}()\n\treturn int32(C.gocef_v8interceptor_set_byindex(d.toNative(), C.int(index), object.toNative(), value.toNative(), (*C.cef_string_t)(exception_), d.set_byindex))\n}",
"func (el *EntryList) Set(i int, e *Entry) {\n\tif i < 0 {\n\t\tpanic(\"invalid slice index\")\n\t}\n\tel.mu.Lock()\n\tdefer el.mu.Unlock()\n\n\tif i > (cap(el.es) - 1) {\n\t\t// resize the slice\n\t\tnewEntries := make([]*Entry, i)\n\t\tcopy(newEntries, el.es)\n\t\tel.es = newEntries\n\t}\n\tel.es[i] = e\n}",
"func (room *RoomRecorder) setHistory(history []*action_.PlayerAction) {\n\troom.historyM.Lock()\n\troom._history = history\n\troom.historyM.Unlock()\n}",
"func (m *RecurrencePattern) SetIndex(value *WeekIndex)() {\n m.index = value\n}",
"func (k Keeper) SetHistoryItem(ctx sdk.Context, item types.HistoryItem) {\n\tk.modulePerms.AutoCheck(types.PermHistoryWrite)\n\n\tstore := ctx.KVStore(k.storeKey)\n\tkey := types.GetHistoryItemKey(item.MarketID, item.BlockHeight)\n\tbz := k.cdc.MustMarshalBinaryLengthPrefixed(item)\n\tstore.Set(key, bz)\n}",
"func (l *list) Set(i int, v interface{}) error {\n\tif i > -1 && i < l.len {\n\t\tfor c := l.first; c != nil; c = c.next {\n\t\t\tif i < len(c.values) {\n\t\t\t\tc.values[i] = v\n\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\ti -= len(c.values)\n\t\t}\n\t}\n\n\treturn errors.New(\"index bounds out of range\")\n}",
"func (v Value) SetIndex(i int, x interface{}) {\n\tpanic(message)\n}",
"func (mnu *MedicalNoteUpdate) SetHistory(h *History) *MedicalNoteUpdate {\n\treturn mnu.SetHistoryID(h.ID)\n}",
"func (aa *Array) Set(idx int, node interface{}) error {\n\t// do not lock if not needed\n\tif idx < 0 || idx >= aa.length {\n\t\treturn fmt.Errorf(\"index %d is larger than array size (%d)\", idx, aa.length)\n\t}\n\n\taa.mutex.Lock()\n\taa.items[idx] = node\n\taa.mutex.Unlock()\n\treturn nil\n}",
"func (f *BatchFuture) Set(index uint64, err error) {\n\tf.index = index\n\tf.err = err\n\tclose(f.waitCh)\n}",
"func lset(writer *reply.Reply, command *resp.Command, store *storage.Storage) {\n if len(command.Key) < 1 || len(command.Args) < 2 {\n writer.SendError(fmt.Errorf(\"LSET expects 3 argument(s)\"))\n return\n }\n value := store.Get(command.Key)\n if value == nil {\n writer.SendNull()\n return\n }\n index, error := strconv.Atoi(command.Args[0])\n if error == nil {\n value.(*structs.List).Set(index, command.Args[1])\n writer.SendString(\"OK\")\n } else {\n writer.SendError(fmt.Errorf(\"Index is not integer\"))\n }\n}",
"func (idx *Index) Set(row int, level int, val interface{}) error {\n\tif err := idx.ensureRowPositions([]int{row}); err != nil {\n\t\treturn fmt.Errorf(\"index.Set(): %v\", err)\n\t}\n\tif err := idx.ensureLevelPositions([]int{level}); err != nil {\n\t\treturn fmt.Errorf(\"index.Set(): %v\", err)\n\t}\n\tif _, err := values.InterfaceFactory(val); err != nil {\n\t\treturn fmt.Errorf(\"index.Set(): %v\", err)\n\t}\n\n\tidx.Levels[level].Labels.Set(row, val)\n\tidx.Levels[level].NeedsRefresh = true\n\treturn nil\n}",
"func setH3Index(hp *H3Index, res int, baseCell int, initDigit Direction) {\n\th := H3_INIT\n\tH3_SET_MODE(&h, H3_HEXAGON_MODE)\n\tH3_SET_RESOLUTION(&h, res)\n\tH3_SET_BASE_CELL(&h, baseCell)\n\tfor r := 1; r <= res; r++ {\n\t\tH3_SET_INDEX_DIGIT(&h, r, initDigit)\n\t}\n\t*hp = h\n}",
"func (arr *ArrayList) Set(index uint32, newItem ItemType) {\n if index < arr.length {\n arr.data[index] = newItem\n } else {\n panic(\"out of bounds\")\n }\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the full history list.
|
func (l *Linenoise) historyList() []string {
return l.history
}
|
[
"func (f *GitserverClientRawContentsFunc) History() []GitserverClientRawContentsFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]GitserverClientRawContentsFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *WorkerStoreResetStalledFunc) History() []WorkerStoreResetStalledFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]WorkerStoreResetStalledFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *WorkerStoreHandleFunc) History() []WorkerStoreHandleFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]WorkerStoreHandleFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *BundleManagerClientSendDBFunc) History() []BundleManagerClientSendDBFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]BundleManagerClientSendDBFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *ReleaseStoreHandleFunc) History() []ReleaseStoreHandleFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ReleaseStoreHandleFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *IndexingRepoStoreListMinimalReposFunc) History() []IndexingRepoStoreListMinimalReposFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]IndexingRepoStoreListMinimalReposFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *DBStoreHandleFunc) History() []DBStoreHandleFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]DBStoreHandleFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *DBStoreDirtyRepositoriesFunc) History() []DBStoreDirtyRepositoriesFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]DBStoreDirtyRepositoriesFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *StoreListFunc) History() []StoreListFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]StoreListFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *ReleaseStoreGetLatestFunc) History() []ReleaseStoreGetLatestFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ReleaseStoreGetLatestFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *ExtensionStoreListFunc) History() []ExtensionStoreListFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ExtensionStoreListFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *WorkerStoreUpdateExecutionLogEntryFunc) History() []WorkerStoreUpdateExecutionLogEntryFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]WorkerStoreUpdateExecutionLogEntryFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func History() []Snapshot {\n\tmetricsMu.Lock()\n\tnewSnaps := make([]Snapshot, len(snapHistory))\n\tfor i, snap := range snapHistory {\n\t\tnewSnaps[i] = *snap\n\t}\n\tmetricsMu.Unlock()\n\treturn newSnaps\n}",
"func (f *AutoIndexingServiceGetUnsafeDBFunc) History() []AutoIndexingServiceGetUnsafeDBFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]AutoIndexingServiceGetUnsafeDBFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *ExtensionStoreHandleFunc) History() []ExtensionStoreHandleFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ExtensionStoreHandleFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *ReleaseStoreTransactFunc) History() []ReleaseStoreTransactFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ReleaseStoreTransactFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *ReleaseStoreGetLatestBatchFunc) History() []ReleaseStoreGetLatestBatchFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ReleaseStoreGetLatestBatchFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *WorkerStoreHeartbeatFunc) History() []WorkerStoreHeartbeatFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]WorkerStoreHeartbeatFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func (f *IndexingRepoStoreListIndexableReposFunc) History() []IndexingRepoStoreListIndexableReposFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]IndexingRepoStoreListIndexableReposFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return next history item.
|
func (l *Linenoise) historyNext(ls *linestate) string {
if len(l.history) == 0 {
return ""
}
// update the current history entry with the line buffer
l.historySet(ls.historyIndex, ls.String())
ls.historyIndex--
// next history item
if ls.historyIndex < 0 {
ls.historyIndex = 0
}
return l.historyGet(ls.historyIndex)
}
|
[
"func (hist *history) next() (next string) {\n\thist.index += 1\n\n\tif hist.index < len(hist.commandHistory) {\n\t\tnext = hist.commandHistory[hist.index]\n\t} else {\n\t\thist.index = len(hist.commandHistory) // always point to one beyond\n\t}\n\treturn next\n}",
"func (v *VersionHistory) GetLastItem() (*VersionHistoryItem, error) {\n\n\tif len(v.Items) == 0 {\n\t\treturn nil, &shared.BadRequestError{Message: \"version history is empty.\"}\n\t}\n\n\treturn v.Items[len(v.Items)-1].Duplicate(), nil\n}",
"func getLatestHistoryFile(dir string) (item historyItem) {\n\tfileList, err := getHistoryFileList(dir)\n\t// start from 0\n\tif len(fileList) == 0 || err != nil {\n\t\titem.index = 0\n\t\titem.path = filepath.Join(dir, fmt.Sprintf(\"%s%s\", historyPrefix, strconv.Itoa(item.index)))\n\t\treturn\n\t}\n\n\tlatestItem := fileList[0]\n\n\tif latestItem.info.Size() >= historySize {\n\t\titem.index = latestItem.index + 1\n\t\titem.path = filepath.Join(dir, fmt.Sprintf(\"%s%s\", historyPrefix, strconv.Itoa(item.index)))\n\t} else {\n\t\titem = latestItem\n\t}\n\n\treturn\n}",
"func (q *Queue) GetNext() ([]byte, error) {\n\tq.Lock()\n\tdefer q.Unlock()\n\n\titem, err := q.readItemByID(q.head + 1)\n\tif err != nil {\n\t\treturn item.Value, err\n\t}\n\n\terr = q.db.Delete(item.Key, nil)\n\tif err == nil {\n\t\tq.head++\n\t}\n\treturn item.Value, err\n}",
"func (w *Window) Next() error {\n\trawurl, err := w.history.Next()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar u *url.URL\n\tif u, err = url.Parse(rawurl); err != nil {\n\t\treturn err\n\t}\n\treturn w.load(u)\n}",
"func (r *Reader) fetchNextHistory() {\n\t// lock has to be held here\n\tif r.NoHistory {\n\t\treturn\n\t}\n\n\tswitch r.historyIndex {\n\tcase -1:\n\t\treturn\n\tcase 0:\n\t\trunes := []rune(r.historyPending)\n\t\tr.input.Set(runes, len(runes))\n\t\tr.historyIndex--\n\tdefault:\n\t\tentry, ok := r.history.NthPreviousEntry(r.historyIndex - 1)\n\t\tif ok {\n\t\t\tr.historyIndex--\n\t\t\trunes := []rune(entry)\n\t\t\tr.input.Set(runes, len(runes))\n\t\t}\n\t}\n}",
"func (l *reader) nextItem() item {\n\tif l.peekCount > 0 {\n\t\tl.peekCount--\n\t} else {\n\t\tl.token[0] = l.nextItemFromInput()\n\t}\n\treturn l.token[l.peekCount]\n}",
"func (qi *Items) Next() *pageloader.Request {\n\tqi.Lock()\n\tlog.Println(\"pulling request from the queue\")\n\tif qi.Length == 0 {\n\t\tlog.Println(\"nothing in the queue\")\n\t\treturn nil\n\t}\n\tt := qi.Stack[0]\n\ttCopy := *t\n\tqi.Stack = qi.Stack[1:]\n\tqi.Length--\n\tlog.Printf(\"queue length now: %d\\n\", qi.Length)\n\tqi.Unlock()\n\treturn &tCopy\n}",
"func (k Keeper) GetHistoryItem(ctx sdk.Context, marketID dnTypes.ID, blockHeight int64) (types.HistoryItem, error) {\n\tk.modulePerms.AutoCheck(types.PermHistoryRead)\n\n\tstore := ctx.KVStore(k.storeKey)\n\tbz := store.Get(types.GetHistoryItemKey(marketID, blockHeight))\n\tif bz == nil {\n\t\treturn types.HistoryItem{}, types.ErrWrongHistoryItem\n\t}\n\n\titem := types.HistoryItem{}\n\tif err := k.cdc.UnmarshalBinaryLengthPrefixed(bz, &item); err != nil {\n\t\tpanic(fmt.Errorf(\"historyItem unmarshal: %w\", err))\n\t}\n\n\treturn item, nil\n}",
"func (i *Item) Next() *Item {\n\treturn i.next\n}",
"func (i *QueueItem) Next() *QueueItem {\n\ti.lock.Lock()\n\tdefer i.lock.Unlock()\n\treturn i.next\n}",
"func (i Item) Next() *Item {\n\treturn i.next\n}",
"func (v *VersionHistory) GetFirstItem() (*VersionHistoryItem, error) {\n\n\tif len(v.Items) == 0 {\n\t\treturn nil, &shared.BadRequestError{Message: \"version history is empty.\"}\n\t}\n\n\treturn v.Items[0].Duplicate(), nil\n}",
"func (h *History) Last() *Entry {\n\treturn &(*h)[len(*h)-1]\n}",
"func (jn *JSONNode) GetNext() (*JSONNode) {\n\tif jn.curIndex >= len(jn.unnamedKids) {\n\t\treturn nil\n\t}\n\n\titem := jn.unnamedKids[jn.curIndex]\n\tjn.curIndex++\n\n\treturn item\n}",
"func (i *bufferItem) Next(ctx context.Context, forceClose <-chan struct{}) (*bufferItem, error) {\n\t// See if there is already a next value, block if so. Note we don't rely on\n\t// state change (chan nil) as that's not threadsafe but detecting close is.\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase <-forceClose:\n\t\treturn nil, fmt.Errorf(\"subscription closed\")\n\tcase <-i.link.nextCh:\n\t}\n\n\t// Check if the reader is too slow and the event buffer as discarded the event\n\t// This must happen after the above select to prevent a random selection\n\t// between linkCh and droppedCh\n\tselect {\n\tcase <-i.link.droppedCh:\n\t\treturn nil, fmt.Errorf(\"event dropped from buffer\")\n\tdefault:\n\t}\n\n\t// If channel closed, there must be a next item to read\n\tnextRaw := i.link.next.Load()\n\tif nextRaw == nil {\n\t\t// shouldn't be possible\n\t\treturn nil, errors.New(\"invalid next item\")\n\t}\n\tnext := nextRaw.(*bufferItem)\n\tif next.Err != nil {\n\t\treturn nil, next.Err\n\t}\n\treturn next, nil\n}",
"func (s *Scanner) nextItem() item {\n\titem := <-s.items\n\ts.lastPos = item.pos\n\treturn item\n}",
"func (o *PaginatedChangeset) GetNext() string {\n\tif o == nil || o.Next == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Next\n}",
"func (_CraftingI *CraftingICaller) NextItem(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _CraftingI.contract.Call(opts, &out, \"next_item\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return previous history item.
|
func (l *Linenoise) historyPrev(ls *linestate) string {
if len(l.history) == 0 {
return ""
}
// update the current history entry with the line buffer
l.historySet(ls.historyIndex, ls.String())
ls.historyIndex++
// previous history item
if ls.historyIndex >= len(l.history) {
ls.historyIndex = len(l.history) - 1
}
return l.historyGet(ls.historyIndex)
}
|
[
"func (hist *history) previous() (prev string) {\n\n\thist.index -= 1\n\tif hist.index < 0 {\n\t\thist.index = 0\n\t}\n\n\tif hist.index < len(hist.commandHistory) {\n\t\tprev = hist.commandHistory[hist.index]\n\t}\n\treturn prev\n}",
"func (i *Item) Prev() *Item {\n\treturn i.prev\n}",
"func (w *Window) Previous() error {\n\trawurl, err := w.history.Previous()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar u *url.URL\n\tif u, err = url.Parse(rawurl); err != nil {\n\t\treturn err\n\t}\n\treturn w.load(u)\n}",
"func (i Item) Prev() *Item {\n\treturn i.prev\n}",
"func (e *ListElement) Previous() *ListElement {\n\treturn (*ListElement)(atomic.LoadPointer(&e.previousElement))\n}",
"func (player *musicPlayer) previous() (string, error) {\n\tplayer.Lock()\n\tvar songToResume string\n\tif player.state.current > 0 {\n\t\tif player.state.status == playing {\n\t\t\tplayer.stopFlow()\n\t\t}\n\t\tplayer.state.current -= 1\n\t\tsongToResume = player.state.queue[player.state.current]\n\t} else {\n\t\tplayer.Unlock()\n\t\treturn songToResume, errors.New(cannot_previous_msg)\n\t}\n\n\tplayer.Unlock()\n\tch := make(chan error)\n\tdefer close(ch)\n\tgo player.playQueue(0, ch)\n\terr := <-ch\n\n\treturn songToResume, err\n}",
"func (el *Element) Previous() *Element {\n\tparent, err := el.PreviousE()\n\tkit.E(err)\n\treturn parent\n}",
"func (o *PaginatedChangeset) GetPrevious() string {\n\tif o == nil || o.Previous == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Previous\n}",
"func (o *Note) GetPrevNumber() int { return o.Number }",
"func getPreviousEvent(ev *types.EventMessage) *EventNode {\n\n\tk := HashGraph[ev.Owner].events\n\tif len(k) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, v := range k {\n\t\tif v.OwnHash == ev.PreviousHash {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn nil\n}",
"func (cmd *ChainedCommands) GetPreviousID() stored_files.File {\n\treturn cmd.PrevID\n}",
"func (entry *LevelObjectEntry) PreviousIndex() LevelObjectChainIndex {\n\treturn LevelObjectChainIndex(entry.Previous)\n}",
"func (k * Keyspace) Previous() ([]byte, error) {\n\terr := k.decrementString(k.length-1)\n\treturn k.toBytes(), err\n}",
"func (h *History) Back() string {\n\tvar result string\n\th.commandPointer = Min(len(h.commandLines)-1, Max(-1, h.commandPointer))\n\tif h.commandPointer >= 0 {\n\t\tresult = h.commandLines[h.commandPointer]\n\t\th.commandPointer = h.commandPointer - 1\n\t}\n\treturn result\n}",
"func (r Response) Prev() Command {\n\treturn Command{\n\t\tJID: r.IQ.From,\n\t\tSID: r.SID,\n\t\tNode: r.Node,\n\t\tAction: \"prev\",\n\t}\n}",
"func (c *staticCursor) Prev() ([]byte, []byte) {\n\treturn c.getValueAtIndex(-1)\n}",
"func (el *Element) Previous() (*Element, error) {\n\treturn el.ElementByJS(Eval(`() => this.previousElementSibling`))\n}",
"func (e *Element) Previous() *Element {\n\tif e == nil || e.list == nil || e.prev == e.list.root {\n\t\treturn nil\n\t}\n\treturn e.prev\n}",
"func (v Vertex) GetPrev() *slice.Sequence {\n\treturn v.Prev\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
HistoryAdd adds a new entry to the history.
|
func (l *Linenoise) HistoryAdd(line string) {
if l.historyMaxlen == 0 {
return
}
// don't re-add the last entry
if len(l.history) != 0 && line == l.history[len(l.history)-1] {
return
}
// add the line to the history
if len(l.history) == l.historyMaxlen {
// remove the first entry
l.historyPop(0)
}
l.history = append(l.history, line)
}
|
[
"func (h *History) Add(entry string) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\tmax := cap(h.entries)\n\th.head = (h.head + 1) % max\n\th.entries[h.head] = entry\n\tif h.size < max {\n\t\th.size++\n\t}\n}",
"func AddHistory(content string) error {\n\tins := getInstance()\n\treturn ins.SaveHistory(content)\n}",
"func (m *Editor) AddHistory(line string) error {\n\tm.AddHistoryEntry(line)\n\tif m.autoSaveHistory && m.histFile != \"\" {\n\t\treturn m.SaveHistory()\n\t}\n\treturn nil\n}",
"func (e *TarantoolEngine) addHistory(chID ChannelID, message Message, size, lifetime int64) (err error) {\n\t// not implemented\n\treturn\n}",
"func (h *History) Add(b byte) {\n\th.data[h.pos] = b\n\th.pos++\n\tif h.pos >= len(h.data) {\n\t\th.pos = 0\n\t}\n}",
"func (e *MemoryEngine) AddHistory(ch string, pub *Publication, opts *ChannelOptions) (*Publication, error) {\n\treturn e.historyHub.add(ch, pub, opts)\n}",
"func (h *History) Add(action []*Action) {\n\th.actions = append(h.actions, action)\n\th.head++\n}",
"func (g *Generator) AddHistory(history ispec.History) {\n\tg.image.History = append(g.image.History, history)\n}",
"func (d *Dao) AddHistoryCache(c context.Context, tp int32, oid, timestamp int64, value []byte) (err error) {\n\tvar (\n\t\tconn = d.dmMC.Get(c)\n\t\tkey = keyHistory(tp, oid, timestamp)\n\t)\n\tdefer conn.Close()\n\titem := &memcache.Item{\n\t\tKey: key,\n\t\tValue: value,\n\t\tExpiration: d.historyExpire,\n\t}\n\tif err = conn.Set(item); err != nil {\n\t\tlog.Error(\"conn.Set(%s) error(%v)\", key, err)\n\t}\n\treturn\n}",
"func (d *StormDatabase) AddTorrentHistory(infoHash, name string, b []byte) {\n\tdefer perf.ScopeTimer()()\n\n\tif !config.Get().UseTorrentHistory {\n\t\treturn\n\t}\n\n\tlog.Debugf(\"Saving torrent %s with infohash %s to the history\", name, infoHash)\n\n\tvar oldItem TorrentHistory\n\tif err := d.db.One(\"InfoHash\", infoHash, &oldItem); err == nil {\n\t\toldItem.Dt = time.Now()\n\t\tif err := d.db.Update(&oldItem); err != nil {\n\t\t\tlog.Warningf(\"Error updating item in the history: %s\", err)\n\t\t}\n\n\t\treturn\n\t}\n\n\titem := TorrentHistory{\n\t\tInfoHash: infoHash,\n\t\tName: name,\n\t\tDt: time.Now(),\n\t\tMetadata: b,\n\t}\n\n\tif err := d.db.Save(&item); err != nil {\n\t\tlog.Warningf(\"Error inserting item to the history: %s\", err)\n\t\treturn\n\t}\n\n\tvar ths []TorrentHistory\n\td.db.AllByIndex(\"Dt\", &ths, storm.Reverse(), storm.Skip(config.Get().TorrentHistorySize))\n\tfor _, th := range ths {\n\t\td.db.DeleteStruct(&th)\n\t}\n\td.db.ReIndex(&TorrentHistory{})\n}",
"func (h *History) Add(key int, input, output interface{}, start, end int64) {\n\th.Lock()\n\tdefer h.Unlock()\n\tif _, exists := h.shard[key]; !exists {\n\t\th.shard[key] = make([]*operation, 0)\n\t}\n\to := &operation{input, output, start, end}\n\th.shard[key] = append(h.shard[key], o)\n\th.operations = append(h.operations, o)\n}",
"func (history *History) Add(record any) {\n\thistory.mu.Lock()\n\tdefer history.mu.Unlock()\n\n\thistory.latest = record\n\n\tif equiv, ok := record.(Deduplicable); ok && history.length > 0 {\n\t\tif equiv.IsDuplicate(history.lastAdded) {\n\t\t\treturn\n\t\t}\n\t}\n\n\thistory.records[history.next] = record\n\thistory.lastAdded = record\n\n\tif history.length < len(history.records) {\n\t\thistory.length++\n\t}\n\n\thistory.next = (history.next + 1) % len(history.records)\n}",
"func (t *HistoryTree) Add(event []byte) common.Hash {\n\tt.version++\n\t// Add event hash\n\tpos := common.NewPosition(t.version, 0)\n\tt.Store[pos] = computeHashLeaf(event)\n\n\t// Compute root node and update tree in post-order\n\tdepth := computeDepth(t.version)\n\troot := common.NewPosition(0, depth)\n\tt.updatePostOrder(depth, root)\n\treturn t.Store.Get(root)\n}",
"func (nu *NurseUpdate) AddHistorytaking(h ...*Historytaking) *NurseUpdate {\n\tids := make([]int, len(h))\n\tfor i := range h {\n\t\tids[i] = h[i].ID\n\t}\n\treturn nu.AddHistorytakingIDs(ids...)\n}",
"func (nuo *NurseUpdateOne) AddHistorytaking(h ...*Historytaking) *NurseUpdateOne {\n\tids := make([]int, len(h))\n\tfor i := range h {\n\t\tids[i] = h[i].ID\n\t}\n\treturn nuo.AddHistorytakingIDs(ids...)\n}",
"func (db *DB) AddHistoryCommand(command []string) error {\n\treturn db.AddHistoryCommandWithTime(command, time.Now())\n}",
"func (pu *PatientrecordUpdate) AddHistorytaking(h ...*Historytaking) *PatientrecordUpdate {\n\tids := make([]int, len(h))\n\tfor i := range h {\n\t\tids[i] = h[i].ID\n\t}\n\treturn pu.AddHistorytakingIDs(ids...)\n}",
"func (puo *PatientrecordUpdateOne) AddHistorytaking(h ...*Historytaking) *PatientrecordUpdateOne {\n\tids := make([]int, len(h))\n\tfor i := range h {\n\t\tids[i] = h[i].ID\n\t}\n\treturn puo.AddHistorytakingIDs(ids...)\n}",
"func (d *StormDatabase) AddSearchHistory(historyType, query string) {\n\tdefer perf.ScopeTimer()()\n\n\tvar qh QueryHistory\n\n\tif err := d.db.One(\"ID\", fmt.Sprintf(\"%s|%s\", historyType, query), &qh); err == nil {\n\t\tqh.Dt = time.Now()\n\t\td.db.Update(&qh)\n\t\treturn\n\t}\n\n\tqh = QueryHistory{\n\t\tID: fmt.Sprintf(\"%s|%s\", historyType, query),\n\t\tDt: time.Now(),\n\t\tType: historyType,\n\t\tQuery: query,\n\t}\n\n\td.db.Save(&qh)\n\n\tvar qhs []QueryHistory\n\td.db.Select(q.Eq(\"Type\", historyType)).Skip(historyMaxSize).Find(&qhs)\n\tfor _, qh := range qhs {\n\t\td.db.DeleteStruct(&qh)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
HistorySetMaxlen sets the maximum length for the history. Truncate the current history if needed.
|
func (l *Linenoise) HistorySetMaxlen(n int) {
if n < 0 {
return
}
l.historyMaxlen = n
currentLength := len(l.history)
if currentLength > l.historyMaxlen {
// truncate and retain the latest history
l.history = l.history[currentLength-l.historyMaxlen:]
}
}
|
[
"func (obj *DaemonSet) SetHistoryLimit(limit int32) *DaemonSet {\n\tif limit <= 0 {\n\t\tlimit = 10\n\t}\n\tobj.ds.Spec.RevisionHistoryLimit = &limit\n\treturn obj\n}",
"func (s *RedisStore) SetMaxLength(l int) {\n\tif l >= 0 {\n\t\ts.maxLength = l\n\t}\n}",
"func (store *RedisStore) SetMaxLength(l int) {\r\n\tif l >= 0 {\r\n\t\tstore.maxLength = l\r\n\t}\r\n}",
"func (r *Release) getMaxHistory() []string {\n\tif r.MaxHistory != 0 {\n\t\treturn []string{\"--history-max\", strconv.Itoa(r.MaxHistory)}\n\t}\n\treturn []string{}\n}",
"func (o ApplicationSpecOutput) RevisionHistoryLimit() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpec) *int { return v.RevisionHistoryLimit }).(pulumi.IntPtrOutput)\n}",
"func (_m *RediStore) SetMaxLength(l int) {\n\t_m.Called(l)\n}",
"func WithMaxReleaseHistory(maxHistory int) Option {\n\treturn func(r *Reconciler) error {\n\t\tif maxHistory < 0 {\n\t\t\treturn errors.New(\"maximum Helm release history size must not be negative\")\n\t\t}\n\t\tr.maxHistory = maxHistory\n\t\treturn nil\n\t}\n}",
"func (q *Queue) SetMaxLen(maxLen int) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tq.maxLen = maxLen\n}",
"func HistoryLength() int {\n\treturn int(C.history_length)\n}",
"func (o StorageClusterSpecPtrOutput) RevisionHistoryLimit() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *StorageClusterSpec) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.RevisionHistoryLimit\n\t}).(pulumi.IntPtrOutput)\n}",
"func (s *RediStore) SetMaxLength(length int) *RediStore {\n\ts.maxLength = length\n\treturn s\n}",
"func (r *Release) inheritMaxHistory(s *State) {\n\tif s.Settings.GlobalMaxHistory != 0 {\n\t\tif r.MaxHistory == 0 {\n\t\t\tr.MaxHistory = s.Settings.GlobalMaxHistory\n\t\t}\n\t}\n}",
"func (gc *defaultGaleraControl) truncateHistory(\n\tgalera *apigalera.Galera,\n\tpods []*corev1.Pod,\n\trevisions []*appsv1.ControllerRevision,\n\tcurrent *appsv1.ControllerRevision,\n\tupdate *appsv1.ControllerRevision) error {\n\thistorySl := make([]*appsv1.ControllerRevision, 0, len(revisions))\n\t// mark all live revisions\n\tlive := map[string]bool{current.Name: true, update.Name: true}\n\tfor i := range pods {\n\t\tlive[getPodRevision(pods[i])] = true\n\t}\n\t// collect live revisions and historic revisions\n\tfor i := range revisions {\n\t\tif !live[revisions[i].Name] {\n\t\t\thistorySl = append(historySl, revisions[i])\n\t\t}\n\t}\n\thistoryLen := len(historySl)\n\tvar historyLimit int\n\n\tif galera.Spec.RevisionHistoryLimit == nil {\n\t\thistoryLimit = apigalera.DefaultRevisionHistoryLimit\n\t} else {\n\t\thistoryLimit = int(*galera.Spec.RevisionHistoryLimit)\n\t}\n\n\tif historyLen <= historyLimit {\n\t\treturn nil\n\t}\n\t// delete any non-live history to maintain the revision limit.\n\thistorySl = historySl[:(historyLen - historyLimit)]\n\tfor i := 0; i < len(historySl); i++ {\n\t\tif err := gc.controllerHistory.DeleteControllerRevision(historySl[i]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *DeviceManagementConfigurationStringSettingValueDefinition) SetMaximumLength(value *int64)() {\n err := m.GetBackingStore().Set(\"maximumLength\", value)\n if err != nil {\n panic(err)\n }\n}",
"func OptMaxLen(maxLen int) Option {\n\treturn func(afb *Buffer) {\n\t\tafb.MaxLen = maxLen\n\t}\n}",
"func (bwh *bwHistory) UpdateLen(mlen int) {\n\tif mlen == bwh.historyLen {\n\t\t// NOOP\n\t\treturn\n\t}\n\n\thlen := len(bwh.history)\n\tif mlen < hlen {\n\t\t// Shrink\n\t\tbwh.history = bwh.history[hlen-mlen:]\n\t}\n\tbwh.historyLen = mlen\n}",
"func NewHistory(size int) *History {\n\treturn &History{\n\t\tentries: make([]string, size),\n\t}\n}",
"func (v *Validator) SetMaxLength(length int) {\n\tv.maxLength = length\n}",
"func (s *DbStore) MaxLength(l int) {\n\tfor _, c := range s.Codecs {\n\t\tif codec, ok := c.(*securecookie.SecureCookie); ok {\n\t\t\tcodec.MaxLength(l)\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
HistorySave saves the history to a file.
|
func (l *Linenoise) HistorySave(fname string) {
if len(l.history) == 0 {
return
}
f, err := os.Create(fname)
if err != nil {
log.Printf("error opening %s\n", fname)
return
}
_, err = f.WriteString(strings.Join(l.history, "\n"))
if err != nil {
log.Printf("%s error writing %s\n", fname, err)
}
f.Close()
}
|
[
"func (h *UpdateHistory) Save() error {\n\tf, err := os.Create(historyPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tif err := json.NewEncoder(f).Encode(h); err != nil {\n\t\treturn err\n\t}\n\tf.Sync()\n\n\treturn nil\n}",
"func (m *Editor) SaveHistory() error {\n\tif m.histFile == \"\" {\n\t\treturn errors.New(\"no savefile configured\")\n\t}\n\th := m.GetHistory()\n\tif h == nil {\n\t\treturn errors.New(\"history not configured\")\n\t}\n\treturn history.SaveHistory(h, m.histFile)\n}",
"func SaveHistory(path string) error {\n\tp := C.CString(path)\n\te := C.write_history(p)\n\tC.free(unsafe.Pointer(p))\n\n\tif e == 0 {\n\t\treturn nil\n\t}\n\treturn syscall.Errno(e)\n}",
"func (bh browserHistory) Save() error {\n\tbytes, err := json.Marshal(bh.Records)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs.Global().Get(\"localStorage\").Set(\"history\", string(bytes))\n\n\treturn nil\n}",
"func (g *Guild) SaveToHistory(p *Play) {\n\tfor i := len(g.History) - 1; i > 0; i-- {\n\t\tg.History[i] = g.History[i-1]\n\t}\n\tg.History[0] = p\n}",
"func (r *historyRow) save(dir string) error {\n\trBytes, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thistoryFile := getLatestHistoryFile(dir)\n\n\tf, err := os.OpenFile(historyFile.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t_, err = f.Write(append(rBytes, []byte(\"\\n\")...))\n\treturn err\n}",
"func (rl *readline) WriteHistory() error {\n\tf, err := os.OpenFile(rl.historyFile, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t_, err = rl.State.WriteHistory(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func saveGame(board *chess.Board, name string) {\n\tname = strings.Trim(name, \"\\n\")\n\tfile, _ := os.Create(name)\n\twriter := bufio.NewWriter(file)\n\tfmt.Println(\"Name: \", name)\n\tfor i, v := range board.History {\n\t\twriter.WriteString(strconv.Itoa(i+1) + \": \")\n\t\twriter.WriteString(v)\n\t\twriter.WriteString(\"\\n\")\n\t}\n\twriter.Flush()\n\tfile.Close()\n\n}",
"func WriteHistory(path string) error {\n\tcpath := toCStringOrNil(path)\n\tdefer freeOrNil(unsafe.Pointer(cpath))\n\treturn errnoToError(C.write_history(cpath))\n}",
"func (m *Editor) SetAutoSaveHistory(file string, autoSave bool) {\n\tm.autoSaveHistory = autoSave\n\tm.histFile = file\n}",
"func (tv *TextView) SavePosHistory(pos TextPos) {\n\tif tv.Buf == nil {\n\t\treturn\n\t}\n\ttv.Buf.SavePosHistory(pos)\n\ttv.PosHistIdx = len(tv.Buf.PosHistory) - 1\n}",
"func Save(vs ...Histo) (err error) {\n\tfile, err := os.Create(\"r-g-b.gob\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tenc := gob.NewEncoder(file)\n\tfor _, v := range vs {\n\t\terr = enc.Encode(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func save(novel *Novel) {\n\t//if novel exist history\n\ttag := false\n\tfor index, historyNovel := range historyNovels {\n\t\tif historyNovel.Name == novel.Name {\n\t\t\thistoryNovels[index] = novel\n\t\t\ttag = true\n\t\t}\n\t}\n\tif !tag {\n\t\thistoryNovels = append(historyNovels, novel)\n\t}\n\tSaveHistory(historyNovels)\n\tfmt.Println(\"Save complete...\")\n}",
"func saveToFile(path string, lines []string) error {\n\toutputData := strings.Join(lines, \"\\n\")\n\toutputData += \"\\n\"\n\n\tinfo, err := os.Stat(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.WriteFile(path, []byte(outputData), info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (b *Bookmarks) Save() error {\n\treturn b.WriteToFile(b.Filename)\n}",
"func SaveToFile() error {\n\tdata, err := json.Marshal(Events)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(eventsFilename, data, 0644)\n}",
"func (tb *TextBuf) SavePosHistory(pos TextPos) bool {\n\tif tb.PosHistory == nil {\n\t\ttb.PosHistory = make([]TextPos, 0, 1000)\n\t}\n\tsz := len(tb.PosHistory)\n\tif sz > 0 {\n\t\tif tb.PosHistory[sz-1].Ln == pos.Ln {\n\t\t\treturn false\n\t\t}\n\t}\n\ttb.PosHistory = append(tb.PosHistory, pos)\n\t// fmt.Printf(\"saved pos hist: %v\\n\", pos)\n\treturn true\n}",
"func (o *Object) SaveToFile(filename string) {\n\tobjJson, _ := json.Marshal(o)\n\tioutil.WriteFile(filename, objJson, 0644)\n}",
"func (b *bookMark) save(event win_eventlog.EvtHandle) error {\n\tnewBookmark, err := win_eventlog.UpdateBookmark(b.handle, event, b.buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := b.file.Truncate(0); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.file.Seek(0, 0); err != nil {\n\t\treturn err\n\t}\n\t_, err = b.file.WriteString(newBookmark)\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
HistoryLoad loads history from a file.
|
func (l *Linenoise) HistoryLoad(fname string) {
info, err := os.Stat(fname)
if err != nil {
return
}
if !info.Mode().IsRegular() {
log.Printf("%s is not a regular file\n", fname)
return
}
f, err := os.Open(fname)
if err != nil {
log.Printf("%s error on open %s\n", fname, err)
return
}
b := bufio.NewReader(f)
l.history = make([]string, 0, l.historyMaxlen)
for {
s, err := b.ReadString('\n')
if err == nil || err == io.EOF {
s = strings.TrimSpace(s)
if len(s) != 0 {
l.history = append(l.history, s)
}
if err == io.EOF {
break
}
} else {
log.Printf("%s error on read %s\n", fname, err)
}
}
f.Close()
}
|
[
"func LoadHistory(path string) error {\n\tp := C.CString(path)\n\te := C.read_history(p)\n\tC.free(unsafe.Pointer(p))\n\n\tif e == 0 {\n\t\treturn nil\n\t}\n\treturn syscall.Errno(e)\n}",
"func (m *Editor) LoadHistory(file string) error {\n\th, err := history.LoadHistory(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.SetHistory(h)\n\treturn nil\n}",
"func (n *nameHistory) Load() error {\n\tfp, err := os.OpenFile(n.filepath, os.O_RDONLY, os.ModePerm)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open %q file: %v\", n.filepath, err)\n\t}\n\tdefer fp.Close()\n\n\tif err := yaml.NewDecoder(fp).Decode(&n.entries); err != nil {\n\t\treturn fmt.Errorf(\"could not decode file: %v\", err)\n\t}\n\n\tn.isChanged = false\n\n\treturn nil\n}",
"func (bh *browserHistory) Load() error {\n\thist := js.Global().Get(\"localStorage\").Get(\"history\")\n\tif hist.Type() == js.TypeUndefined {\n\t\treturn nil // nothing to unmarashal\n\t}\n\tvar records []string\n\tif err := json.Unmarshal([]byte(hist.String()), &records); err != nil {\n\t\treturn err\n\t}\n\tbh.Records = records\n\n\treturn nil\n}",
"func loadHistory(path string) (map[string]bool, error) {\n\tfiles := make(map[string]bool) // create the map\n\n\t// Open or create the new file in read only mode.\n\t// 0666 - chmod 666 means that all users can read and write but cannot execute\n\tfile, err := os.OpenFile(path, os.O_RDONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\t// create a scanner so we can create a map of each filename in this fil\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\t// Setting the value to true,\n\t\t// since we have already processed this file\n\t\tfiles[line] = true\n\t}\n\treturn files, scanner.Err()\n}",
"func (h *Histfile) loadHistory(bashHistoryPath, zshHistoryPath string, maxInitHistSize, minInitHistSizeKB int) {\n\th.recentMutex.Lock()\n\tdefer h.recentMutex.Unlock()\n\tlog.Println(\"histfile: Checking if resh_history is large enough ...\")\n\tfi, err := os.Stat(h.historyPath)\n\tvar size int\n\tif err != nil {\n\t\tlog.Println(\"histfile ERROR: failed to stat resh_history file:\", err)\n\t} else {\n\t\tsize = int(fi.Size())\n\t}\n\tuseNativeHistories := false\n\tif size/1024 < minInitHistSizeKB {\n\t\tuseNativeHistories = true\n\t\tlog.Println(\"histfile WARN: resh_history is too small - loading native bash and zsh history ...\")\n\t\th.bashCmdLines = records.LoadCmdLinesFromBashFile(bashHistoryPath)\n\t\tlog.Println(\"histfile: bash history loaded - cmdLine count:\", len(h.bashCmdLines.List))\n\t\th.zshCmdLines = records.LoadCmdLinesFromZshFile(zshHistoryPath)\n\t\tlog.Println(\"histfile: zsh history loaded - cmdLine count:\", len(h.zshCmdLines.List))\n\t\t// no maxInitHistSize when using native histories\n\t\tmaxInitHistSize = math.MaxInt32\n\t}\n\tlog.Println(\"histfile: Loading resh history from file ...\")\n\thistory := records.LoadFromFile(h.historyPath, math.MaxInt32)\n\tlog.Println(\"histfile: resh history loaded from file - count:\", len(history))\n\tgo h.loadCliRecords(history)\n\t// NOTE: keeping this weird interface for now because we might use it in the future\n\t//\t\t\twhen we only load bash or zsh history\n\treshCmdLines := loadCmdLines(history)\n\tlog.Println(\"histfile: resh history loaded - cmdLine count:\", len(reshCmdLines.List))\n\tif useNativeHistories == false {\n\t\th.bashCmdLines = reshCmdLines\n\t\th.zshCmdLines = histlist.Copy(reshCmdLines)\n\t\treturn\n\t}\n\th.bashCmdLines.AddHistlist(reshCmdLines)\n\tlog.Println(\"histfile: bash history + resh history - cmdLine count:\", len(h.bashCmdLines.List))\n\th.zshCmdLines.AddHistlist(reshCmdLines)\n\tlog.Println(\"histfile: zsh history + resh history - cmdLine count:\", 
len(h.zshCmdLines.List))\n}",
"func (c *Client) LoadHistory(roomID string) ([]models.Message, error) {\n\tm, err := c.ddp.Call(\"loadHistory\", roomID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thistory := m.(map[string]interface{})\n\n\tdocument, _ := gabs.Consume(history[\"messages\"])\n\tmsgs, err := document.Children()\n\n\tif err != nil {\n\t\tlog.Printf(\"response is in an unexpected format: %v\", err)\n\t\treturn make([]models.Message, 0), nil\n\t}\n\n\tmessages := make([]models.Message, len(msgs))\n\n\tfor i, arg := range msgs {\n\t\tmessages[i] = *getMessageFromDocument(arg)\n\t}\n\n\t//log.Println(messages)\n\n\treturn messages, nil\n}",
"func (t *T) Load(fileName string) error {\n\tfile, err := os.OpenFile(fileName, os.O_APPEND|os.O_RDWR, os.ModeAppend)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt.file = file\n\tt.links = make(map[string]string)\n\n\tscanner := bufio.NewScanner(file)\n\n\tscanner.Split(bufio.ScanLines)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\ttokens := strings.Split(line, \",\")\n\n\t\tif len(tokens) != 2 {\n\t\t\tfile.Close()\n\t\t\treturn fmt.Errorf(\"invalid line: %s\", line)\n\t\t}\n\n\t\tt.links[tokens[0]] = tokens[1]\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tfile.Close()\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func ParseHistoryFile(b []byte) (*History, error) {\n\tversion, err := detectHistoryFileVersion(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch version {\n\tcase 1:\n\t\treturn parseHistoryFileV1(b)\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown history file version: %d\", version)\n\t}\n}",
"func (h *History) ReadFile(path string) error {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := csv.NewReader(file)\n\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(record) < 5 {\n\t\t\treturn errors.New(\"operation history file format error\")\n\t\t}\n\n\t\t// get id / key\n\t\tid, err := strconv.Atoi(record[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toperation := new(operation)\n\n\t\t// get input\n\t\tif record[1] == \"null\" || record[1] == \"\" {\n\t\t\toperation.input = nil\n\t\t} else {\n\t\t\toperation.input = record[1]\n\t\t}\n\n\t\t// get output\n\t\tif record[2] == \"null\" || record[2] == \"\" {\n\t\t\toperation.output = nil\n\t\t} else {\n\t\t\toperation.output = record[2]\n\t\t}\n\n\t\t// get start time\n\t\tstart, err := strconv.ParseInt(record[3], 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn err\n\t\t}\n\t\toperation.start = start\n\n\t\t// get end time\n\t\tend, err := strconv.ParseInt(record[4], 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn err\n\t\t}\n\t\toperation.end = end\n\n\t\th.AddOperation(id, operation)\n\t}\n\n\treturn file.Close()\n}",
"func ReadHistory(path string) error {\n\tcpath := toCStringOrNil(path)\n\tdefer freeOrNil(unsafe.Pointer(cpath))\n\treturn errnoToError(C.read_history(cpath))\n}",
"func LoadHistoryResult(t int64) error {\n\th, err := GetHistory(t)\n\tif err != nil {\n\t\treturn err\n\t}\n\trecordMutex.Lock()\n\tfor _, v := range h.Record {\n\t\tcurrentRecord[v] = countThres\n\t}\n\trecordMutex.Unlock()\n\treturn nil\n}",
"func LoadChangesFromFile(filePath string) (*Changes, error) {\n\tfile, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tb, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmixtape := Changes{}\n\terr = json.Unmarshal(b, &mixtape)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mixtape, nil\n}",
"func (d *Dump) Load() error {\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t)\n\n\tif data, err = ioutil.ReadFile(d.filename); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.decodeGob(data)\n}",
"func (rl *readline) ReadHistory() error {\n\tf, err := os.Open(rl.historyFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t_, err = rl.State.ReadHistory(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func LoadFromFile() error {\n\t_, err := os.Stat(eventsFilename)\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t}\n\tdata, err := ioutil.ReadFile(eventsFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(data, &Events)\n}",
"func LoadStravaActivityHistory(userID string) (history data.StravaActivityHistory, err error) {\n\n\t// Create connection\n\tconn, err := dbconn.GetConnectionPoolAppDB()\n\n\tif err == nil {\n\n\t\trows, err1 := conn.Query(`SELECT user_id, history FROM strava_activity_history WHERE user_id=$1`, userID)\n\t\tif err1 != nil {\n\t\t\treturn history, fmt.Errorf(\"DB.Query: %v\", err1)\n\t\t}\n\t\tdefer rows.Close()\n\n\t\t// Find result\n\t\tfor rows.Next() {\n\t\t\terr = rows.Scan(&history.UserID, &history.History)\n\t\t\tif err == nil {\n\t\t\t\thistory.HasHistory = true\n\t\t\t}\n\t\t}\n\n\t\tif history.HasHistory == false {\n\t\t\thistory.UserID = userID\n\t\t}\n\n\t}\n\n\treturn history, err\n}",
"func (wd WorkDir) Load(file string) ([]byte, error) {\n\tfh, err := os.Open(wd.Join(file))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fh.Close()\n\treturn ioutil.ReadAll(fh)\n}",
"func loadLogs(path string) ([]Log, error) {\n\t// Create the file if it does not exist\n\tf, err := os.OpenFile(path+\"/logs.json\", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0644)\n\tif err == nil {\n\t\t// New file. Write empty list to it\n\t\t_, err = f.WriteString(\"[]\")\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn []Log{}, err\n\t\t}\n\t}\n\tf.Close()\n\n\t// Load the file\n\tlogs := &[]Log{}\n\tdata, err := ioutil.ReadFile(path + \"/logs.json\")\n\tif err != nil {\n\t\treturn []Log{}, err\n\t}\n\n\terr = json.Unmarshal(data, &logs)\n\tif err != nil {\n\t\treturn []Log{}, err\n\t}\n\n\treturn *logs, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ 'DSP' API Frees a DSP object. This will free the DSP object. NOTE: If DSP is not removed from the Channel, ChannelGroup or System object with "Channel.RemoveDSP" or "ChannelGroup.RemoveDSP", after being added with "Channel.AddDSP" or "ChannelGroup.AddDSP", it will not release and will instead return FMOD_ERR_DSP_INUSE.
|
func (d *DSP) Release() error {
res := C.FMOD_DSP_Release(d.cptr)
return errs[res]
}
|
[
"func (ctx *Context) Free() {\n\tC.avfilter_free((*C.struct_AVFilterContext)(ctx))\n}",
"func (pl *Player) Free() {\n\tfor _, s := range pl.Sounds {\n\t\ts.Free()\n\t}\n\tmix.CloseAudio()\n}",
"func (f *Frame) Free() {\n\tif f.cPtrAVFrame != nil {\n\t\tC.av_frame_free(&f.cPtrAVFrame)\n\t}\n}",
"func (ctxt *Context) Free() {\n\tvar ptr *C.struct_AVCodecContext = (*C.struct_AVCodecContext)(unsafe.Pointer(ctxt))\n\tC.avcodec_free_context(&ptr)\n}",
"func (d *DSP) Reset() error {\n\tres := C.FMOD_DSP_Reset(d.cptr)\n\treturn errs[res]\n}",
"func (screen *Surface) Free() { C.SDL_FreeSurface((*C.SDL_Surface)(cast(screen))) }",
"func (sig Signature) Free() {\n\tC.CSignatureFree(sig.sig)\n}",
"func (s *Sdr) SetDSP(state bool) error {\n\tvar v C.uint8_t\n\tif state {\n\t\tv = 1\n\t}\n\tif C.airspyhf_set_lib_dsp(s.handle, v) != C.AIRSPYHF_SUCCESS {\n\t\treturn fmt.Errorf(\"airspyhf.Sdr.SetDSP: failed to set DSP\")\n\t}\n\treturn nil\n}",
"func (s *System) PlayDSP(dsp *DSP, channelgroup *ChannelGroup, paused bool) (*Channel, error) {\n\tvar channel Channel\n\tres := C.FMOD_System_PlayDSP(s.cptr, dsp.cptr, (*C.FMOD_CHANNELGROUP)(null), getBool(paused), &channel.cptr)\n\treturn &channel, errs[res]\n}",
"func Free(descr C.int) {\n\tStop(descr)\n\tUnregisterAlgorithm((AlgorithmDescr)(descr))\n}",
"func Close() {\n\tglobalBufferSize = 0\n\tglobalSoundBuffer.Release()\n\tglobalPrimarySoundBuffer.Release()\n\tglobalDirectSoundObject.Release()\n}",
"func (s *System) RegisterDSP(description *C.FMOD_DSP_DESCRIPTION, handle *C.uint) error {\n\t//FMOD_RESULT F_API FMOD_System_RegisterDSP (FMOD_SYSTEM *system, const FMOD_DSP_DESCRIPTION *description, unsigned int *handle);\n\treturn ErrNoImpl\n}",
"func (m *Message) DSP() (*DSP, error) {\n\tps, err := m.Parse(\"DSP\")\n\tpst, ok := ps.(*DSP)\n\tif ok {\n\t\treturn pst, err\n\t}\n\treturn nil, err\n}",
"func Free() error {\n\treturn boolToError(C.BASS_Free())\n}",
"func (self *_Runtime) Free(ptr *uint8) {\n C.wasm_runtime_free((unsafe.Pointer)(ptr))\n}",
"func (sig InsecureSignature) Free() {\n\tC.CInsecureSignatureFree(sig.sig)\n}",
"func (self Sound) Destroy() {\n\tC.sfSound_destroy(self.Cref)\n}",
"func (s *System) CreateDSPByPlugin(handle C.uint, dsp **C.FMOD_DSP) error {\n\t//FMOD_RESULT F_API FMOD_System_CreateDSPByPlugin (FMOD_SYSTEM *system, unsigned int handle, FMOD_DSP **dsp);\n\treturn ErrNoImpl\n}",
"func LinkFree(l **Link) {\n\tC.avfilter_link_free((**C.struct_AVFilterLink)(unsafe.Pointer(l)))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ Connection / disconnection / input and output enumeration. Adds the specified DSP unit as an input of the DSP object. input: The DSP unit to add as an input of the current unit. connection: The connection between the 2 units. Optional. Specify 0 or NULL to ignore. typ: The type of connection between the 2 units. See "DSPConnectionType". If you want to add a unit as an output of another unit, then add 'this' unit as an input of that unit instead. Inputs are automatically mixed together, then the mixed data is sent to the unit's output(s). To find the number of inputs or outputs a unit has use "DSP.NumInputs" or "DSP.NumOutputs". Note: The connection pointer retrieved here will become invalid if you disconnect the 2 dsp units that use it.
|
func (d *DSP) AddInput(input DSP, typ DSPConnectionType) (DspConnection, error) {
var dspConn DspConnection
res := C.FMOD_DSP_AddInput(d.cptr, input.cptr, &dspConn.cptr, C.FMOD_DSPCONNECTION_TYPE(typ))
return dspConn, errs[res]
}
|
[
"func (d *DspConnection) Input() (DSP, error) {\n\tvar input DSP\n\tres := C.FMOD_DSPConnection_GetInput(d.cptr, &input.cptr)\n\treturn input, errs[res]\n}",
"func (d *DSP) Input(index int) (DSP, DspConnection, error) {\n\tvar input DSP\n\tvar inputconnection DspConnection\n\tres := C.FMOD_DSP_GetInput(d.cptr, C.int(index), &input.cptr, &inputconnection.cptr)\n\treturn input, inputconnection, errs[res]\n}",
"func (neuron *Neuron) AddInput(input *Neuron, w Data) {\n\tinput.AddOutput(&Connection {c: neuron.input, weight: w })\n\tch := make(chan string)\n\tneuron.command <- message{ message: AddInput, payload: input, reply: ch}\n\t_ = <-ch\n}",
"func (p *Peer) addInputChannel(label string) error {\n\tch, err := p.conn.CreateDataChannel(label, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tch.OnOpen(func() {\n\t\tp.log.Debug().Str(\"label\", ch.Label()).Uint16(\"id\", *ch.ID()).Msg(\"Data channel [input] opened\")\n\t})\n\tch.OnError(p.logx)\n\tch.OnMessage(func(mess webrtc.DataChannelMessage) {\n\t\tif len(mess.Data) == 0 {\n\t\t\treturn\n\t\t}\n\t\t// echo string messages (e.g. ping/pong)\n\t\tif mess.IsString {\n\t\t\tp.logx(ch.Send(mess.Data))\n\t\t\treturn\n\t\t}\n\t\tif p.OnMessage != nil {\n\t\t\tp.OnMessage(mess.Data)\n\t\t}\n\t})\n\tp.dTrack = ch\n\tch.OnClose(func() { p.log.Debug().Msg(\"Data channel [input] has been closed\") })\n\treturn nil\n}",
"func (n *Node) AddInputPort(portName string, portType string, representation string, channel string, defaultMsg string) {\n\tinput := In{IO: IO{Name: portName, Channel: channel, Type: portType, Representation: representation}, Default: defaultMsg}\n\tn.Ports.Inputs = append(n.Ports.Inputs, input)\n}",
"func (wr wiring) connect(src pin, sType int, sIName string, dst pin, dType int) error {\n\tif dst.p < 0 {\n\t\tswitch dst.name {\n\t\tcase Clk:\n\t\t\treturn errors.New(\"output pin connected to clock signal\")\n\t\tcase False:\n\t\t\treturn errors.New(\"output pin connected to constant false input\")\n\t\tcase True:\n\t\t\treturn errors.New(\"output pin connected to constant true input\")\n\t\t}\n\t}\n\n\tws := wr[src]\n\tif ws == nil {\n\t\tws = &node{name: sIName, pin: src, typ: sType}\n\t\twr[src] = ws\n\t} else if err := ws.checkType(sType); err != nil {\n\t\treturn err\n\n\t}\n\tif ws.isPartInput() {\n\t\treturn errors.New(\"part input pin used as output pin\")\n\t}\n\n\twd := wr[dst]\n\tif wd == nil {\n\t\twd = &node{pin: dst, typ: dType}\n\t\twr[dst] = wd\n\t} else if err := wd.checkType(dType); err != nil {\n\t\treturn err\n\t}\n\tswitch {\n\tcase wd.isOutput() && wd.pin.p >= 0:\n\t\treturn errors.New(\"part output pin used as output\")\n\tcase wd.isChipInput():\n\t\treturn errors.New(\"chip input pin used as output\")\n\tcase wd.src == nil:\n\t\twd.src = ws\n\tdefault:\n\t\treturn errors.New(\"output pin already used as output\")\n\t}\n\twd.setName(ws.name)\n\tws.outs = append(ws.outs, wd)\n\treturn nil\n}",
"func (p *Concatenator) In() *scipipe.InPort { return p.InPort(\"in\") }",
"func (tx *Tx) AddInput(in *protocol.TxIn) {\n\ttx.Inputs = append(tx.Inputs, in)\n}",
"func (fn *CXFunction) AddInput(prgrm *CXProgram, param *CXArgument) *CXFunction {\n\tfnInputs := fn.GetInputs(prgrm)\n\tfor _, inputIdx := range fnInputs {\n\t\tinput := prgrm.GetCXTypeSignatureFromArray(inputIdx)\n\t\tif input.Name == param.Name {\n\t\t\treturn fn\n\t\t}\n\t}\n\n\tparam.Package = fn.Package\n\tnewField := GetCXTypeSignatureRepresentationOfCXArg(prgrm, param)\n\n\tif fn.Inputs == nil {\n\t\tfn.Inputs = &CXStruct{}\n\t}\n\n\tnewFieldIdx := prgrm.AddCXTypeSignatureInArray(newField)\n\tfn.Inputs.AddField_TypeSignature(prgrm, newFieldIdx)\n\n\treturn fn\n}",
"func (s *System) addInput(config *InputConf) {\n\tif config.Set {\n\t\t// A set of utilities is needed\n\t\tutils := newInputSetUtility(config.Curve, config.NonZero)\n\t\tutils.Limit(config.Min, config.Max)\n\t\ts.utils[config.Id] = utils\n\t} else {\n\t\t// Singleton input utility, insert as is\n\t\tutil := newInputUtility(config.Curve, config.NonZero)\n\t\tutil.Limit(config.Min, config.Max)\n\t\ts.utils[config.Id] = util\n\t}\n}",
"func (ev *SentGenEnv) AddInput(sidx int, role string) {\n\twrd := ev.TransWord(ev.CurSent[sidx])\n\tfil := ev.Rules.States[role]\n\tev.SentInputs = append(ev.SentInputs, []string{wrd, role, fil})\n}",
"func (neuron *Neuron) AddInput(ni NeuronIndex) error {\n\tif neuron.Is(ni) {\n\t\treturn errors.New(\"adding a neuron as input to itself\")\n\t}\n\tif neuron.HasInput(ni) {\n\t\treturn errors.New(\"neuron already exists\")\n\t}\n\tneuron.InputNodes = append(neuron.InputNodes, ni)\n\n\treturn nil\n}",
"func (w *World) AddSystemInterface(sys SystemAddByInterfacer, in interface{}, ex interface{}) {\n\tw.AddSystem(sys)\n\n\tif w.sysIn == nil {\n\t\tw.sysIn = make(map[reflect.Type][]reflect.Type)\n\t}\n\n\tif !reflect.TypeOf(in).AssignableTo(reflect.TypeOf([]interface{}{})) {\n\t\tin = []interface{}{in}\n\t}\n\tfor _, v := range in.([]interface{}) {\n\t\tw.sysIn[reflect.TypeOf(sys)] = append(w.sysIn[reflect.TypeOf(sys)], reflect.TypeOf(v).Elem())\n\t}\n\n\tif ex == nil {\n\t\treturn\n\t}\n\n\tif w.sysEx == nil {\n\t\tw.sysEx = make(map[reflect.Type][]reflect.Type)\n\t}\n\n\tif !reflect.TypeOf(ex).AssignableTo(reflect.TypeOf([]interface{}{})) {\n\t\tex = []interface{}{ex}\n\t}\n\tfor _, v := range ex.([]interface{}) {\n\t\tw.sysEx[reflect.TypeOf(sys)] = append(w.sysEx[reflect.TypeOf(sys)], reflect.TypeOf(v).Elem())\n\t}\n}",
"func (s *BasevhdlListener) EnterAdding_operator(ctx *Adding_operatorContext) {}",
"func (d *DSP) Output(index int) (DSP, DspConnection, error) {\n\tvar output DSP\n\tvar outputconnection DspConnection\n\tres := C.FMOD_DSP_GetOutput(d.cptr, C.int(index), &output.cptr, &outputconnection.cptr)\n\treturn output, outputconnection, errs[res]\n}",
"func (c *Connector) AddExternalInterface(localIfID common.IFIDType, link control.LinkInfo,\n\towned bool) error {\n\n\tintf := uint16(localIfID)\n\tlog.Debug(\"Adding external interface\", \"interface\", localIfID,\n\t\t\"local_isd_as\", link.Local.IA, \"local_addr\", link.Local.Addr,\n\t\t\"remote_isd_as\", link.Remote.IA, \"remote_addr\", link.Remote.IA,\n\t\t\"owned\", owned, \"bfd\", !link.BFD.Disable)\n\n\tif !c.ia.Equal(link.Local.IA) {\n\t\treturn serrors.WithCtx(errMultiIA, \"current\", c.ia, \"new\", link.Local.IA)\n\t}\n\tif err := c.DataPlane.AddLinkType(intf, link.LinkTo); err != nil {\n\t\treturn serrors.WrapStr(\"adding link type\", err, \"if_id\", localIfID)\n\t}\n\tif err := c.DataPlane.AddNeighborIA(intf, link.Remote.IA); err != nil {\n\t\treturn serrors.WrapStr(\"adding neighboring IA\", err, \"if_id\", localIfID)\n\t}\n\n\tif !owned {\n\t\tif !link.BFD.Disable {\n\t\t\terr := c.DataPlane.AddNextHopBFD(intf, link.Local.Addr, link.Remote.Addr,\n\t\t\t\tlink.BFD, link.Instance)\n\t\t\tif err != nil {\n\t\t\t\treturn serrors.WrapStr(\"adding next hop BFD\", err, \"if_id\", localIfID)\n\t\t\t}\n\t\t}\n\t\treturn c.DataPlane.AddNextHop(intf, link.Remote.Addr)\n\t}\n\tconnection, err := conn.New(link.Local.Addr, link.Remote.Addr,\n\t\t&conn.Config{ReceiveBufferSize: receiveBufferSize})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !link.BFD.Disable {\n\t\terr := c.DataPlane.AddExternalInterfaceBFD(intf, connection, link.Local,\n\t\t\tlink.Remote, link.BFD)\n\t\tif err != nil {\n\t\t\treturn serrors.WrapStr(\"adding external BFD\", err, \"if_id\", localIfID)\n\t\t}\n\t}\n\treturn c.DataPlane.AddExternalInterface(intf, connection)\n}",
"func (c *Client) Input(i Input) error {\n\t// The hash of the _current_ input\n\t// is needed for the request\n\tcur, err := c.CurrentInput()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := reqInput{\n\t\tRequest: reqModify,\n\t\tHash: cur.Hash,\n\t\tValue: i.Name,\n\t}\n\n\tvar body bytes.Buffer\n\tif err := json.NewEncoder(&body).Encode(data); err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(http.MethodPut, \"/menu_native/dynamic/tv_settings/devices/current_input\", &body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// response contains no specific fields\n\treturn c.do(req, &respWrapper{})\n}",
"func (c *Compiler) RegisterInput(name string, typ types.Type) (int, error) {\n\tinputIndex := c.ctx.Builder.NewInput(typ)\n\tinputSymbol := symbol.NewInputSymbol(name, typ, inputIndex)\n\terr := c.ctx.GlobalScope.Add(inputSymbol)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn inputIndex, nil\n}",
"func (gr *groupT) addInputArg(inp *inputT) {\n\tgr.Inputs = append(gr.Inputs, inp)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Disconnect the DSP unit from the specified target. target: The unit that this unit is to be removed from. Specify 0 or NULL to disconnect the unit from all outputs and inputs. connection: If there is more than one connection between 2 dsp units, this can be used to define which of the connections should be disconnected. Note that when you disconnect a unit, it is up to you to reconnect the network so that data flow can continue. Important note: If you have a handle to the connection pointer that binds these 2 DSP units, then it will become invalid. The connection is then sent back to a freelist to be reused again by a later addInput command.
|
func (d *DSP) DisconnectFrom(target DSP, connection DspConnection) error {
res := C.FMOD_DSP_DisconnectFrom(d.cptr, target.cptr, connection.cptr)
return errs[res]
}
|
[
"func (p *Port) Disconnect(q *Port) error {\n\tif !p.Connected(q) {\n\t\treturn errors.New(\"not connected\")\n\t}\n\tq.src = nil\n\tdelete(p.dests, q)\n\treturn nil\n}",
"func (s *SimpleDriver) DisconnectDevice(address *models.Addressable) error {\n\treturn nil\n}",
"func (d *DirectMemifConnector) Disconnect(crossConnect *crossconnect.CrossConnect) {\n\tvalue, exist := d.proxyMap.Load(crossConnect.GetId())\n\tif !exist {\n\t\tlogrus.Warnf(\"Proxy for cross connect with id=%s doesn't exist. Nothing to stop\", crossConnect.GetId())\n\t\treturn\n\t}\n\n\tproxy := value.(memifproxy.Proxy)\n\tproxy.Stop()\n\n\td.proxyMap.Delete(crossConnect.Id)\n}",
"func (d *Decoder) Disconnect(h FrameHandler) {\n\t// Delete handler\n\td.d.delHandler(h)\n\n\t// Disconnect nodes\n\tastiencoder.DisconnectNodes(d, h)\n}",
"func (r *FrameRateEmulator) Disconnect(h FrameHandler) {\n\t// Delete handler\n\tr.d.delHandler(h)\n\n\t// Disconnect nodes\n\tastiencoder.DisconnectNodes(r, h)\n}",
"func (client *Client) RemoveTarget(target Target) (id string, err error) {\n\tif target == client.status {\n\t\treturn \"\", ErrTargetIsStatus\n\t}\n\n\tclient.mutex.Lock()\n\tdefer client.mutex.Unlock()\n\n\tfor i := range client.targets {\n\t\tif target == client.targets[i] {\n\t\t\tid = target.ID()\n\n\t\t\tevent := NewEvent(\"hook\", \"remove_target\")\n\t\t\tevent.Args = []string{target.ID(), target.Kind(), target.Name()}\n\t\t\tclient.EmitNonBlocking(event)\n\n\t\t\tclient.targets[i] = client.targets[len(client.targets)-1]\n\t\t\tclient.targets = client.targets[:len(client.targets)-1]\n\n\t\t\t// Ensure the channel has been parted\n\t\t\tif channel, ok := target.(*Channel); ok && !channel.parted {\n\t\t\t\tclient.SendQueuedf(\"PART %s\", channel.Name())\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = ErrTargetNotFound\n\treturn\n}",
"func (self *SinglePad) Disconnect() {\n self.Object.Call(\"disconnect\")\n}",
"func (b *Bluez) Disconnect(adapterName, deviceMac string) error {\n\treturn b.CallDevice(adapterName, deviceMac, \"Disconnect\", 0).Store()\n}",
"func disconnectRemoteTunnel(c *cli.Context) error {\n\tport := c.Uint(\"dest\")\n\tif port == 0 {\n\t\treturn errors.New(\"Please provide destination port on local machine\")\n\t}\n\tdisconnectRemoteForwardedPort(sprint(port))\n\treturn nil\n}",
"func (m *Module) Disconnect(gate Gate, egress bool) {\n\tif egress {\n\t\tdelete(m.EGates, gate)\n\t} else {\n\t\tdelete(m.IGates, gate)\n\t}\n}",
"func (gw *Gateway) Disconnect(gnetID uint64) error {\n\tvar err error\n\tgw.strand(\"Disconnect\", func() {\n\t\tc := gw.d.connections.getByGnetID(gnetID)\n\t\tif c == nil {\n\t\t\terr = ErrConnectionNotExist\n\t\t\treturn\n\t\t}\n\n\t\terr = gw.d.Disconnect(c.Addr, ErrDisconnectRequestedByOperator)\n\t})\n\treturn err\n}",
"func Disconnect(w http.ResponseWriter, r *http.Request) {\n\truntime := r.Context().Value(\"runtime\").(*libpod.Runtime)\n\n\tvar netDisconnect types.NetworkDisconnect\n\tif err := json.NewDecoder(r.Body).Decode(&netDisconnect); err != nil {\n\t\tutils.Error(w, \"Something went wrong.\", http.StatusInternalServerError, errors.Wrap(err, \"Decode()\"))\n\t\treturn\n\t}\n\n\tname := utils.GetName(r)\n\terr := runtime.DisconnectContainerFromNetwork(netDisconnect.Container, name, netDisconnect.Force)\n\tif err != nil {\n\t\tif errors.Cause(err) == define.ErrNoSuchCtr {\n\t\t\tutils.Error(w, \"container not found\", http.StatusNotFound, err)\n\t\t\treturn\n\t\t}\n\t\tif errors.Cause(err) == define.ErrNoSuchNetwork {\n\t\t\tutils.Error(w, \"network not found\", http.StatusNotFound, err)\n\t\t\treturn\n\t\t}\n\t\tutils.Error(w, \"Something went wrong.\", http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tutils.WriteResponse(w, http.StatusOK, \"OK\")\n}",
"func (a *App) DisconnectFrom(target uint64) {\n\ta.Node.probabilityLock.Lock()\n\tdefer a.Node.probabilityLock.Unlock()\n\ta.Node.peerLossProbability[target] = 1.0\n}",
"func (bi *BaseInstance) disconnect(match VswMatch, param interface{}) error {\n\treturn bi.rules.remove(match, param)\n}",
"func (o OfflineNotaryRepository) RemoveTarget(string, ...data.RoleName) error {\n\treturn nil\n}",
"func (t *TargetCollection) RemoveTarget(host string) {\n\tif host == \"\" {\n\t\treturn\n\t}\n\n\tt.mux.Lock()\n\tdefer t.mux.Unlock()\n\n\tdelete(t.entries, host)\n}",
"func (c *localController) DisconnectNetwork(name string, containerPid int) error {\n\tlogrus.Debugf(\"disconnecting %d from networks %s\", containerPid, name)\n\n\tpeer, err := c.ds.GetNetworkPeer(name, containerPid)\n\tif err != nil {\n\t\tif err == ds.ErrNetworkPeerDoesNotExist {\n\t\t\treturn fmt.Errorf(\"container %d is not connected to network %s\", containerPid, name)\n\t\t}\n\n\t\treturn err\n\t}\n\ttmpConfDir, err := ioutil.TempDir(\"\", \"circuit-\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(tmpConfDir)\n\n\tcninet, nc, rt, err := c.getCniConfig(name, tmpConfDir, containerPid, peer.IfaceName)\n\tif err != nil {\n\t\tlogrus.Warnf(\"unable to detect peer: %s\", err)\n\t}\n\n\tif err := cninet.DelNetwork(nc, rt); err != nil {\n\t\tlogrus.Warnf(\"unable to disconnect: %s\", err)\n\t}\n\n\tif err := c.ds.DeleteNetworkPeer(name, containerPid); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\treturn nil\n}",
"func (*GenericFramework) PeerDisconnect(ctx *PeerContext) {}",
"func (c *Client) DropMountTarget(args *DropMountTargetArgs) error {\n\treturn bce.NewRequestBuilder(c).\n\t\tWithMethod(http.DELETE).\n\t\tWithURL(getMountTargetUri(args.FSID, args.MountId)).\n\t\tDo()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper function to disconnect either all inputs or all outputs of a dsp unit. inputs: true = disconnect all inputs to this DSP unit. false = leave input connections alone. outputs: true = disconnect all outputs to this DSP unit. false = leave output connections alone. This function is optimized to be faster than disconnecting inputs and outputs manually one by one. Important note: If you have a handle to DSPConnection pointers that bind any of the inputs or outputs to this DSP unit, then they will become invalid. The connections are sent back to a freelist to be reused again by a later addInput command.
|
func (d *DSP) DisconnectAll(inputs, outputs bool) error {
res := C.FMOD_DSP_DisconnectAll(d.cptr, getBool(inputs), getBool(outputs))
return errs[res]
}
|
[
"func (w *Wire) DisconnectOutputs() {\n\tw.SetNumOutputs(0)\n\tw.outputs = w.outputs[0:0]\n}",
"func (m *Module) Disconnect(gate Gate, egress bool) {\n\tif egress {\n\t\tdelete(m.EGates, gate)\n\t} else {\n\t\tdelete(m.IGates, gate)\n\t}\n}",
"func (p *Port) Disconnect(q *Port) error {\n\tif !p.Connected(q) {\n\t\treturn errors.New(\"not connected\")\n\t}\n\tq.src = nil\n\tdelete(p.dests, q)\n\treturn nil\n}",
"func (self *SinglePad) DisconnectI(args ...interface{}) {\n self.Object.Call(\"disconnect\", args)\n}",
"func filterOutDs(ds domainSet) {\n\tfor k, _ := range ds {\n\t\tif blockedDs.domainSet[k] {\n\t\t\tdelete(blockedDs.domainSet, k)\n\t\t\tblockedDomainChanged = true\n\t\t}\n\t\tif directDs.domainSet[k] {\n\t\t\tdelete(directDs.domainSet, k)\n\t\t\tdirectDomainChanged = true\n\t\t}\n\t}\n}",
"func (m *lispMachine) UnbindAll() {\n\t// if m.dealloc() {\n\tfor _, n := range m.sorted {\n\t\tm.logf(\"dealloc n; %v %x %p\", n, n.Hashcode(), n)\n\t\tif !n.isInput() {\n\t\t\tn.unbind()\n\t\t}\n\t}\n\t// }\n}",
"func (tx *Tx) ClearOutputs() {\n\ttx.Outputs = nil\n}",
"func (self *SinglePad) Disconnect() {\n self.Object.Call(\"disconnect\")\n}",
"func disconnectAll(clients []Client) {\n\tfor _, c := range clients {\n\t\tc.Disconnect()\n\t}\n}",
"func DisconnectNodes(parent, child Node) {\n\tparent.DelChild(child)\n\tchild.DelParent(parent)\n}",
"func DisconnectedLinks(stopchan chan struct{}) bool {\n\tl := len(linkBridges.unconnected)\n\tputOffers := make(chan dynamic.DHTPutReturn, l)\n\tfor _, link := range linkBridges.unconnected {\n\t\tif link.State == StateInit {\n\t\t\tutil.Info.Println(\"DisconnectedLinks for\", link.LinkParticipants(), link.LinkID())\n\t\t\tvar linkBridge = NewLinkBridge(link.LinkAccount, link.MyAccount, accounts)\n\t\t\tlinkBridge.SessionID = link.SessionID\n\t\t\tlinkBridge.Get = link.Get\n\t\t\tpc, err := ConnectToIceServices(config)\n\t\t\tif err != nil {\n\t\t\t\tutil.Error.Println(\"DisconnectedLinks error connecting tot ICE services\", err)\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlinkBridge.PeerConnection = pc\n\t\t\t\tdataChannel, err := linkBridge.PeerConnection.CreateDataChannel(link.LinkParticipants(), nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Error.Println(\"DisconnectedLinks error creating dataChannel for\", link.LinkAccount, link.LinkID())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlinkBridge.DataChannel = dataChannel\n\t\t\t}\n\t\t\tlinkBridge.Offer, _ = linkBridge.PeerConnection.CreateOffer(nil)\n\t\t\tlinkBridge.Answer = link.Answer\n\t\t\tencoded, err := util.EncodeObject(linkBridge.Offer)\n\t\t\tif err != nil {\n\t\t\t\tutil.Info.Println(\"DisconnectedLinks error EncodeObject\", err)\n\t\t\t}\n\t\t\tdynamicd.PutLinkRecord(linkBridge.MyAccount, linkBridge.LinkAccount, encoded, putOffers)\n\t\t\tlinkBridge.State = StateWaitForAnswer\n\t\t\tlinkBridges.unconnected[linkBridge.LinkID()] = &linkBridge\n\t\t} else {\n\t\t\tl--\n\t\t}\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tselect {\n\t\tdefault:\n\t\t\toffer := <-putOffers\n\t\t\tlinkBridge := NewLinkBridge(offer.Sender, offer.Receiver, accounts)\n\t\t\tlink := linkBridges.unconnected[linkBridge.LinkID()]\n\t\t\tlink.Put = offer.DHTPutJSON\n\t\t\tutil.Info.Println(\"DisconnectedLinks Offer saved\", offer)\n\t\tcase <-stopchan:\n\t\t\tutil.Info.Println(\"DisconnectedLinks stopped\")\n\t\t\treturn 
false\n\t\t}\n\t}\n\treturn true\n}",
"func (x *Root) ClearOutputDrivers() {\n\tfor _, o := range x.outputdrivers {\n\t\to.Detach()\n\t}\n\tx.outputdrivers = make([]OutputDriver, 0, 1)\n}",
"func InoutFree(i **Input) {\n\tC.avfilter_inout_free((**C.struct_AVFilterInOut)(unsafe.Pointer(i)))\n}",
"func (dc *DummyClient) Disconnect() {\n\n\t// Close connections to all nodes.\n\tfor id, connection := range dc.connections {\n\t\tif connection == nil {\n\t\t\tdc.logger.Log(logging.LevelWarn, fmt.Sprintf(\"No connection to close to node %d\", id))\n\t\t} else if _, err := connection.CloseAndRecv(); err != nil {\n\t\t\tdc.logger.Log(logging.LevelWarn, fmt.Sprintf(\"Could not close connection to node %d\", id))\n\t\t}\n\t}\n}",
"func (cs *Channels) CloseChannels() {\n\tfor _, c := range cs.Out {\n\t\tclose(c)\n\t}\n\tcs.Out = cs.Out[:0]\n}",
"func Disconnect() {\n\tif pool != nil {\n\t\t_ = pool.Close() //todo: handle this error?\n\t}\n\tpool = nil\n}",
"func Disconnect() {\n\tfmt.Println(\"Disconnecting from Motor Board...\")\n\tport.Flush()\n\tport.Close()\n\tConnected = false\n}",
"func (d *Driver) Outs() (outs []drivers.Out, err error) {\n\tvar num int\n\tfor i := 0; i < portmidi.CountDevices(); i++ {\n\t\tinfo := portmidi.Info(portmidi.DeviceID(i))\n\t\t//\t\tfmt.Printf(\"%q devideID %v\\n\", info.Name, portmidi.DeviceID(i))\n\t\tif info != nil && info.IsOutputAvailable {\n\t\t\t//\t\tfmt.Printf(\"registering out port %q, number [%v], devideID %v\\n\", info.Name, num, portmidi.DeviceID(i))\n\t\t\touts = append(outs, newOut(d, portmidi.DeviceID(i), num, info.Name))\n\t\t\tnum++\n\t\t}\n\t}\n\treturn\n}",
"func (d *DSP) DisconnectFrom(target DSP, connection DspConnection) error {\n\tres := C.FMOD_DSP_DisconnectFrom(d.cptr, target.cptr, connection.cptr)\n\treturn errs[res]\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the number of inputs connected to the DSP unit. Inputs are units that feed data to this unit. When there are multiple inputs, they are mixed together. Performance warning! Because this function needs to flush the dsp queue before it can determine how many units are available, this function may block significantly while the background mixer thread operates.
|
func (d *DSP) NumInputs() (int, error) {
var numinputs C.int
res := C.FMOD_DSP_GetNumInputs(d.cptr, &numinputs)
return int(numinputs), errs[res]
}
|
[
"func (tx *Tx) InputCount() int {\r\n\treturn len(tx.Inputs)\r\n}",
"func (dev *PMX) OutputCount() int {\n\treturn len(dev.Channels)\n}",
"func (tt *Meta) InputCount() int {\n return len(tt.meta_.Inputs)\n}",
"func (p Plexer) NumChannels() int {\n\treturn len(p.channels)\n}",
"func (dev *E36xx) OutputCount() int {\n\treturn len(dev.Channels)\n}",
"func (q *Queue) GetInputLength() int64 {\n\treturn q.redisClient.LLen(queueInputKey(q.Name)).Val()\n}",
"func (q *TransmitLimitedQueue) NumQueued() int {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\treturn q.lenLocked()\n}",
"func (queue *Queue) GetInputLength() int64 {\n\treturn queue.redisClient.LLen(queueInputKey(queue.Name)).Val()\n}",
"func (ctx Context) Count(input chan float64) (n uint) {\n\tfor _ = range input {\n\t\tn++\n\t}\n\n\treturn n\n}",
"func (d *DSP) NumParameters() (int, error) {\n\tvar numparams C.int\n\tres := C.FMOD_DSP_GetNumParameters(d.cptr, &numparams)\n\treturn int(numparams), errs[res]\n}",
"func (c *connection) getQueueLength(inputs input) (int, error) {\n\n\tif inputs.limit > 0 {\n\t\treturn inputs.limit, nil\n\t}\n\n\tqLength, err := redis.Int(conn.redis.Do(\"LLEN\", inputs.source))\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif qLength < 1 {\n\t\treturn 0, fmt.Errorf(\"Source queue is empty\")\n\t}\n\n\treturn qLength, nil\n}",
"func (st *statement) NumInput() int {\n\tif st.dpiStmt == nil {\n\t\tif st.query == getConnection {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\t}\n\tst.Lock()\n\tdefer st.Unlock()\n\tvar cnt C.uint32_t\n\t//defer func() { fmt.Printf(\"%p.NumInput=%d (%q)\\n\", st, cnt, st.query) }()\n\tif C.dpiStmt_getBindCount(st.dpiStmt, &cnt) == C.DPI_FAILURE {\n\t\treturn -1\n\t}\n\tif cnt < 2 { // 1 can't decrease...\n\t\treturn int(cnt)\n\t}\n\tnames := make([]*C.char, int(cnt))\n\tlengths := make([]C.uint32_t, int(cnt))\n\tif C.dpiStmt_getBindNames(st.dpiStmt, &cnt, &names[0], &lengths[0]) == C.DPI_FAILURE {\n\t\treturn -1\n\t}\n\t//fmt.Printf(\"%p.NumInput=%d\\n\", st, cnt)\n\n\t// return the number of *unique* arguments\n\treturn int(cnt)\n}",
"func (pl *Pipeline) GetNumChannels() int {\n\treturn len(pl.channels)\n}",
"func GetDeviceQueueItemCountForDevEUI(ctx context.Context, db sqlx.Queryer, devEUI lorawan.EUI64) (int, error) {\n\tvar count int\n\terr := sqlx.Get(db, &count, `\n\t\tselect\n\t\t\tcount(*)\n\t\tfrom\n\t\t\tdevice_queue\n\t\twhere\n\t\t\tdev_eui = $1\n\t`, devEUI)\n\tif err != nil {\n\t\treturn 0, handlePSQLError(err, \"select error\")\n\t}\n\n\treturn count, nil\n}",
"func (h *InputHost) GetNumConnections() int {\n\treturn int(h.hostMetrics.Get(load.HostMetricNumOpenConns))\n}",
"func (p *FuncInfo) NumIn() int {\n\treturn len(p.in)\n}",
"func (s *System) DSPBufferSize() (uint32, int, error) {\n\tvar bufferlength C.uint\n\tvar numbuffers C.int\n\tres := C.FMOD_System_GetDSPBufferSize(s.cptr, &bufferlength, &numbuffers)\n\treturn uint32(bufferlength), int(numbuffers), errs[res]\n}",
"func (ct *cpuTracker) Len() int {\n\tct.lock.Lock()\n\tdefer ct.lock.Unlock()\n\n\treturn len(ct.cpuSpenders)\n}",
"func (o *MDRaid) GetNumDevices(ctx context.Context) (numDevices uint32, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfaceMDRaid, \"NumDevices\").Store(&numDevices)\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves a pointer to a DSP unit which is acting as an input to this unit. index: Index of the input unit to retrieve. An input is a unit which feeds audio data to this unit. If there are more than 1 input to this unit, the inputs will be mixed, and the current unit processes the mixed result. Find out the number of input units to this unit by calling "DSP.NumInputs". Performance warning! Because this function needs to flush the dsp queue before it can determine if the specified numerical input is available or not, this function may block significantly while the background mixer thread operates. Note: The connection pointer retrieved here will become invalid if you disconnect the 2 dsp units that use it.
|
func (d *DSP) Input(index int) (DSP, DspConnection, error) {
var input DSP
var inputconnection DspConnection
res := C.FMOD_DSP_GetInput(d.cptr, C.int(index), &input.cptr, &inputconnection.cptr)
return input, inputconnection, errs[res]
}
|
[
"func (d *DspConnection) Input() (DSP, error) {\n\tvar input DSP\n\tres := C.FMOD_DSPConnection_GetInput(d.cptr, &input.cptr)\n\treturn input, errs[res]\n}",
"func (d *DSP) Output(index int) (DSP, DspConnection, error) {\n\tvar output DSP\n\tvar outputconnection DspConnection\n\tres := C.FMOD_DSP_GetOutput(d.cptr, C.int(index), &output.cptr, &outputconnection.cptr)\n\treturn output, outputconnection, errs[res]\n}",
"func (tx *Tx) InputIdx(i int) *Input {\r\n\tif i > tx.InputCount()-1 {\r\n\t\treturn nil\r\n\t}\r\n\treturn tx.Inputs[i]\r\n}",
"func (_e *AsyncProducer_Expecter) Input() *AsyncProducer_Input_Call {\n\treturn &AsyncProducer_Input_Call{Call: _e.mock.On(\"Input\")}\n}",
"func (fm *Frame) InputChan() chan any {\n\treturn fm.ports[0].Chan\n}",
"func (samples *Samples) getNumber(index int, fieldName string, retVal *C.double) error {\n\tfieldNameCStr := C.CString(fieldName)\n\tdefer C.free(unsafe.Pointer(fieldNameCStr))\n\n\tretcode := int(C.RTI_Connector_get_number_from_sample(unsafe.Pointer(samples.input.connector.native), retVal, samples.input.nameCStr, C.int(index+1), fieldNameCStr))\n\treturn checkRetcode(retcode)\n}",
"func (p *Payload) GetInDev() uint32 {\n\treturn uint32(C.nfq_get_indev(p.nfad))\n}",
"func (m *MPQ) Input() io.ReadSeeker {\n\treturn m.input\n}",
"func (q *chunkQueue) GetSender(index uint32) p2p.ID {\n\tq.Lock()\n\tdefer q.Unlock()\n\treturn q.chunkSenders[index]\n}",
"func (p *AsyncProducer) Input() chan<- *sarama.ProducerMessage { return p.input }",
"func (d *DustDevil) InputChannel() chan *erebos.Transport {\n\treturn d.Input\n}",
"func (fm *Frame) InputChan() chan interface{} {\n\treturn fm.ports[0].Chan\n}",
"func (a *Analyzer) InputChannel() chan []Frame {\n\treturn a.frameStream\n}",
"func (d *DSP) AddInput(input DSP, typ DSPConnectionType) (DspConnection, error) {\n\tvar dspConn DspConnection\n\tres := C.FMOD_DSP_AddInput(d.cptr, input.cptr, &dspConn.cptr, C.FMOD_DSPCONNECTION_TYPE(typ))\n\treturn dspConn, errs[res]\n}",
"func (swp *SourceWorkerPool) GetInputChannel() (chan map[string]interface{}, error) {\n\treturn nil, ErrInputChanDoesNotExist\n}",
"func GetInput(p *player.Player) (int, int, error) {\n\tvar userInputIndex int\n\tfmt.Scanln(&userInputIndex)\n\n\terr := ValidateInput(userInputIndex)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\treturn p.Id(), userInputIndex, nil\n}",
"func (m *TeleconferenceDeviceMediaQuality) GetChannelIndex()(*int32) {\n val, err := m.GetBackingStore().Get(\"channelIndex\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*int32)\n }\n return nil\n}",
"func (socket *MockSocket) Input() *socket.InputProtocol {\n\treturn socket.input\n}",
"func PlayInput(fileName string) int {\n\tc_fileName := C.CString(fileName)\n\tdefer C.free(unsafe.Pointer(c_fileName))\n\n\t//int IupPlayInput(const char* filename);\n\treturn int(C.IupPlayInput(c_fileName))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves a pointer to a DSP unit which is acting as an output to this unit. index: Index of the output unit to retrieve. An output is a unit which this unit will feed data too once it has processed its data. Find out the number of output units to this unit by calling "DSP.NumOutputs". Performance warning! Because this function needs to flush the dsp queue before it can determine if the specified numerical output is available or not, this function may block significantly while the background mixer thread operates. Note: The connection pointer retrieved here will become invalid if you disconnect the 2 dsp units that use it.
|
func (d *DSP) Output(index int) (DSP, DspConnection, error) {
var output DSP
var outputconnection DspConnection
res := C.FMOD_DSP_GetOutput(d.cptr, C.int(index), &output.cptr, &outputconnection.cptr)
return output, outputconnection, errs[res]
}
|
[
"func (d *DspConnection) Output() (DSP, error) {\n\tvar output DSP\n\tres := C.FMOD_DSPConnection_GetOutput(d.cptr, &output.cptr)\n\treturn output, errs[res]\n}",
"func (tx *Tx) OutputIdx(i int) *Output {\r\n\tif i > tx.OutputCount()-1 {\r\n\t\treturn nil\r\n\t}\r\n\treturn tx.Outputs[i]\r\n}",
"func (d *DSP) Input(index int) (DSP, DspConnection, error) {\n\tvar input DSP\n\tvar inputconnection DspConnection\n\tres := C.FMOD_DSP_GetInput(d.cptr, C.int(index), &input.cptr, &inputconnection.cptr)\n\treturn input, inputconnection, errs[res]\n}",
"func (e *Engine) DefaultOutputDevice() *portaudio.DeviceInfo {\n\tif !e.initialized {\n\t\treturn nil\n\t}\n\tif defaultOutputDeviceInfo, err := portaudio.DefaultOutputDevice(); err != nil {\n\t\treturn nil\n\t} else {\n\t\treturn defaultOutputDeviceInfo\n\t}\n}",
"func (md *MassDns) GetOutput() <-chan dns.RR {\n\toc := md.output\n\treturn oc\n}",
"func (dev *E36xx) OutputCount() int {\n\treturn len(dev.Channels)\n}",
"func (c *Computer) GetOutput() chan int {\n\treturn c.output\n}",
"func (p *OutProvider) GetOut(localDomain, remoteDomain string) stream.S2SOut {\n\tdomainPair := getDomainPair(localDomain, remoteDomain)\n\tp.mu.RLock()\n\toutStm := p.outConnections[domainPair]\n\tp.mu.RUnlock()\n\n\tif outStm != nil {\n\t\treturn outStm\n\t}\n\tp.mu.Lock()\n\toutStm = p.outConnections[domainPair] // 2nd check\n\tif outStm != nil {\n\t\tp.mu.Unlock()\n\t\treturn outStm\n\t}\n\toutStm = p.newOut(localDomain, remoteDomain)\n\tp.outConnections[domainPair] = outStm\n\tp.mu.Unlock()\n\n\tlog.Infof(\"registered s2s out stream... (domainpair: %s)\", domainPair)\n\n\treturn outStm\n}",
"func (fm *Frame) OutputChan() chan<- interface{} {\n\treturn fm.ports[1].Chan\n}",
"func (dev *PMX) OutputCount() int {\n\treturn len(dev.Channels)\n}",
"func (d *Drain) GetOutput() <-chan interface{} {\n\treturn d.output\n}",
"func nvmlUnitGetHandleByIndex(Index uint32, Unit *Unit) Return {\n\tcIndex, _ := (C.uint)(Index), cgoAllocsUnknown\n\tcUnit, _ := (*C.nvmlUnit_t)(unsafe.Pointer(Unit)), cgoAllocsUnknown\n\t__ret := C.nvmlUnitGetHandleByIndex(cIndex, cUnit)\n\t__v := (Return)(__ret)\n\treturn __v\n}",
"func (n *Net) Out(i int) float64 {\n\treturn n.out[len(n.lsize)-1][i]\n}",
"func (d *Dry) OuputChannel() <-chan string {\n\treturn d.output\n}",
"func (m *Music) Playback(buf1, buf2 []int16) {\n\tbufWave := make([]int16, len(buf1)*2)\n\tfor i, bar := range buf1 {\n\t\tbufWave[i*2] = bar\n\t\tbufWave[i*2+1] = buf2[i]\n\t}\n\n\t// Go 1.6 cgocheck fix: Can't pass Go pointer to C function\n\twavehdr := (*C.WAVEHDR)(C.malloc(C.wavehdrsize))\n\tC.memset(unsafe.Pointer(wavehdr), 0, C.wavehdrsize)\n\twavehdr.lpData = C.LPSTR(unsafe.Pointer(&bufWave[0]))\n\twavehdr.dwBufferLength = C.DWORD(len(bufWave) * 2)\n\n\tres := C.waveOutPrepareHeader(hwaveout, wavehdr, C.UINT(C.wavehdrsize))\n\tif res != C.MMSYSERR_NOERROR {\n\t\tfmt.Fprintln(os.Stderr, \"Error: waveOutPrepareHeader:\", winmmErrorText(res))\n\t\tos.Exit(1)\n\t}\n\n\tif wavehdrLast != nil {\n\t\tfor wavehdrLast.dwFlags&C.WHDR_DONE == 0 {\n\t\t\t// still playing\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n\n\tif !m.stopping {\n\t\tres = C.waveOutWrite(hwaveout, wavehdr, C.UINT(C.wavehdrsize))\n\t\tif res != C.MMSYSERR_NOERROR {\n\t\t\tfmt.Fprintln(os.Stderr, \"Error: waveOutWrite:\", winmmErrorText(res))\n\t\t}\n\n\t\tfor wavehdr.dwFlags&C.WHDR_DONE == 0 {\n\t\t\t// still playing\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t\tres = C.waveOutUnprepareHeader(hwaveout, wavehdr, C.UINT(C.wavehdrsize))\n\t\tif res != C.MMSYSERR_NOERROR {\n\t\t\tfmt.Fprintln(os.Stderr, \"Error: waveOutUnprepareHeader:\", winmmErrorText(res))\n\t\t}\n\n\t\tfor wavehdr.dwFlags&C.WHDR_DONE == 0 {\n\t\t\t// still playing\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t\tres = C.waveOutUnprepareHeader(hwaveout, wavehdr, C.UINT(C.wavehdrsize))\n\t\tif res != C.MMSYSERR_NOERROR {\n\t\t\tfmt.Fprintln(os.Stderr, \"Error: waveOutUnprepareHeader:\", winmmErrorText(res))\n\t\t}\n\t}\n\n\twavehdrLast = wavehdr\n\n\tm.linePlayed <- true // notify that playback is done\n}",
"func (net Network) GetOutputValue() float64 {\n\treturn net[len(net)-1][0].value\n}",
"func (p *Payload) GetOutDev() uint32 {\n\treturn uint32(C.nfq_get_outdev(p.nfad))\n}",
"func (c *Camera) OutputBufferIndex() int {\n\treturn c.H3DNode.NodeParamI(horde3d.Camera_OutBufIndexI)\n}",
"func makeOutputDevice(ch chan<- int) func(int) {\n\treturn func(n int) {\n\t\tch <- n\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ DSP unit control. Enables or disables a unit for being processed. active: true = unit is activated, false = unit is deactivated. This does not connect or disconnect a unit in any way, it just disables it so that it is not processed. If a unit is disabled, and has inputs, they will also cease to be processed. To disable a unit but allow the inputs of the unit to continue being processed, use "DSP.SetBypass" instead.
|
func (d *DSP) SetActive(active bool) error {
res := C.FMOD_DSP_SetActive(d.cptr, getBool(active))
return errs[res]
}
|
[
"func (t *Trader) SetActive(value bool) {\n\tt.active = value\n}",
"func (c *CmdBuff) SetActive(b bool) {\n\tc.mx.Lock()\n\t{\n\t\tc.active = b\n\t}\n\tc.mx.Unlock()\n\n\tc.fireActive(c.active)\n}",
"func DUTActive(ctx context.Context, servoInst *servo.Servo) (bool, error) {\n\tstate, err := servoInst.GetECSystemPowerState(ctx)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"failed to get ec_system_power_state\")\n\t}\n\ttesting.ContextLog(ctx, \"state: \", state)\n\tif state == \"S0\" {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}",
"func (s *Service) enableUnit() error {\n\tLogf(\"%s enabling service\\n\", s.ID())\n\n\tunits := []string{s.unit}\n\t_, changes, err := s.conn.EnableUnitFiles(units, false, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, change := range changes {\n\t\tLogf(\"%s %s %s -> %s\\n\", s.ID(), change.Type, change.Filename, change.Destination)\n\t}\n\n\treturn nil\n}",
"func (d *Device) SetRunning(isRunning bool) error {\n\tdata := []uint8{0}\n\terr := d.bus.ReadRegister(uint8(d.Address), REG_CONTROL, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif isRunning {\n\t\tdata[0] &^= uint8(1 << EOSC)\n\t} else {\n\t\tdata[0] |= 1 << EOSC\n\t}\n\terr = d.bus.WriteRegister(uint8(d.Address), REG_CONTROL, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *Component) SetActive(active bool) {\n\tc.active = active\n}",
"func (self *PhysicsP2) Enable1O(object interface{}, debug bool) {\n self.Object.Call(\"enable\", object, debug)\n}",
"func (this *channelMeterStruct) setEnabled(value bool) {\n\tthis.mutex.Lock()\n\tenabled := this.enabled\n\n\t/*\n\t * Check if status of meter must be changed.\n\t */\n\tif value != enabled {\n\n\t\t/*\n\t\t * If level meter should be disabled, clear state.\n\t\t */\n\t\tif !value {\n\t\t\tthis.currentValue = 0.0\n\t\t\tthis.peakValue = 0.0\n\t\t\tthis.sampleCounter = 0\n\t\t}\n\n\t\tthis.enabled = value\n\t}\n\n\tthis.mutex.Unlock()\n}",
"func (w *WidgetBase) SetActive(a bool) {\n\tw.active = a\n}",
"func (self Context) SetActive(active bool) {\n\tif active {\n\t\tC.sfContext_setActive(self.Cref, C.sfBool(1))\n\t} else {\n\t\tC.sfContext_setActive(self.Cref, C.sfBool(0))\n\t}\n}",
"func (self *RenderWindow) SetActive(active Bool) sfBool {\n return C.sfRenderWindow_SetActive(self.Cref, active.Cref)\n}",
"func (p *PortForwarder) SetActive(b bool) {\n\tp.active = b\n}",
"func (u *Unit) StartUnit() error {\n\tconn, err := sd.NewSystemdConnection()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get systemd bus connection: %v\", err)\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\treschan := make(chan string)\n\t_, err = conn.StartUnit(u.Unit, \"replace\", reschan)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to start unit %s: %v\", u.Unit, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (self *TileSprite) SetInputEnabledA(member bool) {\n self.Object.Set(\"inputEnabled\", member)\n}",
"func (this *meterStruct) SetEnabled(value bool) {\n\tthis.mutex.Lock()\n\tenabled := this.enabled\n\n\t/*\n\t * Check if value must be changed.\n\t */\n\tif value != enabled {\n\t\tchannelMeters := this.channelMeters\n\n\t\t/*\n\t\t * Enable or disable each channel meter.\n\t\t */\n\t\tfor _, channelMeter := range channelMeters {\n\t\t\tchannelMeter.setEnabled(value)\n\t\t}\n\n\t\tthis.enabled = value\n\t}\n\n\tthis.mutex.Unlock()\n}",
"func (self *Graphics) SetInputEnabledA(member bool) {\n self.Object.Set(\"inputEnabled\", member)\n}",
"func (d *DSP) Bypass() (bool, error) {\n\tvar bypass C.FMOD_BOOL\n\tres := C.FMOD_DSP_GetBypass(d.cptr, &bypass)\n\treturn setBool(bypass), errs[res]\n}",
"func (o *baseOp) Active() bool {\n\to.m.Lock()\n\tdefer o.m.Unlock()\n\treturn o.started\n}",
"func (sdc *SystemdClient) IsActive(unit string) (bool, error) {\n\tconn, err := dbus.New()\n\tif err != nil {\n\t\treturn false, maskAny(err)\n\t}\n\n\tustates, err := conn.ListUnits()\n\tif err != nil {\n\t\treturn false, maskAny(err)\n\t}\n\n\tfor _, ustate := range ustates {\n\t\tif ustate.Name == unit {\n\t\t\treturn ustate.ActiveState == \"active\", nil\n\t\t}\n\t}\n\n\treturn false, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Enables or disables the read callback of a DSP unit so that it does or doesn't process the data coming into it. A DSP unit that is disabled still processes its inputs, it will just be 'dry'. bypass: Boolean to cause the read callback of the DSP unit to be bypassed or not. Default = false. If a unit is bypassed, it will still process its inputs. To disable the unit and all of its inputs, use "DSP.SetActive" instead.
|
func (d *DSP) SetBypass(bypass bool) error {
res := C.FMOD_DSP_SetBypass(d.cptr, getBool(bypass))
return errs[res]
}
|
[
"func (d *DSP) Bypass() (bool, error) {\n\tvar bypass C.FMOD_BOOL\n\tres := C.FMOD_DSP_GetBypass(d.cptr, &bypass)\n\treturn setBool(bypass), errs[res]\n}",
"func wrapper_audio(t *eval.Thread, in []eval.Value, out []eval.Value) {\n\tif uiSettings.Terminated() {\n\t\treturn\n\t}\n\n\tenable := in[0].(eval.BoolValue).Get(t)\n\n\tmutex.Lock()\n\tuiSettings.EnableAudio(enable)\n\tmutex.Unlock()\n}",
"func SPIDelayRead(v bool) func(*options) error {\n\treturn func(o *options) error { return o.setSPIDelayRead(v) }\n}",
"func (m *DirectRoutingLogRow) SetMediaBypassEnabled(value *bool)() {\n m.mediaBypassEnabled = value\n}",
"func (d *dependencySleepAfterInitializeSubscribe) enable() {\n\td.f = true\n}",
"func (r *Router) UseBypass(mwf ...gorillamux.MiddlewareFunc) {\n\tr.mux.Use(mwf...)\n}",
"func (sdc *SystemdClient) Disable(unit string) error {\n\tsdc.Logger.Debugf(\"disabling %s\", unit)\n\n\tconn, err := dbus.New()\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\tif _, err := conn.DisableUnitFiles([]string{unit}, false); err != nil {\n\t\tsdc.Logger.Errorf(\"disabling %s failed: %#v\", unit, err)\n\t\treturn maskAny(err)\n\t}\n\n\treturn nil\n}",
"func (device *SilentStepperBrick) Disable() (err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Set(uint8(FunctionDisable), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func DisableEffects() {\n\n\tgl.UseProgram(0)\n\tpaunchEffect = nil\n}",
"func EnableClock(lp bool) {\n\tRCC := rcc.RCC\n\tif lp {\n\t\tRCC.FMCLPEN().AtomicSet()\n\t} else {\n\t\tRCC.FMCLPEN().AtomicClear()\n\n\t}\n\tRCC.FMCEN().AtomicSet()\n}",
"func (u *Unit) Disable() {\n\tu.pru.wr(u.ctlBase+c_CONTROL, ctl_RESET)\n}",
"func (self *PhysicsP2) Enable1O(object interface{}, debug bool) {\n self.Object.Call(\"enable\", object, debug)\n}",
"func GetBypass(conn io.ReadWriter) (bypass Bypass, err error) {\n\n\tresp, err := getQuery(getBypass, conn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn *resp.(*Bypass), err\n}",
"func (p *Port) EnableClock(lp bool) {\n\tenableClock(p, lp)\n}",
"func (o *Manager) LoopSetup(ctx context.Context, fd dbus.UnixFD, options map[string]dbus.Variant) (resultingDevice dbus.ObjectPath, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceManager+\".LoopSetup\", 0, fd, options).Store(&resultingDevice)\n\treturn\n}",
"func (s *System) Set3DRolloffCallback(callback C.FMOD_3D_ROLLOFF_CALLBACK) error {\n\t//FMOD_RESULT F_API FMOD_System_Set3DRolloffCallback (FMOD_SYSTEM *system, FMOD_3D_ROLLOFF_CALLBACK callback);\n\treturn ErrNoImpl\n}",
"func (p *plug) set(on bool) error {\n\t// lock pins\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\t// clear error\n\tclearPinError()\n\n\t// set d2-d1-d0 depending on which plug\n\tswitch p.id {\n\tcase all:\n\t\t// 011\n\t\td2.off()\n\t\td1.on()\n\t\td0.on()\n\tcase one:\n\t\t// 111\n\t\td2.on()\n\t\td1.on()\n\t\td0.on()\n\tcase two:\n\t\t// 110\n\t\td2.on()\n\t\td1.on()\n\t\td0.off()\n\tdefault:\n\t\t// not recognised, return error\n\t\treturn fmt.Errorf(\"%d is not a valid plug id\", p.id)\n\t}\n\n\t// set d3 depending on on/off\n\tif on {\n\t\td3.on()\n\t} else {\n\t\td3.off()\n\t}\n\n\t// allow the encoder to settle\n\ttime.Sleep(100 * time.Millisecond)\n\n\t// enable the modulator\n\tenable.on()\n\t// pause\n\ttime.Sleep(250 * time.Millisecond)\n\t// disable the modulator\n\tenable.off()\n\n\terr := lastPinError()\n\tif err == nil {\n\t\tp.state = on\n\t}\n\treturn err\n}",
"func (s *System) MixerSuspend() error {\n\tres := C.FMOD_System_MixerSuspend(s.cptr)\n\treturn errs[res]\n}",
"func (d *Device) UseLowPower(power bool) {\n\tif power {\n\t\td.bwRate.lowPower = 1\n\t} else {\n\t\td.bwRate.lowPower = 0\n\t}\n\td.bus.WriteRegister(uint8(d.Address), REG_BW_RATE, []byte{d.bwRate.toByte()})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.