query
stringlengths
8
6.75k
document
stringlengths
9
1.89M
negatives
listlengths
19
19
metadata
dict
Keyword search for query
func KeywordSearch(t *testing.T, url string) (statusCode int, respBody []byte, err error) { respStatusCode, respBody, _ := GetResponseByGet(t, url) return respStatusCode, respBody, nil }
[ "func (s *searcher) Keyword(resp http.ResponseWriter, req *http.Request) {\n\tsearchTerms := mux.Vars(req)\n\n\tkey := searchTerms[\"keyword\"]\n\tif len(key) == 0 {\n\t\tresp.WriteHeader(http.StatusBadRequest)\n\t\tresp.Write([]byte(\"keyword is empty\"))\n\t\treturn\n\t}\n\n\tsearchPaths, err := s.searchIndex.GetTransactionPathsByKeyword(key)\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error finding transactions: %s\", err.Error())))\n\t\treturn\n\t}\n\n\ttransactions, err := s.searchIndex.GetTransactionsFromFiles(searchPaths)\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error finding transactions: %s\", err.Error())))\n\t\treturn\n\t}\n\n\tresultBytes, err := json.Marshal(transactions)\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error marshallig transactions to json: %s\", err.Error())))\n\t\treturn\n\t}\n\n\tresp.WriteHeader(http.StatusOK)\n\tresp.Write(resultBytes)\n}", "func searchEventByKeywords(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Print(\"serach events by keywords\\n\")\n\n\t//loginModel := model.LoginViewModel{}\n\n\t// show the events page if request type is Get\n\tif r.Method == http.MethodGet {\n\t\trenderTemplates(w, \"events.html\", \"Get mothod\")\n\t\treturn\n\t}\n\n\t// request data from ticketmaster api if request type is Post\n\tif r.Method == http.MethodPost {\n\n\t\tr.ParseForm()\n\t\tkeyWords := r.Form.Get(\"searchKeyWords\")\n\n\t\t// request results from ticktmaster api\n\t\tevents, err := searchEventFromTicketmaster(\"keyword\", string(keyWords), defaultEventCount)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Search Event from tickemaster api error: \" + err.Error())\n\t\t}\n\t\trenderTemplates(w, \"events.html\", events)\n\t}\n}", "func makeKeywordSearchQuery(keywords, fields []string, start, count string) string 
{\n\tif len(fields) == 0 {\n\t\tfields = defaultQueryFields\n\t}\n\n\tvar condition []string\n\tselectQuery := \"select \"\n\tfieldsQuery := strings.Join(fields, \",\")\n\t//\toldfromQuery := \" from book_items left join book_category on book_items.category_num=book_category.category_num where \"\n\tfromQuery := \" from book_items \" +\n\t\t\"left join book_category on book_items.category_num=book_category.category_num where \"\n\tfor _, value := range keywords {\n\t\tbookCondition := fmt.Sprintf(\n\t\t\t\"(author like '%%%s%%' or title like '%%%s%%' or summary like '%%%s%%' or isbn='%s' )\",\n\t\t\tvalue, value, value, value)\n\t\tcondition = append(condition, bookCondition)\n\t}\n\n\tconditionQuery := strings.Join(condition, \" and \")\n//\tsortQuery := \" order by (average*100+num_raters*0.3) desc \"\n\tsortQuery := \" order by rank desc \"\n\tpageQuery := \" limit \" + start + \",\" + count\n\n\tsqlQuery := selectQuery + fieldsQuery +\n\t\tfromQuery + conditionQuery + sortQuery + pageQuery\n//\tfmt.Println(sqlQuery)\n\treturn sqlQuery\n}", "func (tmdb *TMDb) SearchKeyword(name string, options map[string]string) (*KeywordSearchResults, error) {\n\tvar availableOptions = map[string]struct{}{\n\t\t\"page\": {}}\n\tvar keywords KeywordSearchResults\n\tsafeName := url.QueryEscape(name)\n\toptionsString := getOptionsString(options, availableOptions)\n\turi := fmt.Sprintf(\"%s/search/keyword?query=%s&api_key=%s%s\", baseURL, safeName, tmdb.apiKey, optionsString)\n\tresult, err := getTmdb(uri, &keywords)\n\treturn result.(*KeywordSearchResults), err\n}", "func (collection Collection) Search(query string) {\n\n}", "func (f *Search) Search(term string, roomIDs, keys []string, limit, from int, orderByStreamPos bool) (*bleve.SearchResult, error) {\n\tqry := bleve.NewConjunctionQuery()\n\ttermQuery := bleve.NewBooleanQuery()\n\n\tterms := strings.Split(term, \" \")\n\tfor _, term := range terms {\n\t\tmatchQuery := 
bleve.NewMatchQuery(term)\n\t\tmatchQuery.SetField(\"Content\")\n\t\ttermQuery.AddMust(matchQuery)\n\t}\n\tqry.AddQuery(termQuery)\n\n\troomQuery := bleve.NewBooleanQuery()\n\tfor _, roomID := range roomIDs {\n\t\troomSearch := bleve.NewMatchQuery(roomID)\n\t\troomSearch.SetField(\"RoomID\")\n\t\troomQuery.AddShould(roomSearch)\n\t}\n\tif len(roomIDs) > 0 {\n\t\tqry.AddQuery(roomQuery)\n\t}\n\tkeyQuery := bleve.NewBooleanQuery()\n\tfor _, key := range keys {\n\t\tkeySearch := bleve.NewMatchQuery(key)\n\t\tkeySearch.SetField(\"ContentType\")\n\t\tkeyQuery.AddShould(keySearch)\n\t}\n\tif len(keys) > 0 {\n\t\tqry.AddQuery(keyQuery)\n\t}\n\n\ts := bleve.NewSearchRequestOptions(qry, limit, from, false)\n\ts.Fields = []string{\"*\"}\n\ts.SortBy([]string{\"_score\"})\n\tif orderByStreamPos {\n\t\ts.SortBy([]string{\"-StreamPosition\"})\n\t}\n\n\t// Highlight some words\n\ts.Highlight = bleve.NewHighlight()\n\ts.Highlight.Fields = []string{\"Content\"}\n\n\treturn f.FulltextIndex.Search(s)\n}", "func GetRelKw(keyword string) (sliceKw []string, err error) {\n\tkeyword = strings.ReplaceAll(keyword, \" \", \"+\")\n\tmapKeywords := make(map[string]bool)\n\tchkeywords := make(chan []string)\n\tchFinished := make(chan bool)\n\t// ----------------------------- google ----------------------------\n\tfor x := 0; x <= 25; x++ {\n\t\tgo func(x int, chkeywords chan []string, chFinshed chan bool) {\n\t\t\tresSlice, err := getGoogleJSON(\"http://suggestqueries.google.com/complete/search?client=chrome&hl=kr&q=\" + keyword + \"+\" + string(rune('a'+x)))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\t\t\tchkeywords <- nil\n\t\t\t\tchFinshed <- true\n\t\t\t}\n\t\t\tchkeywords <- resSlice\n\t\t\tchFinshed <- true\n\t\t}(x, chkeywords, chFinished)\n\t}\n\tfor x := 0; x <= 25; {\n\t\tselect {\n\t\tcase keywords := <-chkeywords:\n\t\t\tfor _, keyword := range keywords {\n\t\t\t\tif !strings.Contains(keyword, \"xaml\") {\n\t\t\t\t\tmapKeywords[keyword] = 
true\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-chFinished:\n\t\t\tx++\n\t\t}\n\t}\n\t// ----------------------------- bing ----------------------------\n\tfor x := 0; x <= 25; x++ {\n\t\tgo func(x int, chkeywords chan []string, chFinshed chan bool) {\n\t\t\tresSlice, err := getBingJSON(\"https://www.bing.com/AS/Suggestions?pt=page.home&cp=1&cvid=\" +\n\t\t\t\trandomStrFromCharset(22, \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\") +\n\t\t\t\t\"&qry=\" + keyword + \"+\" + string(rune('a'+x)))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\t\t\tchkeywords <- nil\n\t\t\t\tchFinshed <- true\n\t\t\t}\n\t\t\tchkeywords <- resSlice\n\t\t\tchFinshed <- true\n\t\t}(x, chkeywords, chFinished)\n\t}\n\tfor x := 0; x <= 25; {\n\t\tselect {\n\t\tcase keywords := <-chkeywords:\n\t\t\tfor _, keyword := range keywords {\n\t\t\t\tmapKeywords[keyword] = true\n\t\t\t}\n\t\tcase <-chFinished:\n\t\t\tx++\n\t\t}\n\t}\n\tfor keyword := range mapKeywords {\n\t\tsliceKw = append(sliceKw, keyword)\n\t}\n\treturn sliceKw, nil\n}", "func TestKeywordSearch(t *testing.T) {\n\ttestURL1 := TestBaseURL + \"/v1/query?keyword=node01\"\n\ttestURL2 := TestBaseURL + \"/v1/query?keyword=node02\"\n\n\ttests := []TestStruct{\n\t\t{\"TestKeywordSearchNode01\", testURL1, \"\", 200, \"\", 0},\n\t\t{\"TestKeywordSearchNode02\", testURL2, \"\", 200, \"\", 0},\n\t}\n\n\tfor i, testCase := range tests {\n\t\tt.Run(testCase.testCaseName, func(t *testing.T) {\n\t\t\tresCode, resBody, _ := KeywordSearch(t, testCase.testURL)\n\t\t\ttests[i].observedStatusCode = resCode\n\t\t\ttests[i].responseBody = string(resBody)\n\t\t})\n\t}\n\tDisplayTestCaseResults(\"TestKeywordSearch\", tests, t, \"uid\")\n}", "func (itemSearch *ItemSearch) Search(keywords string, maxiumNumberOfResults int) []Result {\n\n\t// routes\n\tif isRouteSearch(keywords) {\n\t\trouteComponents := strings.Replace(keywords, \"/\", \" \", -1)\n\t\treturn itemSearch.routesFullTextIndex.Search(routeComponents, maxiumNumberOfResults)\n\t}\n\n\t// 
items\n\treturn itemSearch.itemContentFullTextIndex.Search(keywords, maxiumNumberOfResults)\n}", "func getKeysByKeyWord(c *gin.Context, kw string) []models.Key {\n\tdatabase := c.MustGet(\"db\").(*mgo.Database)\n\tkey := []models.Key{}\n\terr := database.C(models.CollectionKey).Find(bson.M{\"$or\": []bson.M{{\"Key\": bson.RegEx{kw, \"$i\"}}}}).Sort(\"-Kcount\").Limit(7).All(&key)\n\tcommon.CheckError(c, err)\n\treturn key\n\n}", "func (m defaultMatcher) Search(feed *Feed, searchTerm string) ([]*Result, error) {\n\treturn nil, nil\n}", "func FindKeysByKeyWord(c *gin.Context) {\n\tKeys := getKeysByKeyWord(c, c.Param(\"kw\"))\n\tupKcount(c, c.Param(\"kw\"))\n\t// go getKeysByKeyWord(c, c.Param(\"kw\"))\n\tc.JSON(http.StatusOK, Keys)\n\n}", "func (rec *Record) Search(text string) bool {\n\n\tif strings.Contains(strings.ToLower(rec.Title), text) {\n\t\treturn true\n\t}\n\tif strings.Contains(strings.ToLower(rec.Account), text) {\n\t\treturn true\n\t}\n\tif strings.Contains(strings.ToLower(rec.Password), text) {\n\t\treturn true\n\t}\n\tif strings.Contains(strings.ToLower(strings.Join(rec.Tags, \", \")), text) {\n\t\treturn true\n\t}\n\tif strings.Contains(strings.ToLower(rec.Url), text) {\n\t\treturn true\n\t}\n\tif strings.Contains(strings.ToLower(rec.Notes), text) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (content Content) Search(keywords []string) SearchResult {\n\tz := html.NewTokenizerFragment(strings.NewReader(string(content)), \"div\")\n\tr := SearchResult{}\n\tverse := 0\n\n\tfoundKeywords := make(map[string]bool)\n\teof := false\n\n\tfor !eof {\n\t\tswitch z.Next() {\n\t\tcase html.ErrorToken:\n\t\t\teof = true\n\t\tcase html.TextToken:\n\t\t\ttext := strings.ToLower(string(z.Text()))\n\t\t\tfor _, k := range keywords {\n\t\t\t\tweight := strings.Count(text, k)\n\t\t\t\tif weight > 0 {\n\t\t\t\t\tfoundKeywords[k] = true\n\t\t\t\t\tif verse > 0 {\n\t\t\t\t\t\tr.VersesHighlighted = append(r.VersesHighlighted, verse)\n\t\t\t\t\t}\n\t\t\t\t\tr.Weight += 
weight\n\t\t\t\t}\n\t\t\t}\n\t\tcase html.StartTagToken:\n\t\t\t_, hasAttr := z.TagName()\n\t\t\tvar key, val []byte\n\t\t\tfor hasAttr {\n\t\t\t\tkey, val, hasAttr = z.TagAttr()\n\t\t\t\tif string(key) == \"id\" {\n\t\t\t\t\tverse, _ = strconv.Atoi(string(val))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// If not all the keywords were found, this is not a result.\n\tfor _, k := range keywords {\n\t\tif !foundKeywords[k] {\n\t\t\tr.Weight = 0\n\t\t}\n\t}\n\n\tr.Clean()\n\treturn r\n}", "func (sem SEManager) Get(keyword string) (SearchEngine, error) {\n if len(keyword) < 1 {\n return SearchEngine{}, errors.New(\"Keyword too short\")\n }\n for _, se := range sem.Engines {\n if keyword == se.Keyword {\n return se, nil\n }\n }\n return SearchEngine{}, errors.New(fmt.Sprintf(\"No search engine with keyword %s found\", keyword))\n}", "func SearchTerm(query url.Values, m martini.Context) {\n\tp := SearchParameter{}\n\tp.Search = query.Get(\"search\")\n\tp.Success = p.Search != \"\"\n\n\tm.Map(p)\n}", "func (a *Alfred) Search(query string) error {\n\t_, err := util.RunJS(fmt.Sprintf(scriptSearch, util.QuoteJS(query)))\n\treturn err\n}", "func processQuery(userQuery string) (keywords string) {\n\tcandidates := rake.RunRake(userQuery)\n\tkeywords = \"\"\n\tfor _, candidate := range candidates {\n\t\tkeywords += candidate.Key + \";\"\n\t}\n\treturn keywords\n\n}", "func IsKeyword(name string) bool {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Qsl search for query
func QslSearch(t *testing.T, url string) (statusCode int, respBody []byte, err error) { respStatusCode, respBody, _ := GetResponseByGet(t, url) return respStatusCode, respBody, nil }
[ "func (collection Collection) Search(query string) {\n\n}", "func (m *RootRequestBuilder) SearchWithQ(q *string)(*id724096a8c3b90aa72504dcd2761cc99b632bad9d405f048a071a5f697d2195f.SearchWithQRequestBuilder) {\n return id724096a8c3b90aa72504dcd2761cc99b632bad9d405f048a071a5f697d2195f.NewSearchWithQRequestBuilderInternal(m.pathParameters, m.requestAdapter, q);\n}", "func (c *cli) Search(q search, items list.List) list.List {\n\tout := list.New()\n\tfor item := items.Front(); item != nil; item = item.Next() {\n\t\trv := reflect.ValueOf(item.Value)\n\t\tf := rv.FieldByName(q.name)\n\t\tif !f.IsValid() {\n\t\t\tlog.Errorf(\"Field %s doesn't exist\\n\", q.name)\n\t\t\tbreak\n\t\t} else {\n\t\t\tswitch f.Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\tsValue := f.String()\n\t\t\t\tif strings.Contains(strings.ToLower(sValue), strings.ToLower(q.query)) {\n\t\t\t\t\tout.PushBack(item.Value)\n\t\t\t\t}\n\t\t\tcase reflect.Int:\n\t\t\t\tqInt, err := strconv.Atoi(q.query)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"%s is an int field, %s is not an int\", q.name, q.query)\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\tif f.Int() == int64(qInt) {\n\t\t\t\t\t\tout.PushBack(item.Value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase reflect.Bool:\n\t\t\t\tqBoo, err := strconv.ParseBool(q.query)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"%s is a bool field, %s is not a bool\", q.name, q.query)\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\tif f.Bool() == qBoo {\n\t\t\t\t\t\tout.PushBack(item.Value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn *out\n}", "func (sc *Chaincode) search(stub shim.ChaincodeStubInterface, args []string) peer.Response {\n\tqueryString = \"{\\\"selector\\\": {\\\"structType\\\": \\\"CustomerBasic\\\", \\\"$or\\\": [{\\\"name\\\": {\\\"$regex\\\":\\\".*%s.*\\\"}}, {\\\"email\\\":{\\\"$regex\\\":\\\".*%s.*\\\"}}]}}\"\n\tqueryString := fmt.Sprintf(queryString, args[0])\n\n\tcustomersBytes, err := getQueryResultForQueryString(stub, queryString)\n\tif err 
!= nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\treturn shim.Success(customersBytes)\n}", "func QueryBuilder(req *search.SearchRequest) (es.Query, error) {\n\n\t// Validate request\n\tif req == nil {\n\t\treturn nil, errors.New(\"nil search request\")\n\t}\n\n\t// Check if QueryString is valid and construct QueryStringQuery\n\tif len(req.QueryString) != 0 {\n\t\treturn es.NewQueryStringQuery(req.QueryString), nil\n\t}\n\n\t// Construct Bool query based on search requirements\n\tif req.Query == nil {\n\t\treturn nil, errors.New(\"nil search request\")\n\t}\n\n\tquery := es.NewBoolQuery().QueryName(\"CompleteQuery\")\n\n\t// Process Text Requirements\n\ttextReq := es.NewBoolQuery().QueryName(\"TextQuery\")\n\tfor _, treq := range req.Query.Texts {\n\n\t\ttQuery := es.NewBoolQuery()\n\t\tfor _, text := range treq.Text {\n\t\t\tif strings.Contains(text, \" \") {\n\t\t\t\t// Phrase query\n\t\t\t\ttQuery = tQuery.Must(es.NewQueryStringQuery(fmt.Sprintf(\"\\\"%s\\\"\", text)))\n\t\t\t} else {\n\t\t\t\t// Word query\n\t\t\t\ttQuery = tQuery.Must(es.NewQueryStringQuery(text))\n\t\t\t}\n\t\t}\n\t\ttextReq = textReq.Should(tQuery).MinimumNumberShouldMatch(1)\n\t}\n\tif len(req.Query.Texts) > 0 {\n\t\tquery = query.Must(textReq)\n\t}\n\n\t// Process categories requirement\n\tcatReq := es.NewBoolQuery().QueryName(\"CategoryQuery\")\n\tfor _, cat := range req.Query.Categories {\n\t\tcatReq = catReq.Should(es.NewTermQuery(\"meta.labels._category.keyword\", cat)).MinimumNumberShouldMatch(1)\n\t}\n\tif len(req.Query.Categories) > 0 {\n\t\tquery = query.Must(catReq)\n\t}\n\n\t// Process kinds requirement\n\tkindReq := es.NewBoolQuery().QueryName(\"KindQuery\")\n\tfor _, cat := range req.Query.Kinds {\n\t\tkindReq = kindReq.Should(es.NewTermQuery(\"kind.keyword\", cat)).MinimumNumberShouldMatch(1)\n\t}\n\tif len(req.Query.Kinds) > 0 {\n\t\tquery = query.Must(kindReq)\n\t}\n\n\t// Process field requirement if it is valid\n\tif req.Query.Fields != nil {\n\t\tfor _, field := range 
req.Query.Fields.Requirements {\n\t\t\tswitch field.Operator {\n\t\t\tcase fields.Operator_name[int32(fields.Operator_equals)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tif strings.Contains(field.Values[0], \"*\") {\n\t\t\t\t\t\tquery = query.Must(es.NewWildcardQuery(field.Key, field.Values[0]))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tquery = query.Must(es.NewMatchPhraseQuery(field.Key, field.Values[0]))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_notEquals)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tif strings.Contains(field.Values[0], \"*\") {\n\t\t\t\t\t\tquery = query.MustNot(es.NewWildcardQuery(field.Key, field.Values[0]))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tquery = query.MustNot(es.NewMatchPhraseQuery(field.Key, field.Values[0]))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_in)]:\n\t\t\t\tfieldQuery := es.NewBoolQuery().MinimumNumberShouldMatch(1)\n\t\t\t\tfor _, val := range field.GetValues() {\n\t\t\t\t\tif strings.Contains(val, \"*\") {\n\t\t\t\t\t\tfieldQuery.Should(es.NewWildcardQuery(field.Key, val))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfieldQuery.Should(es.NewMatchPhraseQuery(field.Key, val))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tquery = query.Must(fieldQuery)\n\t\t\tcase fields.Operator_name[int32(fields.Operator_notIn)]:\n\t\t\t\tfor _, val := range field.GetValues() {\n\t\t\t\t\tif strings.Contains(val, \"*\") {\n\t\t\t\t\t\tquery = query.MustNot(es.NewWildcardQuery(field.Key, val))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tquery = query.MustNot(es.NewMatchPhraseQuery(field.Key, val))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_gt)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewRangeQuery(field.Key).Gt(field.Values[0]))\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_gte)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewRangeQuery(field.Key).Gte(field.Values[0]))\n\t\t\t\t}\n\t\t\tcase 
fields.Operator_name[int32(fields.Operator_lt)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewRangeQuery(field.Key).Lt(field.Values[0]))\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_lte)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewRangeQuery(field.Key).Lte(field.Values[0]))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Process label requirements if it is valid\n\tif req.Query.Labels != nil {\n\t\tfor _, label := range req.Query.Labels.Requirements {\n\t\t\tlKey := fmt.Sprintf(\"%s.keyword\", label.Key)\n\t\t\tswitch label.Operator {\n\t\t\tcase labels.Operator_name[int32(labels.Operator_equals)]:\n\t\t\t\tif len(label.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewTermQuery(lKey, label.Values[0]))\n\t\t\t\t}\n\t\t\tcase labels.Operator_name[int32(labels.Operator_notEquals)]:\n\t\t\t\tif len(label.Values) > 0 {\n\t\t\t\t\tquery = query.MustNot(es.NewTermQuery(lKey, label.Values[0]))\n\t\t\t\t}\n\t\t\tcase labels.Operator_name[int32(labels.Operator_in)]:\n\t\t\t\tvalues := make([]interface{}, len(label.Values))\n\t\t\t\tfor i, v := range label.Values {\n\t\t\t\t\tvalues[i] = v\n\t\t\t\t}\n\t\t\t\tquery = query.Must(es.NewTermsQuery(lKey, values...))\n\t\t\tcase labels.Operator_name[int32(labels.Operator_notIn)]:\n\t\t\t\tvalues := make([]interface{}, len(label.Values))\n\t\t\t\tfor i, v := range label.Values {\n\t\t\t\t\tvalues[i] = v\n\t\t\t\t}\n\t\t\t\tquery = query.MustNot(es.NewTermsQuery(lKey, values...))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn query, nil\n}", "func (e Es) Query(index string, doc string, query string) (*SearchResult, error) {\n\tif doc != \"\" {\n\t\tdoc = \"/\" + doc\n\t}\n\tbody, err := e.getJSONWithBody(fmt.Sprintf(\"%s%s/_search\", index, doc), query)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = checkError(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallhits, ok := body[\"hits\"].(map[string]interface{})\n\tif !ok {\n\t\treturn nil, 
fmt.Errorf(\"Failed to retrieve hits from response\")\n\t}\n\n\ttotal := int(allhits[\"total\"].(float64))\n\thits := allhits[\"hits\"].([]interface{})\n\n\tresult := make([]string, len(hits))\n\ti := 0\n\tfor _, hit := range hits {\n\t\trecord, err := utils.MapToYaml(hit)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult[i] = record\n\t\ti++\n\t}\n\n\treturn &SearchResult{\n\t\tTotal: total,\n\t\tHits: result,\n\t}, nil\n}", "func (i *Index) Search(q query.Query) (docs []index.Document, total int, err error) {\n\tconn := i.getConn()\n\tdefer conn.Close()\n\n\targs := redis.Args{i.name, q.Term, \"LIMIT\", q.Paging.Offset, q.Paging.Num, \"WITHSCORES\"}\n\t//if q.Flags&query.QueryVerbatim != 0 {\n\targs = append(args, \"VERBATIM\")\n\t//}\n\tif q.Flags&query.QueryNoContent != 0 {\n\t\targs = append(args, \"NOCONTENT\")\n\t}\n\n\tif q.HighlightOpts != nil {\n\t\targs = args.Add(\"HIGHLIGHT\")\n\t\tif q.HighlightOpts.Fields != nil && len(q.HighlightOpts.Fields) > 0 {\n\t\t\targs = args.Add(\"FIELDS\", len(q.HighlightOpts.Fields))\n\t\t\targs = args.AddFlat(q.HighlightOpts.Fields)\n\t\t}\n\t\targs = args.Add(\"TAGS\", q.HighlightOpts.Tags[0], q.HighlightOpts.Tags[1])\n\t}\n\n\tif q.SummarizeOpts != nil {\n\t\targs = args.Add(\"SUMMARIZE\")\n\t\tif q.SummarizeOpts.Fields != nil && len(q.SummarizeOpts.Fields) > 0 {\n\t\t\targs = args.Add(\"FIELDS\", len(q.SummarizeOpts.Fields))\n\t\t\targs = args.AddFlat(q.SummarizeOpts.Fields)\n\t\t}\n\t\tif q.SummarizeOpts.FragmentLen > 0 {\n\t\t\targs = args.Add(\"LEN\", q.SummarizeOpts.FragmentLen)\n\t\t}\n\t\tif q.SummarizeOpts.NumFragments > 0 {\n\t\t\targs = args.Add(\"FRAGS\", q.SummarizeOpts.NumFragments)\n\t\t}\n\t\tif q.SummarizeOpts.Separator != \"\" {\n\t\t\targs = args.Add(\"SEPARATOR\", q.SummarizeOpts.Separator)\n\t\t}\n\t}\n\n\tif err := conn.Send(i.commandPrefix+\".SEARCH\", args...); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := conn.Flush(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif _, err := 
conn.Receive(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tres, err := redis.Values(conn.Do(i.commandPrefix+\".SEARCH\", args...))\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif total, err = redis.Int(res[0], nil); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tdocs = make([]index.Document, 0, len(res)-1)\n\n\tif len(res) > 2 {\n\t\tfor i := 1; i < len(res); i += 2 {\n\n\t\t\tvar fields interface{} = []interface{}{}\n\t\t\tif q.Flags&query.QueryNoContent == 0 {\n\t\t\t\tfields = res[i+2]\n\n\t\t\t}\n\t\t\tif d, e := loadDocument(res[i], res[i+1], fields); e == nil {\n\t\t\t\tdocs = append(docs, d)\n\t\t\t}\n\t\t\tif q.Flags&query.QueryNoContent == 0 {\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\t}\n\treturn docs, len(docs), nil\n}", "func (a *Alfred) Search(query string) error {\n\t_, err := util.RunJS(fmt.Sprintf(scriptSearch, util.QuoteJS(query)))\n\treturn err\n}", "func (m *DriveItemItemRequestBuilder) SearchWithQ(q *string)(*iae2098c15020f00a005771b8a0dfdf0cc230504133ee3754b7e1f29877241492.SearchWithQRequestBuilder) {\n return iae2098c15020f00a005771b8a0dfdf0cc230504133ee3754b7e1f29877241492.NewSearchWithQRequestBuilderInternal(m.pathParameters, m.requestAdapter, q);\n}", "func (b Builder) Search(tokens []Token) squirrel.Sqlizer {\n\tvar conj squirrel.And\n\tfor _, token := range tokens {\n\t\tif part := b.buildPart(token); part != nil {\n\t\t\tconj = append(conj, part)\n\t\t}\n\t}\n\treturn conj\n}", "func SearchQuery(q string, qualifiers *SearchQualifiers) string {\n\tvar parts []string\n\tif q != \"\" {\n\t\tparts = append(parts, q)\n\t}\n\tif qualifiers != nil {\n\t\tqualStr := (*qualifiers).String()\n\t\tif len(qualStr) > 0 {\n\t\t\tparts = append(parts, qualStr)\n\t\t}\n\t}\n\n\treturn strings.Join(parts, \" \")\n}", "func (f *Search) Search(term string, roomIDs, keys []string, limit, from int, orderByStreamPos bool) (*bleve.SearchResult, error) {\n\tqry := bleve.NewConjunctionQuery()\n\ttermQuery := bleve.NewBooleanQuery()\n\n\tterms := strings.Split(term, \" 
\")\n\tfor _, term := range terms {\n\t\tmatchQuery := bleve.NewMatchQuery(term)\n\t\tmatchQuery.SetField(\"Content\")\n\t\ttermQuery.AddMust(matchQuery)\n\t}\n\tqry.AddQuery(termQuery)\n\n\troomQuery := bleve.NewBooleanQuery()\n\tfor _, roomID := range roomIDs {\n\t\troomSearch := bleve.NewMatchQuery(roomID)\n\t\troomSearch.SetField(\"RoomID\")\n\t\troomQuery.AddShould(roomSearch)\n\t}\n\tif len(roomIDs) > 0 {\n\t\tqry.AddQuery(roomQuery)\n\t}\n\tkeyQuery := bleve.NewBooleanQuery()\n\tfor _, key := range keys {\n\t\tkeySearch := bleve.NewMatchQuery(key)\n\t\tkeySearch.SetField(\"ContentType\")\n\t\tkeyQuery.AddShould(keySearch)\n\t}\n\tif len(keys) > 0 {\n\t\tqry.AddQuery(keyQuery)\n\t}\n\n\ts := bleve.NewSearchRequestOptions(qry, limit, from, false)\n\ts.Fields = []string{\"*\"}\n\ts.SortBy([]string{\"_score\"})\n\tif orderByStreamPos {\n\t\ts.SortBy([]string{\"-StreamPosition\"})\n\t}\n\n\t// Highlight some words\n\ts.Highlight = bleve.NewHighlight()\n\ts.Highlight.Fields = []string{\"Content\"}\n\n\treturn f.FulltextIndex.Search(s)\n}", "func (r *Search) Q(q string) *Search {\n\tr.values.Set(\"q\", q)\n\n\treturn r\n}", "func (engine *Engine) BoolSearch(q string) (res []int) {\n\tq = strings.TrimSpace(q)\n\tfirstBoolQueryGroup := newBoolQueryGroup([]string{q}, \"RET\", res)\n\tres = (*engine).recursiveBoolSearch(firstBoolQueryGroup).result\n\treturn res\n}", "func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn make([]*abci.TxResult, 0), nil\n\n\tdefault:\n\t}\n\n\tvar hashesInitialized bool\n\tfilteredHashes := make(map[string][]byte)\n\n\t// get a list of conditions (like \"tx.height > 5\")\n\tconditions, err := q.Conditions()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error during parsing conditions from query: %w\", err)\n\t}\n\n\t// if there is a hash condition, return the result immediately\n\thash, ok, err := lookForHash(conditions)\n\tif err != nil {\n\t\treturn 
nil, fmt.Errorf(\"error during searching for a hash in the query: %w\", err)\n\t} else if ok {\n\t\tres, err := txi.Get(hash)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn []*abci.TxResult{}, fmt.Errorf(\"error while retrieving the result: %w\", err)\n\t\tcase res == nil:\n\t\t\treturn []*abci.TxResult{}, nil\n\t\tdefault:\n\t\t\treturn []*abci.TxResult{res}, nil\n\t\t}\n\t}\n\n\t// conditions to skip because they're handled before \"everything else\"\n\tskipIndexes := make([]int, 0)\n\n\t// extract ranges\n\t// if both upper and lower bounds exist, it's better to get them in order not\n\t// no iterate over kvs that are not within range.\n\tranges, rangeIndexes := indexer.LookForRanges(conditions)\n\tif len(ranges) > 0 {\n\t\tskipIndexes = append(skipIndexes, rangeIndexes...)\n\n\t\tfor _, qr := range ranges {\n\t\t\tif !hashesInitialized {\n\t\t\t\tfilteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, true)\n\t\t\t\thashesInitialized = true\n\n\t\t\t\t// Ignore any remaining conditions if the first condition resulted\n\t\t\t\t// in no matches (assuming implicit AND operand).\n\t\t\t\tif len(filteredHashes) == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfilteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, false)\n\t\t\t}\n\t\t}\n\t}\n\n\t// if there is a height condition (\"tx.height=3\"), extract it\n\theight := lookForHeight(conditions)\n\n\t// for all other conditions\n\tfor i, c := range conditions {\n\t\tif intInSlice(i, skipIndexes) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !hashesInitialized {\n\t\t\tfilteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, true)\n\t\t\thashesInitialized = true\n\n\t\t\t// Ignore any remaining conditions if the first condition resulted\n\t\t\t// in no matches (assuming implicit AND operand).\n\t\t\tif len(filteredHashes) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tfilteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), 
filteredHashes, false)\n\t\t}\n\t}\n\n\tresults := make([]*abci.TxResult, 0, len(filteredHashes))\n\tfor _, h := range filteredHashes {\n\t\tres, err := txi.Get(h)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get Tx{%X}: %w\", h, err)\n\t\t}\n\t\tresults = append(results, res)\n\n\t\t// Potentially exit early.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak\n\t\tdefault:\n\t\t}\n\t}\n\n\treturn results, nil\n}", "func (m *ItemItemsDriveItemItemRequestBuilder) SearchWithQ(q *string)(*ItemItemsItemSearchWithQRequestBuilder) {\n return NewItemItemsItemSearchWithQRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter, q)\n}", "func (s SearchService) Query(params *SearchParams) (results *SearchResults, resp *http.Response, err error) {\n\tresults = new(SearchResults)\n\tresp, err = s.client.base().Get(\"search\").QueryStruct(params).ReceiveSuccess(results)\n\treturn\n}", "func routeSearch(c echo.Context) error {\n\tvar input QueryRequest\n\tvar q query.Query\n\n\tif err := c.Bind(&input); err != nil {\n\t\treturn c.JSON(400, map[string]interface{}{\n\t\t\t\"success\": false,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t}\n\n\tndx, typ := c.Param(\"index\"), c.Param(\"type\")\n\tindex, err := store.GetIndex(ndx + \"/\" + typ)\n\tif err != nil {\n\t\treturn c.JSON(404, map[string]interface{}{\n\t\t\t\"success\": false,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t}\n\n\tif input.QueryString != \"\" {\n\t\tq = query.Query(bleve.NewQueryStringQuery(input.QueryString))\n\t}\n\n\tif q == nil {\n\t\tq = query.Query(bleve.NewMatchAllQuery())\n\t}\n\n\tres, err := index.Search(q, input.Offset, input.Size, input.Sort)\n\tif err != nil {\n\t\treturn c.JSON(500, map[string]interface{}{\n\t\t\t\"success\": false,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t}\n\n\treturn c.JSON(200, map[string]interface{}{\n\t\t\"success\": true,\n\t\t\"payload\": res,\n\t})\n}", "func Find(name string) (string, bool) { q, ok := queries[name]; 
return q, ok }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test keyword search query
func TestKeywordSearch(t *testing.T) { testURL1 := TestBaseURL + "/v1/query?keyword=node01" testURL2 := TestBaseURL + "/v1/query?keyword=node02" tests := []TestStruct{ {"TestKeywordSearchNode01", testURL1, "", 200, "", 0}, {"TestKeywordSearchNode02", testURL2, "", 200, "", 0}, } for i, testCase := range tests { t.Run(testCase.testCaseName, func(t *testing.T) { resCode, resBody, _ := KeywordSearch(t, testCase.testURL) tests[i].observedStatusCode = resCode tests[i].responseBody = string(resBody) }) } DisplayTestCaseResults("TestKeywordSearch", tests, t, "uid") }
[ "func KeywordSearch(t *testing.T, url string) (statusCode int, respBody []byte, err error) {\n\n\trespStatusCode, respBody, _ := GetResponseByGet(t, url)\n\treturn respStatusCode, respBody, nil\n}", "func TestSearch(t *testing.T) {\n\ttweetHandler := &handler.TweetHandler{\n\t\tTweetService: &mock.MockTweetService{},\n\t}\n\n\t// test cases\n\ttt := []struct {\n\t\tName string\n\t\tQuery string\n\t\tStatus int\n\t}{\n\t\t{Name: \"Found tweets\", Query: \"golang\", Status: http.StatusOK},\n\t\t{Name: \"No search keyword\", Query: \"\", Status: http.StatusBadRequest},\n\t}\n\n\tfor _, tc := range tt {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\treq, err := http.NewRequest(\"GET\", \"localhost:8080/search?q=\"+tc.Query, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Error creating http request: %s\", err.Error())\n\t\t\t}\n\t\t\trec := httptest.NewRecorder()\n\n\t\t\ttweetHandler.HandleSearch(rec, req)\n\n\t\t\tres := rec.Result()\n\t\t\tdefer res.Body.Close()\n\n\t\t\tif res.StatusCode != tc.Status {\n\t\t\t\tt.Fatalf(\"Expected status %d; got %d\", tc.Status, res.StatusCode)\n\t\t\t}\n\t\t})\n\t}\n}", "func searchEventByKeywords(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Print(\"serach events by keywords\\n\")\n\n\t//loginModel := model.LoginViewModel{}\n\n\t// show the events page if request type is Get\n\tif r.Method == http.MethodGet {\n\t\trenderTemplates(w, \"events.html\", \"Get mothod\")\n\t\treturn\n\t}\n\n\t// request data from ticketmaster api if request type is Post\n\tif r.Method == http.MethodPost {\n\n\t\tr.ParseForm()\n\t\tkeyWords := r.Form.Get(\"searchKeyWords\")\n\n\t\t// request results from ticktmaster api\n\t\tevents, err := searchEventFromTicketmaster(\"keyword\", string(keyWords), defaultEventCount)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Search Event from tickemaster api error: \" + err.Error())\n\t\t}\n\t\trenderTemplates(w, \"events.html\", events)\n\t}\n}", "func verify(t *testing.T, searchQuery string, 
searchResults []workitem.WorkItem, expectedCount int) {\n\t// Since this test adds test data, whether or not other workitems exist\n\t// there must be at least 1 search result returned.\n\tif len(searchResults) == expectedCount && expectedCount == 0 {\n\t\t// no point checking further, we got what we wanted.\n\t\treturn\n\t}\n\trequire.Equal(t, expectedCount, len(searchResults), \"invalid number of results in the search\")\n\n\t// These keywords need a match in the textual part.\n\tallKeywords := strings.Fields(searchQuery)\n\t// These keywords need a match optionally either as URL string or ID\t\t +\t\t\t\tkeyWord = strings.ToLower(keyWord)\n\t// optionalKeywords := []string{workItemURLInSearchString, strconv.Itoa(fxt.WorkItems[idx].Number)}\n\t// We will now check the legitimacy of the search results.\n\t// Iterate through all search results and see whether they meet the criteria\n\tfor _, searchResult := range searchResults {\n\t\tt.Logf(\"Examining workitem id=`%v` number=`%d` using keywords %v\", searchResult.ID, searchResult.Number, allKeywords)\n\t\tfor _, keyWord := range allKeywords {\n\t\t\tkeyWord = strings.ToLower(keyWord)\n\t\t\tt.Logf(\"Verifying workitem id=`%v` number=`%d` for keyword `%s`...\", searchResult.ID, searchResult.Number, keyWord)\n\t\t\tworkItemTitle := \"\"\n\t\t\tif searchResult.Fields[workitem.SystemTitle] != nil {\n\t\t\t\tworkItemTitle = strings.ToLower(searchResult.Fields[workitem.SystemTitle].(string))\n\t\t\t}\n\t\t\tworkItemDescription := \"\"\n\t\t\tif searchResult.Fields[workitem.SystemDescription] != nil {\n\t\t\t\tdescriptionField := searchResult.Fields[workitem.SystemDescription].(rendering.MarkupContent)\n\t\t\t\tworkItemDescription = strings.ToLower(descriptionField.Content)\n\t\t\t}\n\t\t\tassert.True(t,\n\t\t\t\tstrings.Contains(workItemTitle, keyWord) || strings.Contains(workItemDescription, keyWord),\n\t\t\t\t\"`%s` neither found in title `%s` nor in the description `%s` for workitem #%d\", keyWord, workItemTitle, 
workItemDescription, searchResult.Number)\n\t\t}\n\t}\n}", "func (s *searcher) Keyword(resp http.ResponseWriter, req *http.Request) {\n\tsearchTerms := mux.Vars(req)\n\n\tkey := searchTerms[\"keyword\"]\n\tif len(key) == 0 {\n\t\tresp.WriteHeader(http.StatusBadRequest)\n\t\tresp.Write([]byte(\"keyword is empty\"))\n\t\treturn\n\t}\n\n\tsearchPaths, err := s.searchIndex.GetTransactionPathsByKeyword(key)\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error finding transactions: %s\", err.Error())))\n\t\treturn\n\t}\n\n\ttransactions, err := s.searchIndex.GetTransactionsFromFiles(searchPaths)\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error finding transactions: %s\", err.Error())))\n\t\treturn\n\t}\n\n\tresultBytes, err := json.Marshal(transactions)\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error marshallig transactions to json: %s\", err.Error())))\n\t\treturn\n\t}\n\n\tresp.WriteHeader(http.StatusOK)\n\tresp.Write(resultBytes)\n}", "func TestSearch(t *testing.T) {\n\tvar result *SearchResult\n\tvar err error\n\n\tresult, err = Search(\n\t\tSimpleQuery{\n\t\t\tSearch: \"electron\",\n\t\t\tMaxResults: 5,\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tlog.Printf(\"error occurred during Search(): %v\", err)\n\t\tt.FailNow()\n\t}\n\n\tif result == nil {\n\t\tlog.Printf(\"empty SearchResult: %v\", result)\n\t\tt.FailNow()\n\t}\n\n\tif len(result.Entries) != 5 {\n\t\tlog.Printf(\"invalid length for SearchResult.entries: %v\", result.Entries)\n\t\tt.FailNow()\n\t}\n\n}", "func makeKeywordSearchQuery(keywords, fields []string, start, count string) string {\n\tif len(fields) == 0 {\n\t\tfields = defaultQueryFields\n\t}\n\n\tvar condition []string\n\tselectQuery := \"select \"\n\tfieldsQuery := strings.Join(fields, \",\")\n\t//\toldfromQuery := \" from book_items left join book_category on 
book_items.category_num=book_category.category_num where \"\n\tfromQuery := \" from book_items \" +\n\t\t\"left join book_category on book_items.category_num=book_category.category_num where \"\n\tfor _, value := range keywords {\n\t\tbookCondition := fmt.Sprintf(\n\t\t\t\"(author like '%%%s%%' or title like '%%%s%%' or summary like '%%%s%%' or isbn='%s' )\",\n\t\t\tvalue, value, value, value)\n\t\tcondition = append(condition, bookCondition)\n\t}\n\n\tconditionQuery := strings.Join(condition, \" and \")\n//\tsortQuery := \" order by (average*100+num_raters*0.3) desc \"\n\tsortQuery := \" order by rank desc \"\n\tpageQuery := \" limit \" + start + \",\" + count\n\n\tsqlQuery := selectQuery + fieldsQuery +\n\t\tfromQuery + conditionQuery + sortQuery + pageQuery\n//\tfmt.Println(sqlQuery)\n\treturn sqlQuery\n}", "func (collection Collection) Search(query string) {\n\n}", "func TestSingleQuerySearch(t *testing.T) {\n\tfor _, tItem := range testData {\n\t\tsq := NewSingleQuery(tItem.query, tItem.links)\n\t\trealFind, err := sq.QuerySearch()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Can't make test, too much errors, %v\", err)\n\t\t}\n\t\tif len(realFind) != len(tItem.results) {\n\t\t\tt.Errorf(\"got %v slice of resources, need to %v (query=%s)\", realFind, tItem.results, tItem.query)\n\t\t} else {\n\t\t\t// detail comparison\n\t\t\tfor _, realItem := range realFind {\n\t\t\t\texists := false\n\t\t\t\tfor _, expected := range tItem.results {\n\t\t\t\t\tif expected == realItem {\n\t\t\t\t\t\texists = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !exists {\n\t\t\t\t\tt.Errorf(\"wrong content, got %v slice of resources, need to %v (query=%s)\", realFind, tItem.results, tItem.query)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (tmdb *TMDb) SearchKeyword(name string, options map[string]string) (*KeywordSearchResults, error) {\n\tvar availableOptions = map[string]struct{}{\n\t\t\"page\": {}}\n\tvar keywords KeywordSearchResults\n\tsafeName := url.QueryEscape(name)\n\toptionsString := 
getOptionsString(options, availableOptions)\n\turi := fmt.Sprintf(\"%s/search/keyword?query=%s&api_key=%s%s\", baseURL, safeName, tmdb.apiKey, optionsString)\n\tresult, err := getTmdb(uri, &keywords)\n\treturn result.(*KeywordSearchResults), err\n}", "func (s *server) checkKeyword(kw Keyword) (bool, error) {\n\tvar k string\n\terr := s.db.QueryRow(`\n\t\tSELECT COUNT(keyword) FROM keywords WHERE keyword=? ;\n\t`, kw.Name).Scan(&k)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tkNr, err := strconv.ParseInt(k, 6, 12)\n\n\tvar b bool\n\tif kNr == 0 {\n\t\tb = false\n\t} else {\n\t\tb = true\n\t}\n\n\treturn b, err\n}", "func (itemSearch *ItemSearch) Search(keywords string, maxiumNumberOfResults int) []Result {\n\n\t// routes\n\tif isRouteSearch(keywords) {\n\t\trouteComponents := strings.Replace(keywords, \"/\", \" \", -1)\n\t\treturn itemSearch.routesFullTextIndex.Search(routeComponents, maxiumNumberOfResults)\n\t}\n\n\t// items\n\treturn itemSearch.itemContentFullTextIndex.Search(keywords, maxiumNumberOfResults)\n}", "func (sq *SQ3Driver) realSearch(pattern string, ignoreCase, searchContent bool, ch SearchFeed) {\n\tcolumn := \"key\"\n\tif ignoreCase {\n\t\tcolumn = \"lc_key\"\n\t\tpattern = strings.ToLower(pattern)\n\t}\n\tpattern = \"%\" + pattern + \"%\"\n\n\tsql := fmt.Sprintf(\"SELECT key FROM %v WHERE %v LIKE ?\", dbTable, column)\n\tfmt.Printf(\"DEBUG: Search sql: %v\\n\", sql)\n\trows, err := sq.DB.Query(sql, pattern)\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting query: %v\\n\", err)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\terrorStrings := \"\"\n\n\tfor rows.Next() {\n\t\tvar key string\n\t\terr = rows.Scan(&key)\n\t\tif err != nil {\n\t\t\terrorStrings = errorStrings + fmt.Sprintf(\"Error: %v\\n\", err)\n\t\t} else {\n\t\t\t// emit this result\n\t\t\tch <- &DBObj{Key: key}\n\t\t}\n\t}\n\n\tif len(errorStrings) > 0 {\n\t\terr = fmt.Errorf(\"Errors found:\\n%v\", errorStrings)\n\t}\n\t// Return that error as a key for now\n\tch <- &DBObj{Key: 
errorStrings}\n\n\t// Finally close our channel to notify any consumers we're done.\n\tclose(ch)\n}", "func Test_Search(t *testing.T) {\n\tfor _, testCase := range searchTestCases {\n\t\tresult := org.Search(org.CEO, testCase.name)\n\n\t\tif testCase.expected && result == nil {\n\t\t\tt.Errorf(\"Expected to find '%s', but did not find\", testCase.name)\n\t\t\tt.Fail()\n\t\t} else if !testCase.expected && result != nil {\n\t\t\tt.Errorf(\"Expected to not find '%s', but found\", testCase.name)\n\t\t}\n\t}\n}", "func TestEmptyInputString(t *testing.T) {\n\temptyString := \"\"\n\tfa := NewFrequencyAnalyzer(emptyString)\n\n\tactualWords := fa.Search()\n\tassert.Nil(t, actualWords)\n}", "func IsKeyword(name string) bool {}", "func TestSearchWord(t *testing.T) {\n\ts, dir := createTestServer(5, 8, 8, 0.000001, uint64(100000))\n\tdefer os.RemoveAll(dir)\n\n\tc, cliDir := createTestClient(s, 0)\n\tdefer os.RemoveAll(cliDir)\n\n\tcontents := []string{\n\t\t\"This is a simple test file\",\n\t\t\"This is another test file\",\n\t\t\"This is a different test file\",\n\t\t\"This is yet another test file\",\n\t\t\"This is the last test file\"}\n\n\tfilenames := make([]string, 5)\n\n\tfor i := 0; i < len(contents); i++ {\n\t\tfile := createTestFile(contents[i])\n\t\tdefer os.Remove(file)\n\t\t_, filenames[i] = path.Split(file)\n\t\tc.AddFile(file)\n\t}\n\n\tc2, cliDir2 := createTestClient(s, 1)\n\tdefer os.RemoveAll(cliDir2)\n\n\texpected := []string{filenames[1], filenames[3]}\n\tsort.Strings(expected)\n\tactual, _, err := c2.SearchWord(\"another\")\n\tif err != nil {\n\t\tt.Fatalf(\"error when searching word: %s\", err)\n\t}\n\tsort.Strings(actual)\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Fatalf(\"incorrect search result\")\n\t}\n\n\tempty, _, err := c2.SearchWord(\"non-existing\")\n\tif err != nil {\n\t\tt.Fatalf(\"error when searching word: %s\", err)\n\t}\n\tif len(empty) > 0 {\n\t\tt.Fatalf(\"filenames found for non-existing word\")\n\t}\n\n\texpected = 
filenames\n\tsort.Strings(expected)\n\tactual, _, err = c2.SearchWord(\"file\")\n\tif err != nil {\n\t\tt.Fatalf(\"error when searching word: %s\", err)\n\t}\n\tsort.Strings(actual)\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Fatalf(\"incorrect search result\")\n\t}\n}", "func (m defaultMatcher) Search(feed *Feed, searchTerm string) ([]*Result, error) {\n\treturn nil, nil\n}", "func (p *Searcher) buildTextQuery() elastic.Query {\n\tquery := elastic.NewMatchQuery(\"text\", p.request.Query).\n\t\tOperator(\"AND\").\n\t\tBoost(3)\n\n\tif len(p.request.Query) > 3 {\n\t\tquery = query.Fuzziness(\"AUTO\").\n\t\t\tMaxExpansions(20).\n\t\t\tPrefixLength(2)\n\t}\n\n\treturn query\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test Qsl search query
func TestQslSearch(t *testing.T) { if testing.Short() { t.Skip("skipping TestQSLSearch in short mode") } testURL1 := TestBaseURL + "/v1/qsl/node[@name=\"node01\"]{*}" testURL2 := TestBaseURL + "/v1/qsl/node[@name=\"node01\"]{@labels}" tests := []TestStruct{ {"TestSearchNode01All", testURL1, "", 200, "", 0}, {"TestSearchNode01@labels", testURL2, "", 200, "", 0}, } for i, testCase := range tests { t.Run(testCase.testCaseName, func(t *testing.T) { resCode, resBody, _ := QslSearch(t, testCase.testURL) tests[i].observedStatusCode = resCode tests[i].responseBody = string(resBody) }) } DisplayTestCaseResults("TestQslSearch", tests, t, "uid") }
[ "func TestSingleQuerySearch(t *testing.T) {\n\tfor _, tItem := range testData {\n\t\tsq := NewSingleQuery(tItem.query, tItem.links)\n\t\trealFind, err := sq.QuerySearch()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Can't make test, too much errors, %v\", err)\n\t\t}\n\t\tif len(realFind) != len(tItem.results) {\n\t\t\tt.Errorf(\"got %v slice of resources, need to %v (query=%s)\", realFind, tItem.results, tItem.query)\n\t\t} else {\n\t\t\t// detail comparison\n\t\t\tfor _, realItem := range realFind {\n\t\t\t\texists := false\n\t\t\t\tfor _, expected := range tItem.results {\n\t\t\t\t\tif expected == realItem {\n\t\t\t\t\t\texists = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !exists {\n\t\t\t\t\tt.Errorf(\"wrong content, got %v slice of resources, need to %v (query=%s)\", realFind, tItem.results, tItem.query)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func QslSearch(t *testing.T, url string) (statusCode int, respBody []byte, err error) {\n\n\trespStatusCode, respBody, _ := GetResponseByGet(t, url)\n\treturn respStatusCode, respBody, nil\n}", "func TestQuery(t *testing.T) {\n\n\tresults, err := FindAll(Published())\n\tif err != nil {\n\t\tt.Fatalf(\"pages: error getting pages :%s\", err)\n\t}\n\tif len(results) == 0 {\n\t\tt.Fatalf(\"pages: published pages not found :%s\", err)\n\t}\n\n\tresults, err = FindAll(Query().Where(\"id>=? 
AND id <=?\", 0, 100))\n\tif err != nil || len(results) == 0 {\n\t\tt.Fatalf(\"pages: no page found :%s\", err)\n\t}\n\tif len(results) > 1 {\n\t\tt.Fatalf(\"pages: more than one page found for where :%s\", err)\n\t}\n\n}", "func testQuery(t *testing.T, q *Query, models []*indexedTestModel) {\n\texpected := expectedResultsForQuery(q.query, models)\n\ttestQueryRun(t, q, expected)\n\ttestQueryIDs(t, q, expected)\n\ttestQueryCount(t, q, expected)\n\ttestQueryStoreIDs(t, q, expected)\n\tcheckForLeakedTmpKeys(t, q.query)\n}", "func TESTQ(ir, mr operand.Op) { ctx.TESTQ(ir, mr) }", "func TestSearch(t *testing.T) {\n\tvar result *SearchResult\n\tvar err error\n\n\tresult, err = Search(\n\t\tSimpleQuery{\n\t\t\tSearch: \"electron\",\n\t\t\tMaxResults: 5,\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tlog.Printf(\"error occurred during Search(): %v\", err)\n\t\tt.FailNow()\n\t}\n\n\tif result == nil {\n\t\tlog.Printf(\"empty SearchResult: %v\", result)\n\t\tt.FailNow()\n\t}\n\n\tif len(result.Entries) != 5 {\n\t\tlog.Printf(\"invalid length for SearchResult.entries: %v\", result.Entries)\n\t\tt.FailNow()\n\t}\n\n}", "func (m *RootRequestBuilder) SearchWithQ(q *string)(*id724096a8c3b90aa72504dcd2761cc99b632bad9d405f048a071a5f697d2195f.SearchWithQRequestBuilder) {\n return id724096a8c3b90aa72504dcd2761cc99b632bad9d405f048a071a5f697d2195f.NewSearchWithQRequestBuilderInternal(m.pathParameters, m.requestAdapter, q);\n}", "func TestPaymentsSearch(t *testing.T) {\n\tfmt.Println(\"mp_test : PaymentsSearch\")\n\n\tfilter := &url.Values{}\n\tfilter.Add(\"range\", \"date_created\")\n\tfilter.Add(\"begin_date\", \"NOW-1MONTH\")\n\tfilter.Add(\"end_date\", \"NOW\")\n\tfilter.Add(\"status\", \"approved\")\n\n\tpmtSearch, err := mp.PaymentsSearch(filter)\n\tif err != nil {\n\t\tt.Fatalf(\"Error getting the payment: %v\", err)\n\t}\n\tfmt.Println(\"Payments: \", pmtSearch)\n}", "func runSearch(t *testing.T, queryTree *search.IntoExpr) *search.Space {\n\tt.Helper()\n\tspace := 
search.NewSpace(queryTree, &planner{stats: new(mockStats)}, testSearchOpts)\n\tspace.Explore()\n\tspace.Implement()\n\tspace.PredictCosts()\n\t_, err := space.BestPlan()\n\tassert.NoError(t, err)\n\treturn space\n}", "func (m *DriveItemItemRequestBuilder) SearchWithQ(q *string)(*iae2098c15020f00a005771b8a0dfdf0cc230504133ee3754b7e1f29877241492.SearchWithQRequestBuilder) {\n return iae2098c15020f00a005771b8a0dfdf0cc230504133ee3754b7e1f29877241492.NewSearchWithQRequestBuilderInternal(m.pathParameters, m.requestAdapter, q);\n}", "func (collection Collection) Search(query string) {\n\n}", "func TestSetSearchParams(t *testing.T) {\n\tstartDate, _ := time.Parse(\"2006-01-02\", \"2019-01-02\")\n\tendDate, _ := time.Parse(\"2006-01-02\", \"2019-02-02\")\n\n\ttestList := []SearchParamTest{\n\t\t{\"/anime.php\", searchModel.Query{\n\t\t\tQuery: \"naruto\",\n\t\t\tPage: 2}, \"/anime.php?c%5B%5D=a&c%5B%5D=b&c%5B%5D=c&c%5B%5D=d&c%5B%5D=e&c%5B%5D=f&c%5B%5D=g&gx=0&mid=0&p=0&q=naruto&score=0&show=50&status=0&type=0\",\n\t\t},\n\t\t{\"/manga.php\", searchModel.Query{\n\t\t\tQuery: \"naruto\",\n\t\t\tPage: 2,\n\t\t\tScore: 7}, \"/manga.php?c%5B%5D=a&c%5B%5D=b&c%5B%5D=c&c%5B%5D=d&c%5B%5D=e&c%5B%5D=f&c%5B%5D=g&gx=0&mid=0&p=0&q=naruto&score=7&show=50&status=0&type=0\",\n\t\t},\n\t\t{\"/anime.php\", searchModel.Query{\n\t\t\tQuery: \"naruto\",\n\t\t\tPage: 2,\n\t\t\tStartDate: startDate,\n\t\t\tEndDate: endDate}, \"/anime.php?c%5B%5D=a&c%5B%5D=b&c%5B%5D=c&c%5B%5D=d&c%5B%5D=e&c%5B%5D=f&c%5B%5D=g&ed=2019&em=2&ey=2&gx=0&mid=0&p=0&q=naruto&score=0&sd=2019&show=50&sm=1&status=0&sy=2&type=0\",\n\t\t},\n\t\t{\"/anime.php\", searchModel.Query{\n\t\t\tQuery: \"naruto\",\n\t\t\tGenre: []int{\n\t\t\t\t1,\n\t\t\t\t4,\n\t\t\t\t5,\n\t\t\t}}, \"/anime.php?c%5B%5D=a&c%5B%5D=b&c%5B%5D=c&c%5B%5D=d&c%5B%5D=e&c%5B%5D=f&c%5B%5D=g&genre%5B%5D=1&genre%5B%5D=4&genre%5B%5D=5&gx=0&mid=0&p=0&q=naruto&score=0&status=0&type=0\",\n\t\t},\n\t}\n\n\tfor _, param := range testList {\n\t\tu, _ := 
url.Parse(param.URL)\n\t\tq := SetSearchParams(u, param.Query)\n\t\tu.RawQuery = q.Encode()\n\t\tif u.String() != param.Result {\n\t\t\tt.Errorf(\"SetSearchParams() failed: expected %v got %v\", param.Result, u.String())\n\t\t}\n\t}\n}", "func verify(t *testing.T, searchQuery string, searchResults []workitem.WorkItem, expectedCount int) {\n\t// Since this test adds test data, whether or not other workitems exist\n\t// there must be at least 1 search result returned.\n\tif len(searchResults) == expectedCount && expectedCount == 0 {\n\t\t// no point checking further, we got what we wanted.\n\t\treturn\n\t}\n\trequire.Equal(t, expectedCount, len(searchResults), \"invalid number of results in the search\")\n\n\t// These keywords need a match in the textual part.\n\tallKeywords := strings.Fields(searchQuery)\n\t// These keywords need a match optionally either as URL string or ID\t\t +\t\t\t\tkeyWord = strings.ToLower(keyWord)\n\t// optionalKeywords := []string{workItemURLInSearchString, strconv.Itoa(fxt.WorkItems[idx].Number)}\n\t// We will now check the legitimacy of the search results.\n\t// Iterate through all search results and see whether they meet the criteria\n\tfor _, searchResult := range searchResults {\n\t\tt.Logf(\"Examining workitem id=`%v` number=`%d` using keywords %v\", searchResult.ID, searchResult.Number, allKeywords)\n\t\tfor _, keyWord := range allKeywords {\n\t\t\tkeyWord = strings.ToLower(keyWord)\n\t\t\tt.Logf(\"Verifying workitem id=`%v` number=`%d` for keyword `%s`...\", searchResult.ID, searchResult.Number, keyWord)\n\t\t\tworkItemTitle := \"\"\n\t\t\tif searchResult.Fields[workitem.SystemTitle] != nil {\n\t\t\t\tworkItemTitle = strings.ToLower(searchResult.Fields[workitem.SystemTitle].(string))\n\t\t\t}\n\t\t\tworkItemDescription := \"\"\n\t\t\tif searchResult.Fields[workitem.SystemDescription] != nil {\n\t\t\t\tdescriptionField := searchResult.Fields[workitem.SystemDescription].(rendering.MarkupContent)\n\t\t\t\tworkItemDescription = 
strings.ToLower(descriptionField.Content)\n\t\t\t}\n\t\t\tassert.True(t,\n\t\t\t\tstrings.Contains(workItemTitle, keyWord) || strings.Contains(workItemDescription, keyWord),\n\t\t\t\t\"`%s` neither found in title `%s` nor in the description `%s` for workitem #%d\", keyWord, workItemTitle, workItemDescription, searchResult.Number)\n\t\t}\n\t}\n}", "func TestQueryParser(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tinputQuery string\n\t\texpectedProblemSelector string\n\t\texpectedEntitySelector string\n\t}{\n\t\t{\n\t\t\tname: \"valid\",\n\t\t\tinputQuery: \"PV2;problemSelector=status(open)&entitySelector=mzId(7030365576649815430)\",\n\t\t\texpectedProblemSelector: \"status(open)\",\n\t\t\texpectedEntitySelector: \"mzId(7030365576649815430)\",\n\t\t},\n\t\t{\n\t\t\tname: \"valid - empty\",\n\t\t\tinputQuery: \"PV2;\",\n\t\t},\n\t\t{\n\t\t\tname: \"valid\",\n\t\t\tinputQuery: \"PV2;entitySelector=mzId(7030365576649815430)\",\n\t\t\texpectedEntitySelector: \"mzId(7030365576649815430)\",\n\t\t},\n\t\t{\n\t\t\tname: \"valid\",\n\t\t\tinputQuery: \"PV2;problemSelector=status(open)\",\n\t\t\texpectedProblemSelector: \"status(open)\",\n\t\t},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tquery, err := NewQueryParser(tc.inputQuery).Parse()\n\n\t\t\tassert.NoError(t, err)\n\t\t\tif assert.NotNil(t, query) {\n\t\t\t\tassert.EqualValues(t, tc.expectedProblemSelector, query.GetProblemSelector())\n\t\t\t\tassert.EqualValues(t, tc.expectedEntitySelector, query.GetEntitySelector())\n\t\t\t}\n\t\t})\n\t}\n}", "func TestQuery(t *testing.T, harness Harness, e *sqle.Engine, q string, expected []sql.Row) {\n\tt.Run(q, func(t *testing.T) {\n\t\tif sh, ok := harness.(SkippingHarness); ok {\n\t\t\tif sh.SkipQueryTest(q) {\n\t\t\t\tt.Skipf(\"Skipping query %s\", q)\n\t\t\t}\n\t\t}\n\n\t\tctx := NewContextWithEngine(harness, e)\n\t\tTestQueryWithContext(t, ctx, e, q, expected)\n\t})\n}", "func TestGenerateQuery(t *testing.T) {\n\tfor _, 
test := range []struct {\n\t\tname, q, want string\n\t}{\n\t\t{\"querySearchSymbol\", SymbolQuery(SearchTypeSymbol), querySearchSymbol},\n\t\t{\"querySearchPackageDotSymbol\", SymbolQuery(SearchTypePackageDotSymbol), querySearchPackageDotSymbol},\n\t\t{\"querySearchMultiWordExact\", SymbolQuery(SearchTypeMultiWordExact), querySearchMultiWordExact},\n\t} {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tif diff := cmp.Diff(test.want, test.q); diff != \"\" {\n\t\t\t\tt.Errorf(\"mismatch (-want, +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}", "func (r *Search) Q(q string) *Search {\n\tr.values.Set(\"q\", q)\n\n\treturn r\n}", "func QueryBuilder(req *search.SearchRequest) (es.Query, error) {\n\n\t// Validate request\n\tif req == nil {\n\t\treturn nil, errors.New(\"nil search request\")\n\t}\n\n\t// Check if QueryString is valid and construct QueryStringQuery\n\tif len(req.QueryString) != 0 {\n\t\treturn es.NewQueryStringQuery(req.QueryString), nil\n\t}\n\n\t// Construct Bool query based on search requirements\n\tif req.Query == nil {\n\t\treturn nil, errors.New(\"nil search request\")\n\t}\n\n\tquery := es.NewBoolQuery().QueryName(\"CompleteQuery\")\n\n\t// Process Text Requirements\n\ttextReq := es.NewBoolQuery().QueryName(\"TextQuery\")\n\tfor _, treq := range req.Query.Texts {\n\n\t\ttQuery := es.NewBoolQuery()\n\t\tfor _, text := range treq.Text {\n\t\t\tif strings.Contains(text, \" \") {\n\t\t\t\t// Phrase query\n\t\t\t\ttQuery = tQuery.Must(es.NewQueryStringQuery(fmt.Sprintf(\"\\\"%s\\\"\", text)))\n\t\t\t} else {\n\t\t\t\t// Word query\n\t\t\t\ttQuery = tQuery.Must(es.NewQueryStringQuery(text))\n\t\t\t}\n\t\t}\n\t\ttextReq = textReq.Should(tQuery).MinimumNumberShouldMatch(1)\n\t}\n\tif len(req.Query.Texts) > 0 {\n\t\tquery = query.Must(textReq)\n\t}\n\n\t// Process categories requirement\n\tcatReq := es.NewBoolQuery().QueryName(\"CategoryQuery\")\n\tfor _, cat := range req.Query.Categories {\n\t\tcatReq = 
catReq.Should(es.NewTermQuery(\"meta.labels._category.keyword\", cat)).MinimumNumberShouldMatch(1)\n\t}\n\tif len(req.Query.Categories) > 0 {\n\t\tquery = query.Must(catReq)\n\t}\n\n\t// Process kinds requirement\n\tkindReq := es.NewBoolQuery().QueryName(\"KindQuery\")\n\tfor _, cat := range req.Query.Kinds {\n\t\tkindReq = kindReq.Should(es.NewTermQuery(\"kind.keyword\", cat)).MinimumNumberShouldMatch(1)\n\t}\n\tif len(req.Query.Kinds) > 0 {\n\t\tquery = query.Must(kindReq)\n\t}\n\n\t// Process field requirement if it is valid\n\tif req.Query.Fields != nil {\n\t\tfor _, field := range req.Query.Fields.Requirements {\n\t\t\tswitch field.Operator {\n\t\t\tcase fields.Operator_name[int32(fields.Operator_equals)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tif strings.Contains(field.Values[0], \"*\") {\n\t\t\t\t\t\tquery = query.Must(es.NewWildcardQuery(field.Key, field.Values[0]))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tquery = query.Must(es.NewMatchPhraseQuery(field.Key, field.Values[0]))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_notEquals)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tif strings.Contains(field.Values[0], \"*\") {\n\t\t\t\t\t\tquery = query.MustNot(es.NewWildcardQuery(field.Key, field.Values[0]))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tquery = query.MustNot(es.NewMatchPhraseQuery(field.Key, field.Values[0]))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_in)]:\n\t\t\t\tfieldQuery := es.NewBoolQuery().MinimumNumberShouldMatch(1)\n\t\t\t\tfor _, val := range field.GetValues() {\n\t\t\t\t\tif strings.Contains(val, \"*\") {\n\t\t\t\t\t\tfieldQuery.Should(es.NewWildcardQuery(field.Key, val))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfieldQuery.Should(es.NewMatchPhraseQuery(field.Key, val))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tquery = query.Must(fieldQuery)\n\t\t\tcase fields.Operator_name[int32(fields.Operator_notIn)]:\n\t\t\t\tfor _, val := range field.GetValues() {\n\t\t\t\t\tif strings.Contains(val, 
\"*\") {\n\t\t\t\t\t\tquery = query.MustNot(es.NewWildcardQuery(field.Key, val))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tquery = query.MustNot(es.NewMatchPhraseQuery(field.Key, val))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_gt)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewRangeQuery(field.Key).Gt(field.Values[0]))\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_gte)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewRangeQuery(field.Key).Gte(field.Values[0]))\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_lt)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewRangeQuery(field.Key).Lt(field.Values[0]))\n\t\t\t\t}\n\t\t\tcase fields.Operator_name[int32(fields.Operator_lte)]:\n\t\t\t\tif len(field.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewRangeQuery(field.Key).Lte(field.Values[0]))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Process label requirements if it is valid\n\tif req.Query.Labels != nil {\n\t\tfor _, label := range req.Query.Labels.Requirements {\n\t\t\tlKey := fmt.Sprintf(\"%s.keyword\", label.Key)\n\t\t\tswitch label.Operator {\n\t\t\tcase labels.Operator_name[int32(labels.Operator_equals)]:\n\t\t\t\tif len(label.Values) > 0 {\n\t\t\t\t\tquery = query.Must(es.NewTermQuery(lKey, label.Values[0]))\n\t\t\t\t}\n\t\t\tcase labels.Operator_name[int32(labels.Operator_notEquals)]:\n\t\t\t\tif len(label.Values) > 0 {\n\t\t\t\t\tquery = query.MustNot(es.NewTermQuery(lKey, label.Values[0]))\n\t\t\t\t}\n\t\t\tcase labels.Operator_name[int32(labels.Operator_in)]:\n\t\t\t\tvalues := make([]interface{}, len(label.Values))\n\t\t\t\tfor i, v := range label.Values {\n\t\t\t\t\tvalues[i] = v\n\t\t\t\t}\n\t\t\t\tquery = query.Must(es.NewTermsQuery(lKey, values...))\n\t\t\tcase labels.Operator_name[int32(labels.Operator_notIn)]:\n\t\t\t\tvalues := make([]interface{}, len(label.Values))\n\t\t\t\tfor i, v := range label.Values 
{\n\t\t\t\t\tvalues[i] = v\n\t\t\t\t}\n\t\t\t\tquery = query.MustNot(es.NewTermsQuery(lKey, values...))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn query, nil\n}", "func Test_Search(t *testing.T) {\n\tfor _, testCase := range searchTestCases {\n\t\tresult := org.Search(org.CEO, testCase.name)\n\n\t\tif testCase.expected && result == nil {\n\t\t\tt.Errorf(\"Expected to find '%s', but did not find\", testCase.name)\n\t\t\tt.Fail()\n\t\t} else if !testCase.expected && result != nil {\n\t\t\tt.Errorf(\"Expected to not find '%s', but found\", testCase.name)\n\t\t}\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GenerateTimestampArray allows to generate a Timestamp array
func GenerateTimestampArray(startTS, endTS int64) (tsArray []int64) { daysNbr := (endTS - startTS) / (24 * 3600) var i int64 for i = 0; i <= daysNbr; i++ { tsArray = append(tsArray, startTS+(i*24*3600)) } return tsArray }
[ "func generateTimestamp(ts float64) *timestamp.Timestamp {\n\tsec, nano := math.Modf(ts)\n\treturn &timestamp.Timestamp{\n\t\tSeconds: int64(sec),\n\t\tNanos: int32(nano * 1e9),\n\t}\n}", "func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tvec := vals[0].(Vector)\n\tfor _, el := range vec {\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(el.Metric),\n\t\t\tF: float64(el.T) / 1000,\n\t\t})\n\t}\n\treturn enh.Out\n}", "func newTimeSeriesPB(values []float64, timestamps []int64, labels []prompbmarshal.Label) prompbmarshal.TimeSeries {\n\tts := prompbmarshal.TimeSeries{\n\t\tSamples: make([]prompbmarshal.Sample, len(values)),\n\t}\n\tfor i := range values {\n\t\tts.Samples[i] = prompbmarshal.Sample{\n\t\t\tValue: values[i],\n\t\t\tTimestamp: time.Unix(timestamps[i], 0).UnixNano() / 1e6,\n\t\t}\n\t}\n\tts.Labels = labels\n\treturn ts\n}", "func GenerateData[N Number](len int, dtype DataType) []Element[N] {\n\tres := make([]Element[N], len)\n\tt := int64(0)\n\tfor i := 0; i < len; i++ {\n\t\tswitch dtype {\n\t\tcase Int32:\n\t\t\tres[i] = Element[N]{\n\t\t\t\tTimestamp: t,\n\t\t\t\tValue: N(int32(i)),\n\t\t\t}\n\t\tcase Float32:\n\t\t\tres[i] = Element[N]{\n\t\t\t\tTimestamp: t,\n\t\t\t\tValue: N(float32(i)),\n\t\t\t}\n\t\tcase Float64:\n\t\t\tres[i] = Element[N]{\n\t\t\t\tTimestamp: t,\n\t\t\t\tValue: N(float64(i)),\n\t\t\t}\n\t\t}\n\t\tt += 3\n\t}\n\treturn res\n}", "func (_TellorMesosphere *TellorMesosphereCaller) Timestamps(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _TellorMesosphere.contract.Call(opts, &out, \"timestamps\", arg0, arg1)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_ECC *ECCCaller) Timestamps(opts *bind.CallOpts, arg0 [32]byte) (struct {\n\tUnixTime int64\n\tHash [32]byte\n\tDevice common.Address\n}, 
error) {\n\tret := new(struct {\n\t\tUnixTime int64\n\t\tHash [32]byte\n\t\tDevice common.Address\n\t})\n\tout := ret\n\terr := _ECC.contract.Call(opts, out, \"timestamps\", arg0)\n\treturn *ret, err\n}", "func Timestamp(t, i uint32) Val {\n\tv := Val{t: bsontype.Timestamp}\n\tv.bootstrap[0] = byte(i)\n\tv.bootstrap[1] = byte(i >> 8)\n\tv.bootstrap[2] = byte(i >> 16)\n\tv.bootstrap[3] = byte(i >> 24)\n\tv.bootstrap[4] = byte(t)\n\tv.bootstrap[5] = byte(t >> 8)\n\tv.bootstrap[6] = byte(t >> 16)\n\tv.bootstrap[7] = byte(t >> 24)\n\treturn v\n}", "func GenerateTimestamp() int64 {\n\t// Current time.\n\tnow := time.Now()\n\n\t// Unix time in nanoseconds.(Nanoseconds since januar 1970)\n\treturn now.UnixNano()\n}", "func WireToTimestampsByEvent(arr map[string]uint64) TimestampsByEvent {\n\tres := make(TimestampsByEvent, len(arr))\n\n\tfor hex, t := range arr {\n\t\thash_ := hash.HexToEventHash(hex)\n\t\tres[hash_] = inter.Timestamp(t)\n\t}\n\n\treturn res\n}", "func NewTimestampCollection(builder *array.TimestampBuilder) *TimestampCollection {\n\treturn &TimestampCollection{\n\t\tbuilder: builder,\n\t}\n}", "func AvroTimeSlice(times []time.Time) []int64 {\n\tnewTimes := make([]int64, len(times))\n\tfor i, t := range times {\n\t\tnewTimes[i] = AvroTime(t)\n\t}\n\treturn newTimes\n}", "func TimeToTimeStampPPB(times ...time.Time) ([]*timestamp.Timestamp, error) {\n\tvar returnStamps []*timestamp.Timestamp\n\tfor _, currentTime := range times {\n\t\tnewStamp, err := ptypes.TimestampProto(currentTime)\n\t\tif err != nil {\n\t\t\tlog.Printf(glErr.DtTimeStampToProtoTimeStamp(currentTime, err))\n\t\t\t// log.Printf(\"Unable to convert timestamp %v to proto timestamp when trying to update promotion. 
Error: %v \\n\", currentTime, err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturnStamps = append(returnStamps, newStamp)\n\t}\n\treturn returnStamps, nil\n}", "func (_ECC *ECCCallerSession) Timestamps(arg0 [32]byte) (struct {\n\tUnixTime int64\n\tHash [32]byte\n\tDevice common.Address\n}, error) {\n\treturn _ECC.Contract.Timestamps(&_ECC.CallOpts, arg0)\n}", "func generateLargeTimeseries() (ts []prompb.TimeSeries, mint, maxt int64) {\n\tmetrics := []prompb.TimeSeries{\n\t\t{\n\t\t\tLabels: []prompb.Label{\n\t\t\t\t{Name: \"aaa\", Value: \"000\"},\n\t\t\t\t{Name: labels.MetricName, Value: \"metric_1\"},\n\t\t\t\t{Name: \"foo\", Value: \"bar\"},\n\t\t\t\t{Name: \"instance\", Value: \"1\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tLabels: []prompb.Label{\n\t\t\t\t{Name: labels.MetricName, Value: \"metric_1\"},\n\t\t\t\t{Name: \"foo\", Value: \"bar\"},\n\t\t\t\t{Name: \"instance\", Value: \"2\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tLabels: []prompb.Label{\n\t\t\t\t{Name: labels.MetricName, Value: \"metric_1\"},\n\t\t\t\t{Name: \"foo\", Value: \"bar\"},\n\t\t\t\t{Name: \"instance\", Value: \"3\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tLabels: []prompb.Label{\n\t\t\t\t{Name: labels.MetricName, Value: \"metric_2\"},\n\t\t\t\t{Name: \"foo\", Value: \"bat\"},\n\t\t\t\t{Name: \"instance\", Value: \"1\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tLabels: []prompb.Label{\n\t\t\t\t{Name: labels.MetricName, Value: \"metric_2\"},\n\t\t\t\t{Name: \"foo\", Value: \"bat\"},\n\t\t\t\t{Name: \"instance\", Value: \"2\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tLabels: []prompb.Label{\n\t\t\t\t{Name: labels.MetricName, Value: \"metric_2\"},\n\t\t\t\t{Name: \"foo\", Value: \"bat\"},\n\t\t\t\t{Name: \"instance\", Value: \"3\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tLabels: []prompb.Label{\n\t\t\t\t{Name: labels.MetricName, Value: \"metric_3\"},\n\t\t\t\t{Name: \"instance\", Value: \"1\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tLabels: []prompb.Label{\n\t\t\t\t{Name: labels.MetricName, Value: \"metric_3\"},\n\t\t\t\t{Name: \"instance\", Value: 
\"2\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i := range metrics {\n\t\tmetrics[i].Samples = generateSamples(i + 1)\n\t}\n\n\treturn metrics, startTime, endTime\n}", "func randDate() [2]time.Time {\n var t [2]time.Time\n startDay := randomIntn(7)\n startHour := randomIntn(23)\n\n t[0] = createDate(startDay, startHour)\n t[1] = createDate(startDay, startHour + 1)\n debugf(\"%s\\n\", t[0])\n return t\n}", "func (el Elements) Timestamp(i int) int64 {\n\tswitch el.Type {\n\tcase part3.Int32:\n\t\treturn el.I32[i].Ts\n\tcase part3.Float32:\n\t\treturn el.F32[i].Ts\n\tcase part3.Float64:\n\t\treturn el.F64[i].Ts\n\tdefault:\n\t\treturn int64(-1)\n\t}\n}", "func Timestamp(t *timestamp.Timestamp) time.Time {\n\tans, err := ptypes.Timestamp(t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ans\n}", "func (t *Timestamp) MarshalJSON() ([]byte, error) {\n\tts := time.Time(*t).Unix()\n\tstamp := fmt.Sprint(ts)\n\n\treturn []byte(stamp), nil\n}", "func NewTweetArray(n int) []Tweet {\n\tres := make([]Tweet, n)\n\tfor i := 0; i < n; i++ {\n\t\tres[i] = NewTweet()\n\t}\n\treturn res\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
initialising ledger only for the purpose for testing
func (s *SmartContract) initLedger(stub shim.ChaincodeStubInterface) sc.Response { fmt.Println("Initalising ledger") chainkeys := []ChainKey{ ChainKey{DocumentID: "doc1", Glue: "glue1", TypeofDocument: "type1"}, ChainKey{DocumentID: "doc2", Glue: "glue2", TypeofDocument: "type2"}, } m := 0 for m < len(chainkeys) { var chainkeysoutput = chainkeys[m] chainkeyAsBytes, _ := json.Marshal(&chainkeysoutput) stub.PutState(strconv.Itoa(m+1), chainkeyAsBytes) fmt.Println("Added", chainkeys[m]) m = m + 1 } return shim.Success(nil) }
[ "func (s *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response {\n\t supporter := []Supporter{\n\t\t Supporter{Name:\"Soyoung Yoo\", ID:\"991212-2999999\",Account:\"kook,12345323987\", Email:\"[email protected]\", Password:\"1ARVn2Auq2/WAqx2gNrL+q3RNjAzXpUfCXrzkA6d4Xa22yhRLy4AC50E+6UTPoscbo31nbOoq51gvkuXzJ6B2w==\", Address:\"Seoul\", PhoneNum:\"01089145587\"},\n\t }\n \n\tsupporterAsBytes, _ := json.Marshal(supporter[0])\n\tAPIstub.PutState( \"991212-2999999\", supporterAsBytes)\n\n\t return shim.Success(nil)\n }", "func (s *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response {\n\tsows := Seed()\n\n\ti := 0\n\tfor i < len(sows) {\n\t\tfmt.Println(\"i is \", i)\n\t\tsowAsBytes, _ := json.Marshal(sows[i])\n\t\tAPIstub.PutState(strconv.Itoa(i+1), sowAsBytes)\n\t\tfmt.Println(\"Added\", sows[i])\n\t\ti = i + 1\n\t}\n\n\treturn shim.Success(nil)\n}", "func Initialize() {\n\tl := new(Ledger)\n\tl.historyAll = make([]*Transaction, 0)\n\tl.HistoryByAssetID = make(map[int][]*Transaction)\n\tl.historyByUserID = make(map[int][]*Transaction)\n\tl.users = NewUsers()\n\tl.populate()\n\n\tglobalLedger = l\n\t// TODO: temporary\n\t//globalLedger.populate()\n}", "func (s *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response {\n\n\torgan := []Organ{\n\t\t//Organ{ID: \"222\", HolderHospital: \"nabeul\", LifeSpan: \"36\", Type: \"foie\", Used: f},\n\t\t//Organ{ID: \"333\", HolderHospital: \"Tunis\", LifeSpan: \"48\", Type: \"coeur\", Used: t},\n\t}\n\n\ti := 0\n\tfor i < len(organ) {\n\t\tfmt.Println(\"i is \", i)\n\t\torganAsBytes, _ := json.Marshal(organ[i])\n\t\tAPIstub.PutState(strconv.Itoa(i+1), organAsBytes)\n\t\tfmt.Println(\"Added\", organ[i])\n\t\ti = i + 1\n\t}\n\n\treturn shim.Success(nil)\n}", "func (s *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response {\n\tassets := getLinkAssets()\n\tcomponents := getLinkComponents()\n\tobservations := getLinkObservations()\n\thashes := 
getLinkHashes()\n\n\ti := 0\n\tfor i < len(assets) {\n\t\tfmt.Println(\"i is \", i)\n\t\tassetsBytes, _ := json.Marshal(assets[i])\n\t\tAPIstub.PutState(\"ASSET\"+strconv.Itoa(i), assetsBytes)\n\t\tfmt.Println(\"Added\", assets[i])\n\t\ti = i + 1\n\t}\n\n\tj := 0\n\tfor j < len(components) {\n\t\tfmt.Println(\"j is \", j)\n\t\tcomponentsBytes, _ := json.Marshal(components[j])\n\t\tAPIstub.PutState(\"COMP\"+strconv.Itoa(j), componentsBytes)\n\t\tfmt.Println(\"Added\", components[j])\n\t\tj = j + 1\n\t}\n\n\tk := 0\n\tfor k < len(observations) {\n\t\tfmt.Println(\"k is \", k)\n\t\tobsAsBytes, _ := json.Marshal(observations[k])\n\t\tAPIstub.PutState(\"OBS\"+strconv.Itoa(k), obsAsBytes)\n\t\tfmt.Println(\"Added\", observations[k])\n\t\tk = k + 1\n\t}\n\n\tl := 0\n\tfor l < len(hashes) {\n\t\tfmt.Println(\"l is \", l)\n\t\thashBytes, _ := json.Marshal(hashes[l])\n\t\tAPIstub.PutState(\"HASH\"+strconv.Itoa(l), hashBytes)\n\t\tfmt.Println(\"Added\", hashes[l])\n\t\tl = l + 1\n\t}\n\n\tfmt.Printf(\"initLedger Successful\")\n\treturn shim.Success([]byte(\"=== Initialisation of Ledger Complete ===\"))\n}", "func initateSequencer() {\n\tisDesignatedSequencer = true\n\tRSA.KeyGen(1024) // generates new keyPair\n\tSequencerKeyPair = RSA.GetKeyPair() // initiates this as keypair\n\tvar e1 = convertBigIntToString(SequencerKeyPair.E)\n\tvar n1 = convertBigIntToString(SequencerKeyPair.N)\n\tledger.Sequencer = n1 + \",\" + e1\n\tfmt.Println(\"hello\")\n\tledger.NewBlock = new(Block)\n\tledger.NewBlock.IDlist = make(map[int]string)\n\tledger.NewBlock.BlockNr = LocalBlockNumber\n\ttime.Sleep(20000 *time.Millisecond) // gives the peers 20 seconds to connect to the network\n\tledger.Phase = 2\n\tlocalMsg.Ledger = ledger // updates the ledger in the localMessage \n\tfor _, conn := range connections{\n\t\tgo send(localMsg, conn) // tells all its connections that it now stage two\n\t}\n}", "func createFakeLedgerInfo(assert *assert.Assertions) *wire.MsgReturnInit {\n\t// create a 
fullmerkletree\n\tvar tree = merkle.NewFullMerkleTree()\n\terr := tree.Append(&genesis.GenesisBlocks[shard0].Header.OutsMerkleRoot)\n\tassert.Nil(err)\n\terr = tree.HookSecondLayerTree(genesis.GenesisBlocksOutsMerkleTrees[shard0])\n\tassert.Nil(err)\n\n\t// get lastnode's merkle path\n\tlastPath, err := tree.GetLastNodeMerklePath()\n\tassert.Nil(err)\n\n\t// ledger: [0:1,1:0,2:0,3:0]\n\tledgerInfo := wire.LedgerInfo{\n\t\tSize: 1,\n\t}\n\tledgerInfo.SetShardHeight(shard0, 1)\n\n\taddress, err := multivacaddress.StringToUserAddress(pks)\n\tassert.Nil(err)\n\n\tdeposits, err := getDepositsFromGenesisBlock(shard0, address, lastPath)\n\tassert.Nil(err)\n\tledger := &wire.MsgReturnInit{\n\t\tLedger: ledgerInfo,\n\t\tRightPath: *lastPath,\n\t\tShardHeight: 1,\n\t\tLatestHeader: genesis.GenesisBlocks[shard0].Header,\n\t\tTreeSize: 1,\n\t\tShardIndex: shard0,\n\t\tDeposits: deposits,\n\t}\n\treturn ledger\n}", "func (s *SmartContract) InitLedger(ctx contractapi.TransactionContextInterface) error {\n\ttimestamp, _ := ctx.GetStub().GetTxTimestamp()\n\tdate := time.Unix(timestamp.Seconds, int64(timestamp.Nanos))\n\n\ttraces := []Trace{\n\t\t{Issuer: \"Standard Company\", Artefact: \"README.md\", Hash: \"246768bd999c2df2c03a5e33219ae4b3b52d9de6\", Date: date, State: \"ISSUED\", Version: \"1.0.0\", Message: \"Initial commit\"},\n\t}\n\n\tCloneRepo()\n\tGitConfig()\n\n\tfor i, trace := range traces {\n\t\ttraceAsBytes, _ := json.Marshal(trace)\n\t\terr := ctx.GetStub().PutState(\"TRACE\"+strconv.Itoa(i), traceAsBytes)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to put to world state. 
%s\", err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}", "func NewLedger(endpoint string, authToken string) Ledger {\n\treturn Ledger{endpoint: endpoint, authToken: authToken}\n}", "func (t *SimpleChaincode) initLedger(APIstub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tCert := []cert{\n\t\tcert{PR_no: \"101\", Student_Name: \"Gaurav1\", Seat_no: \"1\", College_Name: \"PCCE\", Examination: \"may/june\", Year_Of_Passing: \"2019\", Sub: \"abc\"},\n\t\tcert{PR_no: \"102\", Student_Name: \"Gaurav2\", Seat_no: \"2\", College_Name: \"PCCE\", Examination: \"may/june\", Year_Of_Passing: \"2019\", Sub: \"abc\"},\n\t\tcert{PR_no: \"103\", Student_Name: \"Gaurav3\", Seat_no: \"3\", College_Name: \"PCCE\", Examination: \"may/june\", Year_Of_Passing: \"2019\", Sub: \"abc\"},\n\t\tcert{PR_no: \"104\", Student_Name: \"Gaurav4\", Seat_no: \"4\", College_Name: \"PCCE\", Examination: \"may/june\", Year_Of_Passing: \"2019\", Sub: \"abc\"},\n\t}\n\n\ti := 0\n\tfor i < len(Cert) {\n\t\tfmt.Println(\"i is \", i)\n\t\tvalAsBytes, _ := json.Marshal(Cert[i])\n\t\tAPIstub.PutState(strconv.Itoa(i+1), valAsBytes)\n\t\tfmt.Println(\"Added\", Cert[i])\n\t\ti = i + 1\n\t}\n\n\t// ==== Create student object and marshal to JSON ====\n\tstudent := &student{\"123\", \"123\", \"G\", \"U\", \"S\", \"PCCE\", \"IT\", \"2015\", \"[email protected]\", \"8007067665\"}\n\tstudentJSONasBytes, _ := json.Marshal(student)\n\t// === Save student to state ===\n\tAPIstub.PutState(\"123\", studentJSONasBytes)\n\n\treturn shim.Success(nil)\n}", "func (h *txnHeartbeater) init(\n\tac log.AmbientContext,\n\tstopper *stop.Stopper,\n\tclock *hlc.Clock,\n\tmetrics *TxnMetrics,\n\tloopInterval time.Duration,\n\tgatekeeper lockedSender,\n\tmu sync.Locker,\n\ttxn *roachpb.Transaction,\n) {\n\th.AmbientContext = ac\n\th.stopper = stopper\n\th.clock = clock\n\th.metrics = metrics\n\th.loopInterval = loopInterval\n\th.gatekeeper = gatekeeper\n\th.mu.Locker = mu\n\th.mu.txn = txn\n}", "func (s *SmartContract) 
initDonorLedger(APIstub shim.ChaincodeStubInterface) sc.Response {\n\tdonors := []Donor{\n\t\tDonor{Name: \"Alex\", SSN: \"100001\", Address: \"Address 1, City, ZIP\", Phone: \"67-50\", Age: \"21\", Sex:\"Male\", Btype: \"Apos\"},\n\t Donor{Name: \"Ben\" , SSN: \"200002\", Address: \"Address 1, City, ZIP\", Phone: \"89-99\", Age: \"22\", Sex:\"Male\", Btype: \"ABpos\"},\n\t Donor{Name: \"John\", SSN: \"300003\", Address: \"Address 1, City, ZIP\", Phone: \"44-45\", Age: \"33\", Sex:\"Male\", Btype: \"Apos\"},\n\t Donor{Name: \"Nick\", SSN: \"400004\", Address: \"Address 1, City, ZIP\", Phone: \"01-22\", Age: \"44\", Sex:\"Male\", Btype: \"Aneg\"},\n\t}\n\n\ti := 0\n\tfor i < len(donors) {\n\t\tfmt.Println(\"i is \", i)\n\t\tdonorAsBytes, _ := json.Marshal(donors[i])\n\n\t\tAPIstub.PutState(randomId(), donorAsBytes)\n\t\tfmt.Println(\"Added\", donors[i])\n\t\ti = i + 1\n\t}\n\n\treturn shim.Success(nil)\n}", "func InitLedger(stub shim.ChaincodeStubInterface) pb.Response {\n\tservices := []Service{\n\t\tService{ServiceId: \"idservice1\", Name: \"service1\", Description: \"service Description 1\"},\n\t\tService{ServiceId: \"idservice2\", Name: \"service2\", Description: \"service Description 2\"},\n\t\tService{ServiceId: \"idservice3\", Name: \"service3\", Description: \"service Description 3\"},\n\t\tService{ServiceId: \"idservice4\", Name: \"service4\", Description: \"service Description 4\"},\n\t\tService{ServiceId: \"idservice5\", Name: \"service5\", Description: \"service Description 5\"},\n\t\tService{ServiceId: \"idservice99\", Name: \"service99\", Description: \"service Description 99\"},\n\t}\n\tagents := []Agent{\n\t\tAgent{AgentId: \"idagent1\", Name: \"agent1\", Address: \"address1\"},\n\t\tAgent{AgentId: \"idagent2\", Name: \"agent2\", Address: \"address2\"},\n\t\tAgent{AgentId: \"idagent3\", Name: \"agent3\", Address: \"address3\"},\n\t\tAgent{AgentId: \"idagent4\", Name: \"agent4\", Address: \"address4\"},\n\t\tAgent{AgentId: \"idagent5\", Name: 
\"agent5\", Address: \"address5\"},\n\t\tAgent{AgentId: \"idagent98\", Name: \"agent98\", Address: \"address98\"},\n\t\tAgent{AgentId: \"idagent99\", Name: \"agent99\", Address: \"address99\"},\n\t}\n\tserviceRelationAgents := []ServiceRelationAgent{\n\t\tServiceRelationAgent{\"idservice99idagent99\",\"idservice99\",\"idagent99\" ,\"5\",\"7\"},\n\t}\n\treputations := []Reputation{\n\t\tReputation{\"idagent99idservice99EXECUTER\",\"idagent99\",\"idservice99\" ,\"EXECUTER\",\"9\"},\n\t\tReputation{\"idagent98idservice99DEMANDER\",\"idagent98\",\"idservice99\" ,\"DEMANDER\",\"8\"},\n\t}\n\n\n\t// non funziona ( come chiamare, si può fare?)\n\t// InitServiceAgentRelation(stub, []string{\"idservice1idagent1\", \"idservice1\", \"idagent1\", \"5\", \"3\", \"9\"})\n\t// InitServiceAgentRelation(stub, []string{\"idservice1idagent2\", \"idservice1\", \"idagent2\", \"6\", \"2\", \"8\"})\n\n\tfor i := 0; i < len(services); i++ {\n\t\tserviceLog.Info(\"i is \", i)\n\t\tserviceAsBytes, _ := json.Marshal(services[i])\n\t\tserviceLog.Info(serviceAsBytes)\n\t\terr := stub.PutState(services[i].ServiceId, serviceAsBytes)\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\tserviceLog.Info(\"Addeds\", services[i])\n\t}\n\tfor i := 0; i < len(agents); i++ {\n\t\tserviceLog.Info(\"i is \", i)\n\t\tagentAsBytes, _ := json.Marshal(agents[i])\n\t\terr := stub.PutState(agents[i].AgentId, agentAsBytes)\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\tserviceLog.Info(\"Added\", agents[i])\n\t}\n\tfor i := 0; i < len(serviceRelationAgents); i++ {\n\t\tserviceLog.Info(\"i is \", i)\n\t\tserviceRelationAgentsAsBytes, _ := json.Marshal(serviceRelationAgents[i])\n\t\terr := stub.PutState(serviceRelationAgents[i].RelationId, serviceRelationAgentsAsBytes)\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\tserviceLog.Info(\"Added\", serviceRelationAgents[i])\n\t}\n\tfor i := 0; i < len(reputations); i++ {\n\t\tserviceLog.Info(\"i is \", 
i)\n\t\treputationsAsBytes, _ := json.Marshal(reputations[i])\n\t\terr := stub.PutState(reputations[i].ReputationId, reputationsAsBytes)\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\tserviceLog.Info(\"Added\", reputations[i])\n\t}\n\n\treturn shim.Success(nil)\n}", "func NewLedger(endpoint string, authToken string) *Ledger {\n\tif endpoint == \"\" {\n\t\tendpoint = os.Getenv(\"LEDGER_ENDPOINT\")\n\t}\n\tif authToken == \"\" {\n\t\tauthToken = os.Getenv(\"LEDGER_AUTH_TOKEN\")\n\t}\n\treturn &Ledger{endpoint: endpoint, authToken: authToken}\n}", "func InitLedgerCache() {\n\tledgerCache = make(map[string]GLAccount)\n}", "func NewTestLedger(\n\tdb *storage.DB,\n\tam *artifactmanager.LedgerArtifactManager,\n\tpm *pulsemanager.PulseManager,\n\tjc *jetcoordinator.JetCoordinator,\n\tls *localstorage.LocalStorage,\n) *Ledger {\n\treturn &Ledger{\n\t\tdb: db,\n\t\tArtifactManager: am,\n\t\tPulseManager: pm,\n\t\tJetCoordinator: jc,\n\t\tLocalStorage: ls,\n\t}\n}", "func buildTestLedger2(ledger *ledger.Ledger, t *testing.T) {\n\t// -----------------------------<Block #0>---------------------\n\t// Add the 0th (genesis block)\n\tledger.BeginTxBatch(0)\n\tledger.CommitTxBatch(0, []*protos.Transaction{}, nil, []byte(\"dummy-proof\"))\n\t// -----------------------------<Block #0>---------------------\n\n\t// -----------------------------<Block #1>------------------------------------\n\n\t// Deploy a contract\n\t// To deploy a contract, we call the 'NewContract' function in the 'Contracts' contract\n\t// TODO Use chaincode instead of contract?\n\t// TODO Two types of transactions. 
Execute transaction, deploy/delete/update contract\n\tledger.BeginTxBatch(1)\n\ttransaction1a, err := protos.NewTransaction(protos.ChaincodeID{Path: \"Contracts\"}, generateUUID(t), \"NewContract\", []string{\"name: MyContract1, code: var x; function setX(json) {x = json.x}}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\t// VM runs transaction1a and updates the global state with the result\n\t// In this case, the 'Contracts' contract stores 'MyContract1' in its state\n\tledger.TxBegin(transaction1a.Txid)\n\tledger.SetState(\"MyContract1\", \"code\", []byte(\"code example\"))\n\tledger.TxFinished(transaction1a.Txid, true)\n\tledger.CommitTxBatch(1, []*protos.Transaction{transaction1a}, nil, []byte(\"dummy-proof\"))\n\n\t// -----------------------------</Block #1>-----------------------------------\n\n\t// -----------------------------<Block #2>------------------------------------\n\n\tledger.BeginTxBatch(2)\n\ttransaction2a, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyContract\"}, generateUUID(t), \"setX\", []string{\"{x: \\\"hello\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\ttransaction2b, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyOtherContract\"}, generateUUID(t), \"setY\", []string{\"{y: \\\"goodbuy\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\n\t// Run this transction in the VM. 
The VM updates the state\n\tledger.TxBegin(transaction2a.Txid)\n\tledger.SetState(\"MyContract\", \"x\", []byte(\"hello\"))\n\tledger.SetState(\"MyOtherContract\", \"y\", []byte(\"goodbuy\"))\n\tledger.TxFinished(transaction2a.Txid, true)\n\n\t// Commit txbatch that creates the 2nd block on blockchain\n\tledger.CommitTxBatch(2, []*protos.Transaction{transaction2a, transaction2b}, nil, []byte(\"dummy-proof\"))\n\t// -----------------------------</Block #2>-----------------------------------\n\n\t// -----------------------------<Block #3>------------------------------------\n\n\tledger.BeginTxBatch(3)\n\ttransaction3a, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyContract\"}, generateUUID(t), \"setX\", []string{\"{x: \\\"hello\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\ttransaction3b, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyOtherContract\"}, generateUUID(t), \"setY\", []string{\"{y: \\\"goodbuy\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\ttransaction3c, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyImportantContract\"}, generateUUID(t), \"setZ\", []string{\"{z: \\\"super\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\tledger.TxBegin(transaction3a.Txid)\n\tledger.SetState(\"MyContract\", \"x\", []byte(\"hello\"))\n\tledger.SetState(\"MyOtherContract\", \"y\", []byte(\"goodbuy\"))\n\tledger.SetState(\"MyImportantContract\", \"z\", []byte(\"super\"))\n\tledger.TxFinished(transaction3a.Txid, true)\n\tledger.CommitTxBatch(3, []*protos.Transaction{transaction3a, transaction3b, transaction3c}, nil, []byte(\"dummy-proof\"))\n\n\t// -----------------------------</Block #3>-----------------------------------\n\n\t// -----------------------------<Block #4>------------------------------------\n\n\tledger.BeginTxBatch(4)\n\t// Now we want to run the function 
'setX' in 'MyContract\n\n\t// Create a transaction'\n\ttransaction4a, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyContract\"}, generateUUID(t), \"setX\", []string{\"{x: \\\"hello\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\ttransaction4b, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyOtherContract\"}, generateUUID(t), \"setY\", []string{\"{y: \\\"goodbuy\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\ttransaction4c, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyImportantContract\"}, generateUUID(t), \"setZ\", []string{\"{z: \\\"super\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\ttransaction4d, err := protos.NewTransaction(protos.ChaincodeID{Path: \"MyMEGAContract\"}, generateUUID(t), \"setMEGA\", []string{\"{mega: \\\"MEGA\\\"}\"})\n\tif err != nil {\n\t\tt.Logf(\"Error creating NewTransaction: %s\", err)\n\t\tt.Fail()\n\t}\n\n\t// Run this transction in the VM. 
The VM updates the state\n\tledger.TxBegin(transaction4a.Txid)\n\tledger.SetState(\"MyContract\", \"x\", []byte(\"hello\"))\n\tledger.SetState(\"MyOtherContract\", \"y\", []byte(\"goodbuy\"))\n\tledger.SetState(\"MyImportantContract\", \"z\", []byte(\"super\"))\n\tledger.SetState(\"MyMEGAContract\", \"mega\", []byte(\"MEGA\"))\n\tledger.TxFinished(transaction4a.Txid, true)\n\n\t// Create the 4th block and add it to the chain\n\tledger.CommitTxBatch(4, []*protos.Transaction{transaction4a, transaction4b, transaction4c, transaction4d}, nil, []byte(\"dummy-proof\"))\n\t// -----------------------------</Block #4>-----------------------------------\n\n\treturn\n}", "func (s *SmartContract) InitLedger(ctx contractapi.TransactionContextInterface) error {\n\tassets := []Asset{\n\t\t{ID: strconv.Itoa(1), Color: \"blue\", Size: 5, Owner: \"Tomoko\", AppraisedValue: 300},\n\t\t{ID: strconv.Itoa(2), Color: \"red\", Size: 5, Owner: \"Brad\", AppraisedValue: 400},\n\t}\n\n\ttotalAssets = 6\n\n\tfor _, asset := range assets {\n\t\tassetJSON, err := json.Marshal(asset)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = ctx.GetStub().PutState(asset.ID, assetJSON)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to put to world state. %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *SmartContract) InitLedger(ctx contractapi.TransactionContextInterface) error {\n\n\tproducts := []Product{\n\t\t{ID: \"p1\", Description: \"apples\", Status: \"MANUFACTURED\"},\n\t\t{ID: \"p2\", Description: \"coffee\", Status: \"MANUFACTURED\"},\n\t\t{ID: \"p3\", Description: \"bag\", Status: \"MANUFACTURED\"},\n\t}\n\n\tfor _, product := range products {\n\t\tproductJSON, err := json.Marshal(product)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = ctx.GetStub().PutState(product.ID, productJSON)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to put to world state. %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
whenAll wrapps multiple dependencies in a single dependency the result is completed once any dependency completes with an error or once all dependencies ran to completion without error
func whenAll(dependencies []dependency) dependency { completionSource := &completionSource{ done: make(chan struct{}), } go func() { defer close(completionSource.done) cases := make([]reflect.SelectCase, len(dependencies)) for ix, dependency := range dependencies { cases[ix] = reflect.SelectCase{ Chan: reflect.ValueOf(dependency.Done()), Dir: reflect.SelectRecv, } } for len(dependencies) > 0 { ix, _, _ := reflect.Select(cases) if err := dependencies[ix].Err(); err != nil { completionSource.err = err return } cases = append(cases[:ix], cases[ix+1:]...) dependencies = append(dependencies[:ix], dependencies[ix+1:]...) } }() return completionSource }
[ "func (m *manager) BuildAll(options BuildOptions) error {\n\tif m.config == nil || m.config.Dependencies == nil || len(m.config.Dependencies) == 0 {\n\t\treturn nil\n\t}\n\n\t// Resolve all dependencies\n\tdependencies, err := m.resolver.Resolve(options.UpdateDependencies)\n\tif err != nil {\n\t\tif _, ok := err.(*cyclicError); ok {\n\t\t\treturn errors.Errorf(\"%v.\\n To allow cyclic dependencies run with the '%s' flag\", err, ansi.Color(\"--allow-cyclic\", \"white+b\"))\n\t\t}\n\n\t\treturn err\n\t}\n\n\tdefer m.log.StopWait()\n\n\tif options.Verbose == false {\n\t\tm.log.Infof(\"To display the complete dependency build run with the '--verbose-dependencies' flag\")\n\t}\n\n\t// Deploy all dependencies\n\tfor i := 0; i < len(dependencies); i++ {\n\t\tvar (\n\t\t\tdependency = dependencies[i]\n\t\t\tbuff = &bytes.Buffer{}\n\t\t\tdependencyLogger = m.log\n\t\t)\n\n\t\t// If not verbose log to a stream\n\t\tif options.Verbose == false {\n\t\t\tm.log.StartWait(fmt.Sprintf(\"Building dependency %d of %d: %s\", i+1, len(dependencies), dependency.ID))\n\t\t\tdependencyLogger = log.NewStreamLogger(buff, logrus.InfoLevel)\n\t\t} else {\n\t\t\tm.log.Infof(fmt.Sprintf(\"Building dependency %d of %d: %s\", i+1, len(dependencies), dependency.ID))\n\t\t}\n\n\t\terr := dependency.Build(options.SkipPush, options.ForceDeployDependencies, options.ForceBuild, dependencyLogger)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"Error building dependency %s: %s %v\", dependency.ID, buff.String(), err)\n\t\t}\n\n\t\tm.log.Donef(\"Built dependency %s\", dependency.ID)\n\t}\n\n\tm.log.StopWait()\n\tm.log.Donef(\"Successfully built %d dependencies\", len(dependencies))\n\n\treturn nil\n}", "func WhenAll(tasks ...Task) Task {\n\treturn Run(func() (interface{}, error) {\n\t\tif len(tasks) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tvar waitGroup sync.WaitGroup\n\t\twaitGroup.Add(len(tasks))\n\t\tfor _, task := range tasks {\n\t\t\tgo func(task Task) {\n\t\t\t\tdefer 
waitGroup.Done()\n\t\t\t\t_, _ = task.Result()\n\t\t\t}(task)\n\t\t}\n\t\twaitGroup.Wait()\n\n\t\treturn nil, nil\n\t})\n}", "func (m *manager) DeployAll(options DeployOptions) error {\n\tif m.config == nil || m.config.Dependencies == nil || len(m.config.Dependencies) == 0 {\n\t\treturn nil\n\t}\n\n\t// Resolve all dependencies\n\tdependencies, err := m.resolver.Resolve(options.UpdateDependencies)\n\tif err != nil {\n\t\tif _, ok := err.(*cyclicError); ok {\n\t\t\treturn errors.Errorf(\"%v.\\n To allow cyclic dependencies run with the '%s' flag\", err, ansi.Color(\"--allow-cyclic\", \"white+b\"))\n\t\t}\n\n\t\treturn err\n\t}\n\n\tdefer m.log.StopWait()\n\n\tif options.Verbose == false {\n\t\tm.log.Infof(\"To display the complete dependency deployment run with the '--verbose-dependencies' flag\")\n\t}\n\n\t// Deploy all dependencies\n\tfor i := 0; i < len(dependencies); i++ {\n\t\tvar (\n\t\t\tdependency = dependencies[i]\n\t\t\tbuff = &bytes.Buffer{}\n\t\t\tdependencyLogger = m.log\n\t\t)\n\n\t\t// If not verbose log to a stream\n\t\tif options.Verbose == false {\n\t\t\tm.log.StartWait(fmt.Sprintf(\"Deploying dependency %d of %d: %s\", i+1, len(dependencies), dependency.ID))\n\t\t\tdependencyLogger = log.NewStreamLogger(buff, logrus.InfoLevel)\n\t\t} else {\n\t\t\tm.log.Infof(fmt.Sprintf(\"Deploying dependency %d of %d: %s\", i+1, len(dependencies), dependency.ID))\n\t\t}\n\n\t\terr := dependency.Deploy(m.client, options.SkipPush, options.ForceDeployDependencies, options.SkipBuild, options.ForceBuild, options.ForceDeploy, dependencyLogger)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"Error deploying dependency %s: %s %v\", dependency.ID, buff.String(), err)\n\t\t}\n\n\t\t// Prettify path if its a path deployment\n\t\tm.log.Donef(\"Deployed dependency %s\", dependency.ID)\n\t}\n\n\tm.log.StopWait()\n\tm.log.Donef(\"Successfully deployed %d dependencies\", len(dependencies))\n\n\treturn nil\n}", "func WhenAny(tasks ...Task) Task {\n\treturn Run(func() 
(interface{}, error) {\n\t\tif len(tasks) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tfinished := make(chan Task)\n\n\t\tvar waitGroup sync.WaitGroup\n\t\twaitGroup.Add(1)\n\n\t\t// perhaps add cancelation here to avoid blocking these unfinished go routines?\n\t\tfor _, task := range tasks {\n\t\t\tgo func(task Task, finished chan Task) {\n\t\t\t\tdefer waitGroup.Done()\n\t\t\t\t_, _ = task.Result()\n\t\t\t\tfinished <- task\n\t\t\t}(task, finished)\n\t\t}\n\t\twaitGroup.Wait()\n\t\treturn <-finished, nil\n\t})\n}", "func DeployAll(config *latest.Config, cache *generated.Config, allowCyclic, updateDependencies, skipPush, forceDeployDependencies, forceBuild, forceDeploy bool, logger log.Logger) error {\n\tif config == nil || config.Dependencies == nil || len(*config.Dependencies) == 0 {\n\t\treturn nil\n\t}\n\n\t// Create a new dependency resolver\n\tresolver, err := NewResolver(config, cache, allowCyclic, logger)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"new resolver\")\n\t}\n\n\t// Resolve all dependencies\n\tdependencies, err := resolver.Resolve(*config.Dependencies, updateDependencies)\n\tif err != nil {\n\t\tif _, ok := err.(*CyclicError); ok {\n\t\t\treturn fmt.Errorf(\"%v.\\n To allow cyclic dependencies run with the '%s' flag\", err, ansi.Color(\"--allow-cyclic\", \"white+b\"))\n\t\t}\n\n\t\treturn err\n\t}\n\n\tdefer logger.StopWait()\n\n\t// Deploy all dependencies\n\tfor i := 0; i < len(dependencies); i++ {\n\t\tdependency := dependencies[i]\n\n\t\tlogger.StartWait(fmt.Sprintf(\"Deploying dependency %d of %d: %s\", i+1, len(dependencies), dependency.ID))\n\t\tbuff := &bytes.Buffer{}\n\t\tstreamLog := log.NewStreamLogger(buff, logrus.InfoLevel)\n\n\t\terr := dependency.Deploy(skipPush, forceDeployDependencies, forceBuild, forceDeploy, streamLog)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error deploying dependency %s: %s %v\", dependency.ID, buff.String(), err)\n\t\t}\n\n\t\t// Prettify path if its a path deployment\n\t\tif 
dependency.DependencyConfig.Source.Path != nil {\n\t\t\tlogger.Donef(\"Deployed dependency %s\", dependency.ID[len(filepath.Dir(dependency.ID))+1:])\n\t\t} else {\n\t\t\tlogger.Donef(\"Deployed dependency %s\", dependency.ID)\n\t\t}\n\t}\n\n\tlogger.StopWait()\n\tlogger.Donef(\"Successfully deployed %d dependencies\", len(dependencies))\n\n\treturn nil\n}", "func WaitAll(services ...Service) <-chan struct{} {\n\tvar wg sync.WaitGroup\n\n\tfor _, s := range services {\n\t\twg.Add(1)\n\t\tgo func(s Service) {\n\t\t\tfor range s.Done() {\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(s)\n\t}\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\treturn done\n}", "func All() {\n\tmg.SerialDeps(Build, Lint, Test)\n}", "func WaitAll(tasks ...Task) {\n\tt := WhenAll(tasks...)\n\tt.Wait()\n}", "func (config *Config) FetchAllDependencies(force bool) error {\n\turls := []string{}\n\tfor url := range config.Dependencies {\n\t\turls = append(urls, url)\n\t}\n\tsort.Strings(urls)\n\n\ttitle := \"Checking dependencies\"\n\tif force {\n\t\ttitle = \"Updating dependencies\"\n\t}\n\n\treturn config.UI.InMeasuredFrame(title, func(ctx *gluey.Ctx, frame *gluey.Frame) error {\n\t\tgroup := ctx.NewSpinGroup()\n\t\tfor _, url := range urls {\n\t\t\tdepurl := url\n\t\t\tgroup.Go(depurl, func() error {\n\t\t\t\treturn config.addDep(depurl, force)\n\t\t\t})\n\t\t}\n\t\treturn config.UI.Debreif(group.Wait())\n\t})\n}", "func DoAll(err error, f func(error)) {\n\tswitch terr := err.(type) {\n\tcase *failure:\n\t\tfor _, serr := range Stack(err) {\n\t\t\tf(serr)\n\t\t}\n\tcase *errorCollection:\n\t\tfor _, aerr := range All(err) {\n\t\t\tf(aerr)\n\t\t}\n\tdefault:\n\t\tf(terr)\n\t}\n}", "func WaitAll(futures []*Future) {\r\n\tfor _,future:= range futures {\r\n\t\tfuture.Join()\r\n\t}\r\n\tfor _,future:= range futures {\r\n\t\tif !future.Success() && !future.Cancelled() {\r\n\t\t\terr:= future.Error()\r\n\t\t\tif err != nil 
{\r\n\t\t\t\tpanic(err)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}", "func (m *manager) PurgeAll(verbose bool) error {\n\tif m.config == nil || m.config.Dependencies == nil || len(m.config.Dependencies) == 0 {\n\t\treturn nil\n\t}\n\n\t// Resolve all dependencies\n\tdependencies, err := m.resolver.Resolve(false)\n\tif err != nil {\n\t\tif _, ok := err.(*cyclicError); ok {\n\t\t\treturn errors.Errorf(\"%v.\\n To allow cyclic dependencies run with the '%s' flag\", err, ansi.Color(\"--allow-cyclic\", \"white+b\"))\n\t\t}\n\n\t\treturn errors.Wrap(err, \"resolve dependencies\")\n\t}\n\n\tdefer m.log.StopWait()\n\n\tif verbose == false {\n\t\tm.log.Infof(\"To display the complete dependency deletion run with the '--verbose-dependencies' flag\")\n\t}\n\n\t// Purge all dependencies\n\tfor i := len(dependencies) - 1; i >= 0; i-- {\n\t\tvar (\n\t\t\tdependency = dependencies[i]\n\t\t\tbuff = &bytes.Buffer{}\n\t\t\tdependencyLogger = m.log\n\t\t)\n\n\t\t// If not verbose log to a stream\n\t\tif verbose == false {\n\t\t\tm.log.StartWait(fmt.Sprintf(\"Purging %d dependencies\", i+1))\n\t\t\tdependencyLogger = log.NewStreamLogger(buff, logrus.InfoLevel)\n\t\t}\n\n\t\terr := dependency.Purge(m.client, dependencyLogger)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"Error deploying dependency %s: %s %v\", dependency.ID, buff.String(), err)\n\t\t}\n\n\t\tm.log.Donef(\"Purged dependency %s\", dependency.ID)\n\t}\n\n\tm.log.StopWait()\n\tm.log.Donef(\"Successfully purged %d dependencies\", len(dependencies))\n\n\treturn nil\n}", "func (m *manager) UpdateAll() error {\n\tif m.config == nil || m.config.Dependencies == nil || len(m.config.Dependencies) == 0 {\n\t\treturn nil\n\t}\n\n\tm.log.StartWait(\"Update dependencies\")\n\tdefer m.log.StopWait()\n\n\t// Resolve all dependencies\n\t_, err := m.resolver.Resolve(true)\n\tif err != nil {\n\t\tif _, ok := err.(*cyclicError); ok {\n\t\t\treturn errors.Errorf(\"%v.\\n To allow cyclic dependencies run with the '%s' flag\", err, 
ansi.Color(\"--allow-cyclic\", \"white+b\"))\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (sl TaskList) RunAll(handler func(*Task)) error {\n\trunningTasks := new(util.Counter)\n\terrors := util.NewErrorList()\n\tgate := make(chan struct{})\n\n\t// Keep looping until all tasks report either finished,\n\t// skipped, or failed.\n\tfor !sl.IsFinished() {\n\t\trtr, err := sl.ReadyToRun() // All dependencies met successfully\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(`failed determine runnable tasks: %w`, err)\n\t\t}\n\t\tnewTasks := len(rtr)\n\t\tif runningTasks.Val() == 0 && newTasks == 0 && !sl.IsFinished() {\n\t\t\t// No running tasks, no new tasks, but some tasks are still\n\t\t\t// waiting to run. That means a dependency loop.\n\t\t\ttaskdump := new(strings.Builder)\n\t\t\tfor name, task := range sl {\n\t\t\t\tfmt.Fprintf(taskdump, \"%s: %s\\n\", name, task.GetStatus())\n\t\t\t\tfor _, dep := range task.Dependencies {\n\t\t\t\t\tfmt.Fprintf(taskdump, \"\\t- %s\\n\", dep)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"deadlock detected: not finished, but not ready to run\\n%s\", taskdump.String())\n\t\t}\n\n\t\t// Keep track of how many tasks are in-flight.\n\t\trunningTasks.Add(newTasks)\n\n\t\t// This loop may be empty if there are still\n\t\t// tasks running.\n\t\tfor _, task := range rtr {\n\t\t\tgo func(s *Task) {\n\t\t\t\tdefer func() {\n\t\t\t\t\trunningTasks.Dec()\n\t\t\t\t\t// Gate is unbuffered. 
This will block until\n\t\t\t\t\t// the loop comes round again.\n\t\t\t\t\tgate <- struct{}{}\n\t\t\t\t}()\n\t\t\t\terrInner := s.Run(handler)\n\t\t\t\tif errInner != nil {\n\t\t\t\t\terrors.Appendf(`error running task %q: %w`, s.Name, errInner)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}(task)\n\t\t}\n\t\t// Wait until one task finishes, then loop around\n\t\t// to re-evaluate if any tasks have had their\n\t\t// dependencies successfully run.\n\t\t// Because the above loop may not run, <-gate should\n\t\t// be called as many times as gate <- struct{}{} is\n\t\t// called.\n\t\t<-gate\n\t\tif errors.Len() > 0 {\n\t\t\tfor _, err = range errors.Errors() {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\treturn fmt.Errorf(`received one or more errors running tasks, the last of which is %w`, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s YieldingWaitStrategy) NotifyAll() {\n}", "func installAllDependencies(mypackage Package, wg *sync.WaitGroup) {\n\n // TODO: need to check if the dependency is already downloaded\n // TODO: what if one of the dependency is removed from the gopackage.json\n\n // read the gopackage.json's dependencies section\n for k,_ := range mypackage.Dependencies {\n wg.Add(1)\n fmt.Printf(\"Dependency = %s\\n\", k)\n go func(packageName string) {\n fetch(packageName)\n wg.Done()\n }(k)\n }\n}", "func ApplyAll(manifests name.ManifestMap, version pkgversion.Version, opts *kubectlcmd.Options) (CompositeOutput, error) {\n\tlog.Infof(\"Preparing manifests for these components:\")\n\tfor c := range manifests {\n\t\tlog.Infof(\"- %s\", c)\n\t}\n\tlog.Infof(\"Component dependencies tree: \\n%s\", installTreeString())\n\tif err := InitK8SRestClient(opts.Kubeconfig, opts.Context); err != nil {\n\t\treturn nil, err\n\t}\n\treturn applyRecursive(manifests, version, opts)\n}", "func (h *Handler) Wait(options ...Options) error {\n\n\toption := &Options{\n\t\tdefaultRetries,\n\t\tdefaultTimeout,\n\t}\n\n\tif len(options) >= 1 {\n\t\toption = &options[0]\n\t}\n\n\tvar wg 
sync.WaitGroup\n\terrorMessages := make(chan error, len(h.dependencies)+1)\n\n\tfor name, check := range h.dependencies {\n\t\twg.Add(1)\n\t\tgo func(n string, c Check) {\n\t\t\tlogrus.Infof(\"Waiting for %s\\n\", n)\n\t\t\tif err := performCheck(n, c, *option); err != nil {\n\t\t\t\terrorMessages <- err\n\t\t\t} else {\n\t\t\t\tlogrus.Debugf(\"%s is ready\\n\", n)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name, check)\n\t}\n\n\twg.Wait()\n\tclose(errorMessages)\n\n\treturn toError(errorMessages)\n}", "func onePackageCheckerRuner(command string, args []string, deps []string, errCh chan<- checkerErr, done func()) {\n\tvar wg sync.WaitGroup\n\twg.Add(len(deps))\n\tfor _, dep := range deps {\n\t\trate <- struct{}{}\n\t\tgo func(dep string) {\n\t\t\tdefer func() {\n\t\t\t\t<-rate\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tmultiCheckerRuner(command, append(args, dep), errCh)\n\t\t}(dep)\n\t}\n\twg.Wait()\n\tdone()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
then schedules a continuation task once the current promise is completed. It propagates errors and returns a promise wrapping the continuation
func (p promise) then(next func(ctx context.Context) error) promise { completionSource := &completionSource{ done: make(chan struct{}), } go func() { defer close(completionSource.done) select { case <-p.Done(): if err := p.Err(); err != nil { completionSource.err = err return } completionSource.err = p.scheduler.schedule(next).wait() } }() return newPromise(p.scheduler, completionSource) }
[ "func (t *task) ContinueWith(ctx context.Context, nextAction func(interface{}, error) (interface{}, error)) Task {\n\treturn Invoke(ctx, func(context.Context) (interface{}, error) {\n\t\tresult, err := t.Outcome()\n\t\treturn nextAction(result, err)\n\t})\n}", "func scheduleAndUnwrap(scheduler scheduler, do func(ctx context.Context) (dependency, error)) promise {\n\tcompletionSource := &completionSource{\n\t\tdone: make(chan struct{}),\n\t}\n\tscheduler.schedule(func(ctx context.Context) error {\n\t\tp, err := do(ctx)\n\t\tif err != nil {\n\t\t\tcompletionSource.err = err\n\t\t\tclose(completionSource.done)\n\t\t\treturn err\n\t\t}\n\t\tgo func() {\n\t\t\t<-p.Done()\n\t\t\tcompletionSource.err = p.Err()\n\t\t\tclose(completionSource.done)\n\t\t}()\n\t\treturn nil\n\t})\n\treturn newPromise(scheduler, completionSource)\n}", "func (promise *Promise) Then(fulfillment func(data interface{}) interface{}) *Promise {\n\tpromise.mutex.Lock()\n\tdefer promise.mutex.Unlock()\n\n\tif promise.state == pending {\n\t\tpromise.wg.Add(1)\n\t\tpromise.then = append(promise.then, fulfillment)\n\t} else if promise.state == fulfilled {\n\t\tpromise.result = fulfillment(promise.result)\n\t}\n\n\treturn promise\n}", "func (p *promise) Then(promise Promise) Promise {\n\treturn p.Thenf(func() Promise { return promise })\n}", "func (p *promise) ThenWithResult(factory FactoryWithResult) Promise {\n\tresult := NewPromise()\n\n\tp.Always(func(p2 Controller) {\n\t\tif p2.IsSuccess() {\n\t\t\tfactory(p2.Result()).Always(func(p3 Controller) {\n\t\t\t\tresult.DeliverWithPromise(p3)\n\t\t\t})\n\t\t} else {\n\t\t\tresult.DeliverWithPromise(p2)\n\t\t}\n\t})\n\n\treturn result\n}", "func (promise *Promise) Catch(rejection func(err error) error) *Promise {\n\n\tresult, err := promise.Await()\n\n\tnewPromise := New(func(context *Promise) {\n\n\t\tif err != nil {\n\t\t\tcontext.reject(rejection(err))\n\t\t\treturn\n\t\t}\n\t\tcontext.resolve(result)\n\t})\n\n\treturn newPromise\n}", "func (r 
RequestPromise) Then(callable func(data *js.Object)) {\n\tr.Call(\"then\", js.MakeFunc(\n\t\tfunc(this *js.Object, args []*js.Object) interface{} {\n\t\t\tcallable(args[0])\n\t\t\treturn nil\n\t\t}))\n}", "func (f *FirstErrPromise) Promise() func(*Record, error) {\n\tf.wg.Add(1)\n\treturn f.promise\n}", "func (p *Promise) Then(success func(interface{}) error, failure func(error)) *Promise {\n\n\t// Create the promise that the method is going to return\n\tresult := new(Promise)\n\n\t// By giving a small buffer size we can bu sure that the method doesnt stop executing if another handler is not chained to this one\n\tresult.successChannel = make(chan interface{}, 1)\n\tresult.failureChannel = make(chan error, 1)\n\n\ttimeout := time.After(1 * time.Second)\n\n\t// Now we add the code that decides if the promise was successful or not\n\t// This will allow us to return the new promise synchronously while it is actually being process asynchronosly\n\tgo func() {\n\t\tselect {\n\t\tcase obj := <-p.successChannel:\n\t\t\tnewErr := success(obj)\n\t\t\tif newErr == nil {\n\t\t\t\tresult.successChannel <- obj\n\t\t\t} else {\n\t\t\t\tresult.failureChannel <- newErr\n\t\t\t}\n\t\tcase err := <-p.failureChannel:\n\t\t\tfailure(err)\n\t\t\tresult.failureChannel <- err\n\t\tcase <-timeout:\n\t\t\tfailure(errors.New(\"Promise timeout\"))\n\t\t}\n\t}()\n\treturn result\n}", "func (f *FirstErrPromise) promise(_ *Record, err error) {\n\tdefer f.wg.Done()\n\tif err != nil && atomic.SwapUint32(&f.once, 1) == 0 {\n\t\tf.err = err\n\t\tif f.cl != nil {\n\t\t\tf.wg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer f.wg.Done()\n\t\t\t\tf.cl.AbortBufferedRecords(context.Background())\n\t\t\t}()\n\t\t}\n\t}\n}", "func (t *Thread) RunContinuation(c Cont) (err error) {\n\tvar next Cont\n\tvar errContCount = 0\n\t_ = t.triggerCall(t, c)\n\tfor c != nil {\n\t\tif t != t.gcThread {\n\t\t\tt.runPendingFinalizers()\n\t\t}\n\t\tt.currentCont = c\n\t\tnext, err = c.RunInThread(t)\n\t\tif err != nil 
{\n\t\t\trtErr := ToError(err)\n\t\t\tif rtErr.Handled() {\n\t\t\t\treturn rtErr\n\t\t\t}\n\t\t\terr = rtErr.AddContext(c, -1)\n\t\t\terrContCount++\n\t\t\tif t.messageHandler != nil {\n\t\t\t\tif errContCount > maxErrorsInMessageHandler {\n\t\t\t\t\treturn newHandledError(errErrorInMessageHandler)\n\t\t\t\t}\n\t\t\t\tnext = t.messageHandler.Continuation(t, newMessageHandlerCont(c))\n\t\t\t} else {\n\t\t\t\tnext = newMessageHandlerCont(c)\n\t\t\t}\n\t\t\tnext.Push(t.Runtime, ErrorValue(err))\n\t\t}\n\t\tc = next\n\t}\n\treturn\n}", "func Go(f func(context.Context)) Task {\n\tcloseChan := make(chan struct{})\n\tcancelChan := make(chan struct{})\n\tdCtx := ctx{\n\t\tclosed: closeChan,\n\t\tdone: cancelChan,\n\t}\n\n\tgo func() {\n\t\tdefer close(closeChan)\n\t\tf(dCtx)\n\t}()\n\n\treturn Task{cancelChan, new(sync.Once), closeChan}\n}", "func (c *Coaxer) Coax(ctx context.Context, manifest ManifestFunc, desc string) Promise {\n\trun := coaxRun{\n\t\tCoaxer: *c, // This is a copy, so don't worry about modifying it.\n\t\tctx: ctx,\n\t\tdesc: desc,\n\t\tmanifest: manifest,\n\t\tresult: make(chan Result),\n\t\tfinalResult: make(chan Result),\n\t}\n\treturn run.future()\n}", "func (p *promise) Success(handler SuccessHandler) Promise {\n\tvar notify bool\n\n\tp.lock.Lock()\n\tdefer func() {\n\t\t// release the lock (before calling handler)\n\t\tp.lock.Unlock()\n\n\t\t// do we need to directly notify?\n\t\tif notify {\n\t\t\thandler(p.Result())\n\t\t}\n\t}()\n\n\t// already delivered and successful?\n\tif p.IsSuccess() {\n\t\t// direct invoke\n\t\tnotify = true\n\t} else {\n\t\t// deferred invoke\n\t\tp.successHandlers = append(p.successHandlers, handler)\n\t}\n\n\treturn p\n}", "func (p *promise) SucceedWithResult(result interface{}) Controller {\n\treturn p.deliver(result)\n}", "func (p *promise) deliver(result interface{}) Controller {\n\tvar wasDelivered bool\n\n\tp.lock.Lock()\n\tdefer func() {\n\t\t// release the lock prior to notifying\n\t\tp.lock.Unlock()\n\n\t\t// do 
we need to notify\n\t\tif wasDelivered {\n\t\t\tp.notify()\n\t\t}\n\t}()\n\n\tif !p.IsDelivered() {\n\t\t// invoke callbacks via notify()\n\t\twasDelivered = true\n\n\t\t// if nil is delivered, use nilResult as a non-nil place holder\n\t\tif result == nil {\n\t\t\tresult = nilResult\n\t\t}\n\n\t\t// store the delivered result\n\t\tp.result.Store(result)\n\t} else {\n\t\t// This would be great as a panic, but in 'all' and 'any' scenarios it\n\t\t// is difficult to prevent async code from double completing\n\t\tlog.Println(\"Attempt to deliver promise that is already delivered\")\n\t}\n\n\treturn p\n}", "func AbortingFirstErrPromise(cl *Client) *FirstErrPromise {\n\treturn &FirstErrPromise{\n\t\tcl: cl,\n\t}\n}", "func (p *promise) Canceled(handler CanceledHandler) Promise {\n\tvar notify bool\n\n\tp.lock.Lock()\n\tdefer func() {\n\t\t// release the lock (before invoking handler)\n\t\tp.lock.Unlock()\n\n\t\t// is direct notify?\n\t\tif notify {\n\t\t\thandler()\n\t\t}\n\t}()\n\n\t// is delivered and canceled?\n\tif p.IsCanceled() {\n\t\t// direct invoke\n\t\tnotify = true\n\t} else {\n\t\t// deferred invoke\n\t\tp.canceledHandlers = append(p.canceledHandlers, handler)\n\t}\n\n\treturn p\n}", "func (promise *Promise) Catch(rejection func(err error) error) *Promise {\n\tpromise.mutex.Lock()\n\tdefer promise.mutex.Unlock()\n\n\tif promise.state == pending {\n\t\tpromise.wg.Add(1)\n\t\tpromise.catch = append(promise.catch, rejection)\n\t} else if promise.state == rejected {\n\t\tpromise.err = rejection(promise.err)\n\t}\n\n\treturn promise\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
newPromise creates a promise out of a dependency
// newPromise builds a promise that exposes the given dependency's
// completion and uses the given scheduler for any continuations.
func newPromise(scheduler scheduler, dependency dependency) promise {
	p := promise{
		scheduler:  scheduler,
		dependency: dependency,
	}
	return p
}
[ "func newPromise() *promise {\n\tp := &promise{}\n\n\tp.JS = js.Global.Get(\"Promise\").New(func(resolve *js.Object, reject *js.Object) {\n\t\tp.Resolve = func(val interface{}) { resolve.Invoke(val) }\n\t\tp.Reject = func(val interface{}) { reject.Invoke(val) }\n\t})\n\n\treturn p\n}", "func makePromise(f func() (any, error)) js.Value {\n\thandler := js.FuncOf(func(this js.Value, args []js.Value) any {\n\t\tresolve := args[0]\n\t\treject := args[1]\n\t\tgo func() {\n\t\t\tif res, err := f(); err == nil {\n\t\t\t\tresolve.Invoke(res)\n\t\t\t} else {\n\t\t\t\treject.Invoke(err.Error())\n\t\t\t}\n\t\t}()\n\t\treturn nil\n\t})\n\n\tpromiseConstructor := js.Global().Get(\"Promise\")\n\treturn promiseConstructor.New(handler)\n}", "func NewPromise() Promise {\n\treturn &promise{sync.Mutex{}, make(chan error), nil, nil, false}\n}", "func NewPromise() Controller {\n\treturn &promise{}\n}", "func NewPromise(function func(resolve func(interface{}), reject func(interface{}))) *js.Object {\n\tp := newPromise()\n\tgo func() {\n\n\t\t// Reject on panic\n\t\tdefer CallOnPanic(p.Reject)\n\n\t\t// Call the function, allowing the user to resolve/reject\n\t\tfunction(p.Resolve, p.Reject)\n\t}()\n\treturn p.JS\n}", "func New(executor func(resolve func(interface{}), reject func(error))) *Promise {\n\tvar wg = &sync.WaitGroup{}\n\twg.Add(1)\n\n\tvar promise = &Promise{\n\t\tstate: pending,\n\t\texecutor: executor,\n\t\tthen: make([]func(interface{}) interface{}, 0),\n\t\tcatch: make([]func(error) error, 0),\n\t\tresult: nil,\n\t\terr: nil,\n\t\tmutex: &sync.Mutex{},\n\t\twg: wg,\n\t}\n\n\tgo func() {\n\t\tdefer promise.handlePanic()\n\t\tpromise.executor(promise.resolve, promise.reject)\n\t}()\n\n\treturn promise\n}", "func newPushPromise(conn *Conn, req *http.Request, s *Stream) *frame.Frame {\n\n\t// Initialize request headers\n\thfs := initReqHFs(req)\n\n\t// Encode headers\n\tencodedHeaders := conn.hpack.Encode(hfs) // Header compression\n\tpayloadLength := 
uint32(len(encodedHeaders))\n\n\t// Choose next stream identifier\n\t// RFC7540 - Section 5.1.1 states that new stream ids from the server must be even\n\tconn.prevStreamID = conn.prevStreamID + 2 // prevStreamID starts at zero, so it is always even\n\n\tpushFrame := &frame.Frame{\n\t\tID: s.id,\n\t\tType: frame.PushPromiseType,\n\t\tFlags: types.PushPromiseFlags{\n\t\t\tEndHeaders: true,\n\t\t\tPadded: false,\n\t\t},\n\t\tPayload: types.PushPromisePayload{\n\t\t\tStreamID: conn.prevStreamID,\n\t\t\tFragment: encodedHeaders,\n\t\t\tPadLength: 0,\n\t\t},\n\t\tLength: payloadLength + 4,\n\t}\n\n\treturn pushFrame\n}", "func NewPromise() RedisPubSubPromise {\n\treturn &awaiter{\n\t\tc: make(chan interface{}),\n\t}\n}", "func (p *Pool) NewFuture(type T)(f func() (T, error))", "func (p *Politician) CreatePromise(name string, details string, status PromiseStatus, category PromiseCategory, source Source) {\n\tp.Promises = append(p.Promises, Promise{\n\t\tPoliticianID: p.ID,\n\t\tPolitician: p,\n\t\tName: name,\n\t\tDetails: details,\n\t\tHistory: []PromiseStatus{status},\n\t\tCategory: category,\n\t\tSources: []Source{source},\n\t})\n}", "func newFuture() *future {\n\treturn &future{\n\t\twaitCh: make(chan struct{}),\n\t\tid: uuid.Generate(),\n\t}\n}", "func populatePromise(p *Promise, r interface{}, e error) *Promise {\n\n\t// If promise is undefined\n\tif p == nil {\n\n\t\t// Create a new promise\n\t\tp = getPromiseOrEmptyPromise(nil)\n\n\t\t// If result provided, set status and result values\n\t\tif r != nil {\n\t\t\tp.status = RESOLVED\n\t\t\tp.res = r\n\t\t\tp.err = nil\n\t\t}\n\n\t\t// If error provided, set status and error values\n\t\tif e != nil {\n\t\t\tp.status = REJECTED\n\t\t\tp.err = e\n\t\t\tp.res = nil\n\t\t}\n\n\t\t// If result and error both not provided, set status as RESOLVED (Case where no more then or catch are required)\n\t\tif r == nil && e == nil {\n\t\t\tp.status = RESOLVED\n\t\t}\n\t}\n\treturn p\n}", "func New(f interface{}, args 
...interface{}) *Promise {\n\t// Extract the type\n\tp := &Promise{\n\t\tcond: sync.Cond{L: new(sync.Mutex)},\n\t\tt: simpleCall,\n\t}\n\n\tfunctionRv := reflect.ValueOf(f)\n\n\tif functionRv.Kind() != reflect.Func {\n\t\tpanic(errors.Errorf(\"expected Function, got %s\", functionRv.Kind()))\n\t}\n\n\treflectType := functionRv.Type()\n\n\tinputs := []reflect.Type{}\n\tfor i := 0; i < reflectType.NumIn(); i++ {\n\t\tinputs = append(inputs, reflectType.In(i))\n\t}\n\n\tif len(args) != len(inputs) {\n\t\tpanic(errors.Errorf(\"expected %d args, got %d args\", len(inputs), len(args)))\n\t}\n\n\tp.resultType, p.returnsError = getResultType(reflectType)\n\n\targValues := []reflect.Value{}\n\n\tfor i := 0; i < len(args); i++ {\n\t\tprovidedArgRv := reflect.ValueOf(args[i])\n\t\tprovidedArgType := providedArgRv.Type()\n\t\tif providedArgType != inputs[i] {\n\t\t\tpanic(errors.Errorf(\"for argument %d: expected type %s got type %s\", i, inputs[i], providedArgType))\n\t\t}\n\t\targValues = append(argValues, providedArgRv)\n\t}\n\tgo p.run(functionRv, nil, nil, 0, argValues)\n\treturn p\n}", "func WrapInPromise(f func() (interface{}, error)) js.Value {\n\tvar executor js.Func\n\texecutor = js.FuncOf(func(this js.Value, args []js.Value) interface{} {\n\t\tresolve := args[0]\n\t\treject := args[1]\n\t\tgo func() {\n\t\t\tdefer executor.Release()\n\t\t\tif result, err := f(); err != nil {\n\t\t\t\treject.Invoke(ErrorToJS(err))\n\t\t\t} else {\n\t\t\t\tresolve.Invoke(result)\n\t\t\t}\n\t\t}()\n\t\treturn nil\n\t})\n\treturn js.Global().Get(\"Promise\").New(executor)\n}", "func NewLockingPromise(lock sync.Locker, activeCounter counter.GoRoutineCounter) promise.LockingMutable {\n\treturn &lockingPromise{\n\t\tlock: lock,\n\t\tcond: *sync.NewCond(lock),\n\t\tactiveCounter: activeCounter,\n\t}\n}", "func newTask() task {\n\treturn task{}\n}", "func NewPromises() Promises {\n\treturn Promises{\n\t\tindex: make(map[string]int),\n\t\tkeys: make([]int, 0),\n\t\tvalues: 
make(map[int]string),\n\t}\n}", "func newAsync() *async {\n\treturn &async{state: pending, completions: []func(){}, done: make(chan struct{})}\n}", "func NewReceiptPromise(hash ethCommon.Hash) *ReceiptPromise {\n\treturn &ReceiptPromise{\n\t\tHash: hash,\n\t\tRespCh: make(chan ReceiptPromiseResponse, 1),\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this schedules a task that itself produces a promise, and returns a promise wrapping the produced promise
func scheduleAndUnwrap(scheduler scheduler, do func(ctx context.Context) (dependency, error)) promise { completionSource := &completionSource{ done: make(chan struct{}), } scheduler.schedule(func(ctx context.Context) error { p, err := do(ctx) if err != nil { completionSource.err = err close(completionSource.done) return err } go func() { <-p.Done() completionSource.err = p.Err() close(completionSource.done) }() return nil }) return newPromise(scheduler, completionSource) }
[ "func (p *Pool) Schedule(task func()) {\n\tp.schedule(task, nil)\n}", "func (t Task) Reschedule() Task {\n\t// linear backoff\n\ttimeOffset := time.Second * time.Duration(1+10*t.failures)\n\trescheduledTask := t\n\trescheduledTask.ScheduledTime = rescheduledTask.ScheduledTime.Add(timeOffset)\n\treturn rescheduledTask\n}", "func (s *sched) trigger(t Task) (TaskFuture, error) {\n\tt.SetExecution(time.Now().UTC())\n\treturn s.submit(t)\n}", "func (service *Service) Task(id string) *Task {\n\treturn service.Worker.Task(id).(*Task)\n}", "func scheduleTask(task func()) {\n\tvar nextRun time.Time\n\tsched := common.Conf().Schedule\n\tstartTime := time.Duration(sched.StartTime)\n\tendTime := time.Duration(sched.EndTime)\n\n\tfor {\n\t\tnow := time.Now()\n\t\ttoday := time.Date(\n\t\t\tnow.Year(), now.Month(), now.Day(),\n\t\t\t0, 0, 0, 0, time.Local,\n\t\t)\n\t\tif now.Before(today.Add(startTime)) {\n\t\t\tnextRun = today.Add(startTime)\n\t\t} else if now.After(today.Add(endTime)) {\n\t\t\tnextRun = today.Add(startTime + day)\n\t\t}\n\t\tif !nextRun.IsZero() {\n\t\t\tlog.Info().Msgf(\"next scheduled run: %v\", nextRun)\n\t\t\ttime.Sleep(time.Until(nextRun))\n\t\t}\n\t\ttask()\n\t}\n}", "func (s *scheduler) scheduleTask(t Task) error {\n\tif _, ok := s.tasks[t.name()]; ok {\n\t\treturn errTaskAlreadyExists\n\t}\n\n\ts.tasks[t.name()] = t\n\t// t.Start()\n\tt.run()\n\treturn nil\n}", "func (s Scheduler) Schedule(t TaskRecord) (string, error) {\n key := buildTaskKey(t)\n old, _ := s.Cache.Get(key)\n sched := false\n var oldTsk TaskRecord\n var err error\n\n if old == \"\" {\n sched = true\n } else {\n oldTsk, _ = ParseTask(old)\n if oldTsk.CanReschedule() {\n sched = true\n } else {\n err = errors.New(\"A Task with the submitted ID and UID [\" + oldTsk.ID + \", \" + oldTsk.UID + \"] is being processed\")\n }\n }\n\n if sched {\n return schedule(key, t, s.Queue, s.Cache)\n } else {\n return key, err\n }\n}", "func (l *limiter) Schedule(task string, slice time.Duration) 
(delay time.Duration) {\n\treply := make(chan time.Duration, 1)\n\tl.schedule <- ask{\n\t\tstring: task,\n\t\tDuration: slice,\n\t\treply: reply,\n\t}\n\treturn <-reply\n}", "func Schedule(in Duration, fn func()) (Job, error) {\n\treturn DefaultScheduler.Schedule(in, fn)\n}", "func (c *TestClient) TriggerTask(t *swarming.SwarmingRpcsNewTaskRequest) (*swarming.SwarmingRpcsTaskRequestMetadata, error) {\n\tcreatedTs := time.Now().UTC().Format(TIMESTAMP_FORMAT)\n\tid := uuid.NewV5(uuid.NewV1(), uuid.NewV4().String()).String()\n\trv := &swarming.SwarmingRpcsTaskRequestMetadata{\n\t\tRequest: &swarming.SwarmingRpcsTaskRequest{\n\t\t\tCreatedTs: createdTs,\n\t\t\tExpirationSecs: t.ExpirationSecs,\n\t\t\tName: t.Name,\n\t\t\tPriority: t.Priority,\n\t\t\tProperties: t.Properties,\n\t\t\tTags: t.Tags,\n\t\t},\n\t\tTaskId: id,\n\t\tTaskResult: &swarming.SwarmingRpcsTaskResult{\n\t\t\tCreatedTs: createdTs,\n\t\t\tName: t.Name,\n\t\t\tState: \"PENDING\",\n\t\t\tTaskId: id,\n\t\t\tTags: t.Tags,\n\t\t},\n\t}\n\tc.taskListMtx.Lock()\n\tdefer c.taskListMtx.Unlock()\n\tc.taskList = append(c.taskList, rv)\n\treturn rv, nil\n}", "func Delay(f func() interface{}) *Promise {\n\treturn &Promise{f: f}\n}", "func (s *Scheduler) Schedule(in Duration, fn func()) (Job, error) {\n\tif in < 1 {\n\t\t// Execute immediately\n\t\tgo fn()\n\t\treturn Job{}, nil\n\t}\n\n\tid, err := newJobID(s.now().Add(in))\n\tif err != nil {\n\t\treturn Job{}, fmt.Errorf(\"generating unique KSUID: %w\", err)\n\t}\n\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tif s.queue.Has(ksuid.KSUID(id)) {\n\t\treturn Job{}, fmt.Errorf(\"identifier collision: %s\", id.String())\n\t}\n\n\tif !s.queue.Set(ksuid.KSUID(id), fn) {\n\t\treturn id, nil\n\t}\n\n\ts.execute(id, fn)\n\n\treturn id, nil\n}", "func (p promise) then(next func(ctx context.Context) error) promise {\n\tcompletionSource := &completionSource{\n\t\tdone: make(chan struct{}),\n\t}\n\tgo func() {\n\t\tdefer close(completionSource.done)\n\t\tselect {\n\t\tcase 
<-p.Done():\n\t\t\tif err := p.Err(); err != nil {\n\t\t\t\tcompletionSource.err = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcompletionSource.err = p.scheduler.schedule(next).wait()\n\t\t}\n\t}()\n\treturn newPromise(p.scheduler, completionSource)\n}", "func (p *Pool) ScheduleTimeout(timeout time.Duration, task func()) error {\n\treturn p.schedule(task, time.After(timeout))\n}", "func NewPromise() RedisPubSubPromise {\n\treturn &awaiter{\n\t\tc: make(chan interface{}),\n\t}\n}", "func (t TaskFunc) Execute() { t() }", "func newPromise(scheduler scheduler, dependency dependency) promise {\n\treturn promise{scheduler: scheduler, dependency: dependency}\n}", "func (t *ModTask) Task() task.Task { return t }", "func (s *Scheduler) ReschedulerTask() {\n\tfor {\n\t\tselect {\n\t\tcase msg := <-s.ReschedQueue:\n\t\t\ttask, err := s.store.FetchTask(msg.TaskID)\n\t\t\tif err != nil {\n\t\t\t\tmsg.Err <- fmt.Errorf(\"Rescheduling task failed: %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif task == nil {\n\t\t\t\tmsg.Err <- fmt.Errorf(\"Task %s does not exists\", msg.TaskID)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif _, err := s.KillTask(task); err != nil {\n\t\t\t\tmsg.Err <- fmt.Errorf(\"Kill task failed: %s for rescheduling\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ts.Status = \"busy\"\n\n\t\t\tresources := s.BuildResources(task.Cpus, task.Mem, task.Disk)\n\t\t\toffers, err := s.RequestOffers(resources)\n\t\t\tif err != nil {\n\t\t\t\tmsg.Err <- fmt.Errorf(\"Request offers failed: %s for rescheduling\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar choosedOffer *mesos.Offer\n\t\t\tfor _, offer := range offers {\n\t\t\t\tcpus, mem, disk := s.OfferedResources(offer)\n\t\t\t\tif cpus >= task.Cpus && mem >= task.Mem && disk >= task.Disk {\n\t\t\t\t\tchoosedOffer = offer\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar taskInfos []*mesos.TaskInfo\n\t\t\ttaskInfo := s.BuildTaskInfo(choosedOffer, resources, task)\n\t\t\ttaskInfos = append(taskInfos, 
taskInfo)\n\n\t\t\tresp, err := s.LaunchTasks(choosedOffer, taskInfos)\n\t\t\tif err != nil {\n\t\t\t\tmsg.Err <- fmt.Errorf(\"Launchs task failed: %s for rescheduling\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif resp != nil && resp.StatusCode != http.StatusAccepted {\n\t\t\t\tmsg.Err <- fmt.Errorf(\"Launchs task failed: status code %d for rescheduling\", resp.StatusCode)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogrus.Infof(\"Remove health check for task %s\", msg.TaskID)\n\t\t\tif err := s.store.DeleteCheck(msg.TaskID); err != nil {\n\t\t\t\tmsg.Err <- fmt.Errorf(\"Remove health check for %s failed: %s\", msg.TaskID, err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(task.HealthChecks) != 0 {\n\t\t\t\tif err := s.store.SaveCheck(task,\n\t\t\t\t\t*taskInfo.Container.Docker.PortMappings[0].HostPort,\n\t\t\t\t\tmsg.AppID); err != nil {\n\t\t\t\t}\n\t\t\t\tfor _, healthCheck := range task.HealthChecks {\n\t\t\t\t\tcheck := types.Check{\n\t\t\t\t\t\tID: task.Name,\n\t\t\t\t\t\tAddress: *task.AgentHostname,\n\t\t\t\t\t\tPort: int(*taskInfo.Container.Docker.PortMappings[0].HostPort),\n\t\t\t\t\t\tTaskID: task.Name,\n\t\t\t\t\t\tAppID: msg.AppID,\n\t\t\t\t\t\tProtocol: healthCheck.Protocol,\n\t\t\t\t\t\tInterval: int(healthCheck.IntervalSeconds),\n\t\t\t\t\t\tTimeout: int(healthCheck.TimeoutSeconds),\n\t\t\t\t\t}\n\t\t\t\t\tif healthCheck.Command != nil {\n\t\t\t\t\t\tcheck.Command = healthCheck.Command\n\t\t\t\t\t}\n\n\t\t\t\t\tif healthCheck.Path != nil {\n\t\t\t\t\t\tcheck.Path = *healthCheck.Path\n\t\t\t\t\t}\n\n\t\t\t\t\tif healthCheck.MaxConsecutiveFailures != nil {\n\t\t\t\t\t\tcheck.MaxFailures = *healthCheck.MaxConsecutiveFailures\n\t\t\t\t\t}\n\n\t\t\t\t\ts.HealthCheckManager.Add(&check)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tmsg.Err <- nil\n\n\t\t\ts.Status = \"idle\"\n\n\t\tcase <-s.doneChan:\n\t\t\treturn\n\t\t}\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate validates this alertable event type
func (m *AlertableEventType) Validate(formats strfmt.Registry) error { var res []error if err := m.validateCategory(formats); err != nil { res = append(res, err) } if err := m.validateScope(formats); err != nil { res = append(res, err) } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
[ "func (e EventType) Validate() error {\n\tswitch e {\n\tcase InsertOp:\n\tcase UpdateOp:\n\tcase DeleteOp:\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown event type: %s\", e)\n\t}\n\n\treturn nil\n}", "func (e EEventType) Validate() error {\n switch e {\n case EV_UNKNOWN,EV_BASE,EV_SCRIPT,EV_PERL,EV_BIN,EV_URL,EV_RESULT,EV_WORKER_DONE,EV_COMMAND,EV_REPLY,EV_ERROR:\n return nil\n default:\n return e // error condition - Error() can be invoked on it\n } // switch\n}", "func (e Event) Validate() error {\n\tif e.Type == \"\" {\n\t\treturn wrapError(ErrInvalidEvent, \"missing type\")\n\t}\n\n\tif e.UserID == 0 {\n\t\treturn wrapError(ErrInvalidEvent, \"missing owner\")\n\t}\n\n\tif e.Visibility < 10 || e.Visibility > 50 {\n\t\treturn wrapError(ErrInvalidEvent, \"visibility not supported\")\n\t}\n\n\treturn nil\n}", "func ValidateEvent(e interface{}) error {\n\tswitch v := e.(type) {\n\tcase *Event:\n\tcase *File:\n\tcase *Message:\n\tcase *RichMessage:\n\tcase *SystemMessage:\n\tcase Event:\n\tcase File:\n\tcase Message:\n\tcase RichMessage:\n\tcase SystemMessage:\n\tdefault:\n\t\treturn fmt.Errorf(\"event type %T not supported\", v)\n\t}\n\n\treturn nil\n}", "func (v BlockEventType) Validate() error {\n\tif !(v == \"block_added\" || v == \"block_removed\") {\n\t\treturn fmt.Errorf(\"api: invalid BlockEventType value: %q\", v)\n\t}\n\treturn nil\n}", "func (ut *EventPayload) Validate() (err error) {\n\tif ut.SportID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"sportId\"))\n\t}\n\tif ut.EventID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"eventId\"))\n\t}\n\tif ut.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"name\"))\n\t}\n\tif ut.SubTitle == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"subTitle\"))\n\t}\n\tif ut.StartDtTm == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"startDtTm\"))\n\t}\n\tif ut.EndDtTm == 
\"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"endDtTm\"))\n\t}\n\tif ut.LocationID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"locationId\"))\n\t}\n\tif ut.TeamAdvanceMethod == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"teamAdvanceMethod\"))\n\t}\n\treturn\n}", "func (e *Event) Validate() error {\n\tif e.Message == \"\" || e.MessageOffset == \"\" || e.Time == nil || e.Type == \"\" {\n\t\treturn errs.ErrMissingParameters\n\t}\n\treturn nil\n}", "func (m *AlertingPredefinedEventFilter) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateEventType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNegate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (er EventResource) Validate() error {\n\tswitch er {\n\tcase CursorReminder:\n\t\treturn errors.New(\"event reminder resource is not allowed to be created\")\n\tcase Publish:\n\tcase Application:\n\tcase CredentialEvent:\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported event resource: %s\", er)\n\t}\n\n\treturn nil\n}", "func (m *Event) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (a *Alert) Validate() error {\n\tif a.StartsAt.IsZero() {\n\t\treturn fmt.Errorf(\"start time missing\")\n\t}\n\tif !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {\n\t\treturn fmt.Errorf(\"start time must be before end time\")\n\t}\n\tif err := a.Labels.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"invalid label set: %s\", err)\n\t}\n\tif len(a.Labels) == 0 {\n\t\treturn fmt.Errorf(\"at least one label pair required\")\n\t}\n\tif err := 
a.Annotations.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"invalid annotations: %s\", err)\n\t}\n\treturn nil\n}", "func (v ExemptionType) Validate() error {\n\tif !(v == \"dynamic\" || v == \"greater_or_equal\" || v == \"less_or_equal\") {\n\t\treturn fmt.Errorf(\"api: invalid ExemptionType value: %q\", v)\n\t}\n\treturn nil\n}", "func (et EventType) Valid() bool {\n\tswitch et {\n\tcase Reopened, Closed, Renamed, Labeled, Unlabeled, CommentDeleted:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (m *Alert) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (ut *eventPayload) Validate() (err error) {\n\tif ut.SportID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"sportId\"))\n\t}\n\tif ut.EventID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"eventId\"))\n\t}\n\tif ut.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"name\"))\n\t}\n\tif ut.SubTitle == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"subTitle\"))\n\t}\n\tif ut.StartDtTm == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"startDtTm\"))\n\t}\n\tif ut.EndDtTm == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"endDtTm\"))\n\t}\n\tif ut.LocationID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"locationId\"))\n\t}\n\tif ut.TeamAdvanceMethod == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"teamAdvanceMethod\"))\n\t}\n\treturn\n}", "func (m *EventDescription) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func validateSubscribeEvent(subscribeBuilder *SubscribeBuilder) error {\n\tsubscribeEvent := struct {\n\t\tTopic string `valid:\"required\"`\n\t\tEventName string `valid:\"alphanum,stringlength(1|256),required\"`\n\t\tGroupID string `valid:\"alphanum,stringlength(1|256),required\"`\n\t\tCallback func(ctx 
context.Context, event *Event, err error) error\n\t}{\n\t\tTopic: subscribeBuilder.topic,\n\t\tEventName: subscribeBuilder.eventName,\n\t\tGroupID: subscribeBuilder.groupID,\n\t\tCallback: subscribeBuilder.callback,\n\t}\n\n\t_, err := validator.ValidateStruct(subscribeEvent)\n\tif err != nil {\n\t\tlogrus.Errorf(\"unable to validate subscribe event. error : %v\", err)\n\t\treturn errInvalidSubStruct\n\t}\n\n\tif subscribeEvent.Callback == nil {\n\t\treturn errInvalidCallback\n\t}\n\n\treturn nil\n}", "func (ec EventContextV03) Validate() ValidationError {\n\terrors := map[string]error{}\n\n\t// type\n\t// Type: String\n\t// Constraints:\n\t// REQUIRED\n\t// MUST be a non-empty string\n\t// SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type.\n\teventType := strings.TrimSpace(ec.Type)\n\tif eventType == \"\" {\n\t\terrors[\"type\"] = fmt.Errorf(\"MUST be a non-empty string\")\n\t}\n\n\t// source\n\t// Type: URI-reference\n\t// Constraints:\n\t// REQUIRED\n\tsource := strings.TrimSpace(ec.Source.String())\n\tif source == \"\" {\n\t\terrors[\"source\"] = fmt.Errorf(\"REQUIRED\")\n\t}\n\n\t// subject\n\t// Type: String\n\t// Constraints:\n\t// OPTIONAL\n\t// MUST be a non-empty string\n\tif ec.Subject != nil {\n\t\tsubject := strings.TrimSpace(*ec.Subject)\n\t\tif subject == \"\" {\n\t\t\terrors[\"subject\"] = fmt.Errorf(\"if present, MUST be a non-empty string\")\n\t\t}\n\t}\n\n\t// id\n\t// Type: String\n\t// Constraints:\n\t// REQUIRED\n\t// MUST be a non-empty string\n\t// MUST be unique within the scope of the producer\n\tid := strings.TrimSpace(ec.ID)\n\tif id == \"\" {\n\t\terrors[\"id\"] = fmt.Errorf(\"MUST be a non-empty string\")\n\n\t\t// no way to test \"MUST be unique within the scope of the producer\"\n\t}\n\n\t// time\n\t// Type: Timestamp\n\t// Constraints:\n\t// OPTIONAL\n\t// If present, MUST adhere to the format specified in RFC 3339\n\t// --> no need to test this, 
no way to set the time without it being valid.\n\n\t// schemaurl\n\t// Type: URI\n\t// Constraints:\n\t// OPTIONAL\n\t// If present, MUST adhere to the format specified in RFC 3986\n\tif ec.SchemaURL != nil {\n\t\tschemaURL := strings.TrimSpace(ec.SchemaURL.String())\n\t\t// empty string is not RFC 3986 compatible.\n\t\tif schemaURL == \"\" {\n\t\t\terrors[\"schemaurl\"] = fmt.Errorf(\"if present, MUST adhere to the format specified in RFC 3986\")\n\t\t}\n\t}\n\n\t// datacontenttype\n\t// Type: String per RFC 2046\n\t// Constraints:\n\t// OPTIONAL\n\t// If present, MUST adhere to the format specified in RFC 2046\n\tif ec.DataContentType != nil {\n\t\tdataContentType := strings.TrimSpace(*ec.DataContentType)\n\t\tif dataContentType == \"\" {\n\t\t\terrors[\"datacontenttype\"] = fmt.Errorf(\"if present, MUST adhere to the format specified in RFC 2046\")\n\t\t} else {\n\t\t\t_, _, err := mime.ParseMediaType(dataContentType)\n\t\t\tif err != nil {\n\t\t\t\terrors[\"datacontenttype\"] = fmt.Errorf(\"if present, MUST adhere to the format specified in RFC 2046\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// datacontentencoding\n\t// Type: String per RFC 2045 Section 6.1\n\t// Constraints:\n\t// The attribute MUST be set if the data attribute contains string-encoded binary data.\n\t// Otherwise the attribute MUST NOT be set.\n\t// If present, MUST adhere to RFC 2045 Section 6.1\n\tif ec.DataContentEncoding != nil {\n\t\tdataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding))\n\t\tif dataContentEncoding != Base64 {\n\t\t\terrors[\"datacontentencoding\"] = fmt.Errorf(\"if present, MUST adhere to RFC 2045 Section 6.1\")\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn errors\n\t}\n\treturn nil\n}", "func (ec EventContextV01) Validate() error {\n\terrors := []string(nil)\n\n\t// eventType\n\t// Type: String\n\t// Constraints:\n\t// \tREQUIRED\n\t// \tMUST be a non-empty string\n\t// \tSHOULD be prefixed with a reverse-DNS name. 
The prefixed domain dictates the organization which defines the semantics of this event type.\n\teventType := strings.TrimSpace(ec.EventType)\n\tif eventType == \"\" {\n\t\terrors = append(errors, \"eventType: MUST be a non-empty string\")\n\t}\n\n\t// eventTypeVersion\n\t// Type: String\n\t// Constraints:\n\t// \tOPTIONAL\n\t// \tIf present, MUST be a non-empty string\n\tif ec.EventTypeVersion != nil {\n\t\teventTypeVersion := strings.TrimSpace(*ec.EventTypeVersion)\n\t\tif eventTypeVersion == \"\" {\n\t\t\terrors = append(errors, \"eventTypeVersion: if present, MUST be a non-empty string\")\n\t\t}\n\t}\n\n\t// cloudEventsVersion\n\t// Type: String\n\t// Constraints:\n\t// \tREQUIRED\n\t// \tMUST be a non-empty string\n\tcloudEventsVersion := strings.TrimSpace(ec.CloudEventsVersion)\n\tif cloudEventsVersion == \"\" {\n\t\terrors = append(errors, \"cloudEventsVersion: MUST be a non-empty string\")\n\t}\n\n\t// source\n\t// Type: URI\n\t// Constraints:\n\t// \tREQUIRED\n\tsource := strings.TrimSpace(ec.Source.String())\n\tif source == \"\" {\n\t\terrors = append(errors, \"source: REQUIRED\")\n\t}\n\n\t// eventID\n\t// Type: String\n\t// Constraints:\n\t// \tREQUIRED\n\t// \tMUST be a non-empty string\n\t// \tMUST be unique within the scope of the producer\n\teventID := strings.TrimSpace(ec.EventID)\n\tif eventID == \"\" {\n\t\terrors = append(errors, \"eventID: MUST be a non-empty string\")\n\n\t\t// no way to test \"MUST be unique within the scope of the producer\"\n\t}\n\n\t// eventTime\n\t// Type: Timestamp\n\t// Constraints:\n\t// \tOPTIONAL\n\t//\tIf present, MUST adhere to the format specified in RFC 3339\n\t// --> no need to test this, no way to set the eventTime without it being valid.\n\n\t// schemaURL\n\t// Type: URI\n\t// Constraints:\n\t// \tOPTIONAL\n\t// \tIf present, MUST adhere to the format specified in RFC 3986\n\tif ec.SchemaURL != nil {\n\t\tschemaURL := strings.TrimSpace(ec.SchemaURL.String())\n\t\t// empty string is not RFC 3986 
compatible.\n\t\tif schemaURL == \"\" {\n\t\t\terrors = append(errors, \"schemaURL: if present, MUST adhere to the format specified in RFC 3986\")\n\t\t}\n\t}\n\n\t// contentType\n\t// Type: String per RFC 2046\n\t// Constraints:\n\t// \tOPTIONAL\n\t// \tIf present, MUST adhere to the format specified in RFC 2046\n\tif ec.ContentType != nil {\n\t\tcontentType := strings.TrimSpace(*ec.ContentType)\n\t\tif contentType == \"\" {\n\t\t\t// TODO: need to test for RFC 2046\n\t\t\terrors = append(errors, \"contentType: if present, MUST adhere to the format specified in RFC 2046\")\n\t\t}\n\t}\n\n\t// extensions\n\t// Type: Map\n\t// Constraints:\n\t// \tOPTIONAL\n\t// \tIf present, MUST contain at least one entry\n\tif ec.Extensions != nil {\n\t\tif len(ec.Extensions) == 0 {\n\t\t\terrors = append(errors, \"extensions: if present, MUST contain at least one entry\")\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(strings.Join(errors, \"\\n\"))\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsEmpty checks whether taskConfig has all it's fields declared properly or not.
func (t TaskConfig) IsEmpty() bool { return t == TaskConfig{} || t.DbURL == "" || t.DbType == "" }
[ "func (c Config) Empty() bool { return c.Runtime == \"\" }", "func (mgc Config) Empty() bool {\n\treturn mgc.AuthDB == \"\" &&\n\t\tmgc.DB == \"\" &&\n\t\tmgc.User == \"\" &&\n\t\tmgc.Password == \"\" &&\n\t\tmgc.Host == \"\"\n}", "func (c *ProjectConfig) NotEmpty() bool {\n\tif err := c.ValidateConfigVersion(); err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (t *TLSConfig) Empty() bool {\n\treturn *t == TLSConfig{}\n}", "func (d DiscoveryConfig) IsEmpty() bool {\n\treturn len(d.AWSMatchers) == 0 && len(d.AzureMatchers) == 0 &&\n\t\tlen(d.GCPMatchers) == 0 && len(d.KubernetesMatchers) == 0\n}", "func (config *V1RegistriesConf) Nonempty() bool {\n\treturn (len(config.V1TOMLConfig.Search.Registries) != 0 ||\n\t\tlen(config.V1TOMLConfig.Insecure.Registries) != 0 ||\n\t\tlen(config.V1TOMLConfig.Block.Registries) != 0)\n}", "func (v *VisConfig) IsEmpty() bool {\n\treturn v.Format == \"\" && v.Visualizations == nil\n\t// v.Format == \"\" && v.DataPath == \"\" && v.Visualizations == nil\n}", "func (r SoftwareConfigPage) IsEmpty() (bool, error) {\n\tis, err := ExtractSoftwareConfigs(r)\n\treturn len(is) == 0, err\n}", "func (a *awsConfigurator) IsEmpty() bool {\n\treturn len(a.actions) == 0\n}", "func (r *ScalingConfigOrT[_]) IsEmpty() bool {\n\treturn r.ScalingConfig.IsEmpty() && r.Value == nil\n}", "func (c *ConfigurationFile) CheckEmpty() error {\n\tif c.Servers == nil || len(c.Servers) <= 0 || c.Roles == nil || len(c.Roles) <= 0 {\n\t\treturn errors.New(\"kishell is not configured. 
Use configure option before searching\")\n\t}\n\treturn nil\n}", "func (r DNSConfig) IsEmpty() bool {\n\treturn (storage.DNSConfig)(r).IsEmpty()\n}", "func (r *RangeConfig) IsEmpty() bool {\n\treturn r.Min == nil && r.Max == nil && r.SpotFrom == nil\n}", "func (wq *TaskQueue) IsNotEmpty() bool {\n\treturn wq.totalTasks > 0\n}", "func (a *AdvancedScalingConfig[_]) IsEmpty() bool {\n\treturn a.Cooldown.IsEmpty() && a.Value == nil\n}", "func (i ignore) HasConfig() bool {\n\treturn len(i) != 0\n}", "func (tt TargetTable) IsEmpty() bool {\n\treturn tt.Task == \"\" // now we treat it as empty if no task name specified.\n}", "func (c Config) IsZero() bool {\n\treturn len(c.ProjectKey) == 0 || len(c.ProjectID) == 0\n}", "func (o *FormField) HasConfig() bool {\n\tif o != nil && o.Config != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
TaskConfiguration returns configuration properties related to task server, which includes db details.
func TaskConfiguration() TaskConfig { return conf.TaskConfig }
[ "func (j *AuroraJob) TaskConfig() *aurora.TaskConfig {\n\treturn j.jobConfig.TaskConfig\n}", "func (m *BusinessScenarioPlanner) GetTaskConfiguration()(PlannerTaskConfigurationable) {\n val, err := m.GetBackingStore().Get(\"taskConfiguration\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(PlannerTaskConfigurationable)\n }\n return nil\n}", "func (d *Driver) TaskConfigSchema() (*hclspec.Spec, error) {\n\treturn taskConfigSpec, nil\n}", "func (d *Driver) TaskConfigSchema() (*hclspec.Spec, error) {\n\treturn api.TaskConfigSpec, nil\n}", "func (client *Client) TaskConfigList(request *TaskConfigListRequest) (response *TaskConfigListResponse, err error) {\n\tresponse = CreateTaskConfigListResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (c *CvpClient) GetConfigForTask(workOrderForID string) (ConfigForTask, error) {\n\tcft := ConfigForTask{}\n\turl := \"/provisioning/getconfigfortask.do?workorderid=\" + workOrderForID\n\tresp, err := c.Get(url)\n\tif err != nil {\n\t\treturn cft, err\n\t}\n\terr = json.Unmarshal(resp, &cft)\n\treturn cft, err\n}", "func (a *Agent) makeTaskConfig(ctx context.Context, tc *taskContext) (*internal.TaskConfig, error) {\n\tif tc.project == nil {\n\t\tgrip.Info(\"Fetching project config.\")\n\t\terr := a.fetchProjectConfig(ctx, tc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tgrip.Info(\"Fetching distro configuration.\")\n\tvar confDistro *apimodels.DistroView\n\tvar err error\n\tif a.opts.Mode == HostMode {\n\t\tconfDistro, err = a.comm.GetDistroView(ctx, tc.task)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tgrip.Info(\"Fetching project ref.\")\n\tconfRef, err := a.comm.GetProjectRef(ctx, tc.task)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif confRef == nil {\n\t\treturn nil, errors.New(\"agent retrieved an empty project ref\")\n\t}\n\n\tvar confPatch *patch.Patch\n\tif evergreen.IsGitHubPatchRequester(tc.taskModel.Requester) {\n\t\tgrip.Info(\"Fetching patch 
document for GitHub PR request.\")\n\t\tconfPatch, err = a.comm.GetTaskPatch(ctx, tc.task, \"\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"fetching patch for GitHub PR request\")\n\t\t}\n\t}\n\n\tgrip.Info(\"Constructing task config.\")\n\ttaskConfig, err := internal.NewTaskConfig(a.opts.WorkingDirectory, confDistro, tc.project, tc.taskModel, confRef, confPatch, tc.expansions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttaskConfig.Redacted = tc.privateVars\n\ttaskConfig.TaskSync = a.opts.SetupData.TaskSync\n\ttaskConfig.EC2Keys = a.opts.SetupData.EC2Keys\n\n\treturn taskConfig, nil\n}", "func (cttsdtp ConnectToTargetSQLDbTaskProperties) AsProjectTaskProperties() (*ProjectTaskProperties, bool) {\n\treturn nil, false\n}", "func TaskTimeout(duration time.Duration) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tc.TaskTimeout = duration\n\t\treturn nil\n\t}\n}", "func (t *Task) GetConfig() Config {\n\tif reflect.TypeOf(t.config).Kind() == reflect.Ptr {\n\t\t// Pointer:\n\t\treturn reflect.New(reflect.ValueOf(t.config).Elem().Type()).Interface().(Config)\n\t}\n\t// Not pointer:\n\treturn reflect.New(reflect.TypeOf(t.config)).Elem().Interface().(Config)\n}", "func GetTaskConfig(name, action string, conf *config.ComposeConfig) (types.TaskConfig, error) {\n\tact, err := getAction(action, name, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn types.NewTaskConfig(act.name, conf, act.deps, NewTask(act.Run, act.Stop)), nil\n}", "func (ctssstp ConnectToSourceSQLServerTaskProperties) AsProjectTaskProperties() (*ProjectTaskProperties, bool) {\n\treturn nil, false\n}", "func (cttsmtp ConnectToTargetSQLMITaskProperties) AsProjectTaskProperties() (*ProjectTaskProperties, bool) {\n\treturn nil, false\n}", "func (m *BusinessScenarioPlanner) SetTaskConfiguration(value PlannerTaskConfigurationable)() {\n err := m.GetBackingStore().Set(\"taskConfiguration\", value)\n if err != nil {\n panic(err)\n }\n}", "func dbConfig() (map[string]string, 
error) {\n\tconf := make(map[string]string)\n\thost, ok := os.LookupEnv(dbhost)\n\tif !ok {\n\t\treturn nil, errors.New(\"DBHOST environment variable required\")\n\t}\n\tport, ok := os.LookupEnv(dbport)\n\tif !ok {\n\t\treturn nil, errors.New(\"DBPORT environment variable required\")\n\t}\n\tuser, ok := os.LookupEnv(dbuser)\n\tif !ok {\n\t\treturn nil, errors.New(\"DBUSER environment variable required\")\n\t}\n\tpassword, ok := os.LookupEnv(dbpass)\n\tif !ok {\n\t\treturn nil, errors.New(\"DBPASS environment variable required\")\n\t}\n\tname, ok := os.LookupEnv(dbname)\n\tif !ok {\n\t\treturn nil, errors.New(\"DBNAME environment variable required\")\n\t}\n\tconf[dbhost] = host\n\tconf[dbport] = port\n\tconf[dbuser] = user\n\tconf[dbpass] = password\n\tconf[dbname] = name\n\treturn conf, nil\n}", "func (msssdtp MigrateSQLServerSQLDbTaskProperties) AsProjectTaskProperties() (*ProjectTaskProperties, bool) {\n\treturn nil, false\n}", "func (cttsdtp ConnectToTargetSQLDbTaskProperties) AsMigrateSQLServerSQLDbTaskProperties() (*MigrateSQLServerSQLDbTaskProperties, bool) {\n\treturn nil, false\n}", "func (msssmtp MigrateSQLServerSQLMITaskProperties) AsProjectTaskProperties() (*ProjectTaskProperties, bool) {\n\treturn nil, false\n}", "func GetSMTPServerConfig() map[string]string {\n\tconfig := make(map[string]string)\n\n\tconfig[\"host\"] = SMTPHost\n\tconfig[\"port\"] = SMTPPort\n\tconfig[\"username\"] = SMTPUsername\n\tconfig[\"password\"] = SMTPPassword\n\n\treturn config\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewSSHClientConfig returns client configuration for SSH client
func NewSSHClientConfig(user, keyFile, passworkPhrase string) (*SSHClient, error) { publicKeyMenthod, err := publicKey(keyFile, passworkPhrase) if err != nil { return nil, err } sshConfig := &ssh.ClientConfig{ User: user, Auth: []ssh.AuthMethod{ publicKeyMenthod, }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } return &SSHClient{ config: sshConfig, }, nil }
[ "func newSSHClientConfig(host string, section *SSHConfigFileSection, userName, identity string, agentForwarding bool) (*sshClientConfig, error) {\n\tvar (\n\t\tconfig *sshClientConfig\n\t\terr error\n\t)\n\n\tif section != nil {\n\t\tupdateFromSSHConfigFile(section, &host, &userName, &agentForwarding)\n\t}\n\n\tif agentForwarding {\n\t\tconfig, err = newSSHAgentConfig(userName)\n\t} else {\n\t\tconfig, err = newSSHDefaultConfig(userName, identity)\n\t}\n\n\tif config != nil {\n\t\tconfig.host = host\n\t}\n\treturn config, err\n}", "func GetSSHClientConfig(opts *Options) (*ssh.ClientConfig, error) {\n\tauthMethod, err := opts.PrivateKey.ToSSHAuthMethod()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclientConfig := &ssh.ClientConfig{\n\t\tUser: opts.Username,\n\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(),\n\t\tAuth: []ssh.AuthMethod{authMethod},\n\t}\n\n\treturn clientConfig, nil\n}", "func NewClientConfig(s *state.State, host kubeone.HostConfig) (*clientv3.Config, error) {\n\tsshconn, err := s.Connector.Connect(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgrpcDialer, err := sshtunnel.NewGRPCDialer(s.Connector, host)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create grpc tunnel dialer\")\n\t}\n\n\ttlsConf, err := LoadTLSConfig(sshconn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &clientv3.Config{\n\t\tEndpoints: []string{fmt.Sprintf(\"%s:2379\", host.PrivateAddress)},\n\t\tTLS: tlsConf,\n\t\tContext: s.Context,\n\t\tDialTimeout: 5 * time.Second,\n\t\tDialOptions: []grpc.DialOption{\n\t\t\tgrpc.WithBlock(),\n\t\t\tgrpcDialer,\n\t\t},\n\t}, nil\n}", "func (i *IdentityFile) SSHClientConfig() (*ssh.ClientConfig, error) {\n\tsshCert, err := sshutils.ParseCertificate(i.Certs.SSH)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tpriv, err := keys.ParsePrivateKey(i.PrivateKey)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tssh, err := sshutils.ProxyClientSSHConfig(sshCert, priv, 
i.CACerts.SSH...)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn ssh, nil\n}", "func newSSHAgentConfig(userName string) (*sshClientConfig, error) {\n\tagent, err := newAgent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig, err := sshAgentConfig(userName, agent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sshClientConfig{\n\t\tagent: agent,\n\t\tClientConfig: config,\n\t}, nil\n}", "func newSSHDefaultConfig(userName, identity string) (*sshClientConfig, error) {\n\tconfig, err := sshDefaultConfig(userName, identity)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sshClientConfig{ClientConfig: config}, nil\n}", "func ClientConfig(alias string, configFile string) (*ssh.ClientConfig, string, error) {\n\tvar err error\n\tvar connectHost string\n\n\tuserConfig, err := decodeSSHConfig(configFile)\n\tif err != nil {\n\t\treturn nil, connectHost, errors.Wrap(err, \"failed to decode ssh config file\")\n\t}\n\n\t/* TODO\n\t // Rand\n\t // BannerCallback\n\t // ClientVersion\n\t*/\n\n\tconfig := &ssh.Config{}\n\tmacs := userConfig.Get(alias, \"MACs\")\n\tif macs != \"\" {\n\t\tconfig.MACs = strings.Split(macs, \",\")\n\t}\n\tkexs := userConfig.Get(alias, \"KexAlgorithms\")\n\tif kexs != \"\" {\n\t\tconfig.KeyExchanges = strings.Split(kexs, \",\")\n\t}\n\tciphers := userConfig.Get(alias, \"Ciphers\")\n\tif ciphers != \"\" {\n\t\tconfig.Ciphers = strings.Split(ciphers, \",\")\n\t}\n\n\tclientConfig := &ssh.ClientConfig{\n\t\tConfig: *config,\n\t}\n\n\t// TODO handle known_hosts2\n\t// TODO default empty?\n\tuserKnownHostsFile := userConfig.Get(alias, \"UserKnownHostsFile\")\n\tif userKnownHostsFile == \"\" {\n\t\tuserKnownHostsFile, err = homedir.Expand(\"~/.ssh/known_hosts\")\n\t\tif err != nil {\n\t\t\treturn nil, connectHost, errors.Wrap(err, \"failed to expand ~/.ssh/known_hosts\")\n\t\t}\n\t}\n\thostKeyCallback, err := getHostKeyCallback(strings.Split(userKnownHostsFile, \" \"))\n\tif err != nil {\n\t\treturn nil, 
connectHost, errors.Wrap(err, \"failed to create host key callback\")\n\t}\n\tclientConfig.HostKeyCallback = hostKeyCallback\n\n\tuser := userConfig.Get(alias, \"User\")\n\tif user == \"\" {\n\t\tcurrentUser, err := osuser.Current()\n\t\tif err != nil {\n\t\t\treturn nil, connectHost, errors.Wrap(err, \"failed to detect current user\")\n\t\t}\n\t\tuser = currentUser.Username\n\t}\n\tclientConfig.User = user\n\n\tsigners := sshAgentSigners()\n\tidentityFile, err := homedir.Expand(userConfig.Get(alias, \"IdentityFile\"))\n\tif err != nil {\n\t\treturn nil, connectHost, errors.Wrap(err, \"failed to expand home directory for IdentityFile\")\n\t}\n\tpubkey := publicKeyFile(identityFile)\n\tif pubkey != nil {\n\t\tsigners = append(signers, pubkey)\n\t}\n\tclientConfig.Auth = []ssh.AuthMethod{ssh.PublicKeys(signers...)}\n\n\thostKeyAlgorithms := userConfig.Get(alias, \"HostKeyAlgorithms\")\n\tif hostKeyAlgorithms != \"\" {\n\t\tclientConfig.HostKeyAlgorithms = strings.Split(hostKeyAlgorithms, \",\")\n\t}\n\n\ttimeoutString := userConfig.Get(alias, \"ConnectTimeout\")\n\tif timeoutString != \"\" {\n\t\ttimeoutInt, err := strconv.ParseInt(timeoutString, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, connectHost, errors.Wrap(err, \"failed to convert ConnectTimeout to int64\")\n\t\t}\n\t\tclientConfig.Timeout = time.Duration(timeoutInt) * time.Second\n\t}\n\n\thostname := userConfig.Get(alias, \"Hostname\")\n\tif hostname == \"\" {\n\t\thostname = alias\n\t}\n\tport := userConfig.Get(alias, \"Port\")\n\tif port == \"\" {\n\t\tport = \"22\"\n\t}\n\tconnectHost = hostname + \":\" + port\n\n\treturn clientConfig, connectHost, nil\n}", "func NewSSHClient(config Config) Client {\n\treturn &SSH{\n\t\tprompt: regexp.MustCompile(`(?sm)(\\x1b)?(\\x5b\\x39\\x39\\x39\\x39\\x42)?\\[[\\sA-Za-z0-9!\"#$%&'()*+,\\-./:;<=>^_]*?@[\\sA-Za-z0-9!\"#$%&'()*+,\\-./:;<=>^_]*?\\] >.{0,1}$`),\n\t\tnonASCIIremover: regexp.MustCompile(\"[[:^ascii:]]+\"),\n\t\tutf8ArtefactRemover: 
regexp.MustCompile(`\\x1b\\x5b\\x4b\\x0a`),\n\t\tConfig: config,\n\t}\n}", "func (cfg *SSHConfig) GetSSHClient() *SSHClient {\n\tvar auths []ssh.AuthMethod\n\tsshAgent := cfg.GetSSHAgent()\n\tif sshAgent != nil {\n\t\tauths = []ssh.AuthMethod{\n\t\t\tsshAgent,\n\t\t}\n\t}\n\n\tsshConfig := &ssh.ClientConfig{\n\t\tUser: cfg.user,\n\t\tAuth: auths,\n\t\t// ssh.InsecureIgnoreHostKey is OK in test code.\n\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(), // lgtm[go/insecure-hostkeycallback]\n\t\tTimeout: 15 * time.Second,\n\t}\n\n\treturn &SSHClient{\n\t\tConfig: sshConfig,\n\t\tHost: cfg.host,\n\t\tPort: cfg.port,\n\t}\n}", "func NewClient(sshconfig *ssh.ClientConfig, host string, port string, prompts []Prompt) *Client {\n\treturn &Client{\n\t\tSshconfig: sshconfig,\n\t\tHost: host,\n\t\tPort: port,\n\t\tPrompt: prompts,\n\t}\n}", "func getSSHConfig(config DefaultConfig) (*ssh.ClientConfig, io.Closer) {\n\tvar sshAgent io.Closer\n\n\t// auths holds the detected ssh auth methods\n\tauths := []ssh.AuthMethod{}\n\n\t// figure out what auths are requested, what is supported\n\tif config.Password != \"\" {\n\t\tauths = append(auths, ssh.Password(config.Password))\n\t}\n\tif config.KeyPath != \"\" {\n\t\tif pubkey, err := getKeyFile(config.KeyPath, config.Passphrase); err != nil {\n\t\t\tlog.Printf(\"getKeyFile error: %v\\n\", err)\n\t\t} else {\n\t\t\tauths = append(auths, ssh.PublicKeys(pubkey))\n\t\t}\n\t}\n\n\tif config.Key != \"\" {\n\t\tvar signer ssh.Signer\n\t\tvar err error\n\t\tif config.Passphrase != \"\" {\n\t\t\tsigner, err = sshkeys.ParseEncryptedPrivateKey([]byte(config.Key), []byte(config.Passphrase))\n\t\t} else {\n\t\t\tsigner, err = ssh.ParsePrivateKey([]byte(config.Key))\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"ssh.ParsePrivateKey: %v\\n\", err)\n\t\t} else {\n\t\t\tauths = append(auths, ssh.PublicKeys(signer))\n\t\t}\n\t}\n\n\tif sshAgent, err := net.Dial(\"unix\", os.Getenv(\"SSH_AUTH_SOCK\")); err == nil {\n\t\tauths = append(auths, 
ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers))\n\t}\n\n\tc := ssh.Config{}\n\tif config.UseInsecureCipher {\n\t\tc.SetDefaults()\n\t\tc.Ciphers = append(c.Ciphers, \"aes128-cbc\", \"aes192-cbc\", \"aes256-cbc\", \"3des-cbc\")\n\t\tc.KeyExchanges = append(c.KeyExchanges, \"diffie-hellman-group-exchange-sha1\", \"diffie-hellman-group-exchange-sha256\")\n\t}\n\n\tif len(config.Ciphers) > 0 {\n\t\tc.Ciphers = append(c.Ciphers, config.Ciphers...)\n\t}\n\n\tif len(config.KeyExchanges) > 0 {\n\t\tc.KeyExchanges = append(c.KeyExchanges, config.KeyExchanges...)\n\t}\n\n\thostKeyCallback := ssh.InsecureIgnoreHostKey()\n\tif config.Fingerprint != \"\" {\n\t\thostKeyCallback = func(hostname string, remote net.Addr, publicKey ssh.PublicKey) error {\n\t\t\tif ssh.FingerprintSHA256(publicKey) != config.Fingerprint {\n\t\t\t\treturn fmt.Errorf(\"ssh: host key fingerprint mismatch\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn &ssh.ClientConfig{\n\t\tConfig: c,\n\t\tTimeout: config.Timeout,\n\t\tUser: config.User,\n\t\tAuth: auths,\n\t\tHostKeyCallback: hostKeyCallback,\n\t}, sshAgent\n}", "func genConfig(user, pass string) *ssh.ClientConfig {\n\treturn &ssh.ClientConfig{\n\t\tUser: user,\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.Password(pass),\n\t\t\tssh.KeyboardInteractive(\n\t\t\t\tfunc(string, string, []string, []bool) (\n\t\t\t\t\tanswers []string, err error,\n\t\t\t\t) {\n\t\t\t\t\treturn []string{pass}, nil\n\t\t\t\t},\n\t\t\t),\n\t\t},\n\t\tClientVersion: CONFIG.Version,\n\t\tTimeout: 3 * time.Second,\n\t}\n}", "func SSHConfig() *ssh.ClientConfig {\n\treturn &ssh.ClientConfig{\n\t\tUser: dockerUser(),\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tPublicKeyFile(dockerCertPath()),\n\t\t},\n\t}\n}", "func NewSSHClient(ctx context.Context, projectID string, repo Repo) *SSHClient {\n\treturn &SSHClient{\n\t\tprojectID: projectID,\n\t\trepo: repo,\n\t\tlog: logFromContext(ctx).WithField(\"project-id\", projectID),\n\t}\n}", "func GetSSHClient(host string, port int, user 
string) *SSHClient {\n\n\tsshConfig := &ssh.ClientConfig{\n\t\tUser: user,\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tSSHAgent(),\n\t\t},\n\t\t// ssh.InsecureIgnoreHostKey is OK in test code.\n\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(), // lgtm[go/insecure-hostkeycallback]\n\t\tTimeout: 15 * time.Second,\n\t}\n\n\treturn &SSHClient{\n\t\tConfig: sshConfig,\n\t\tHost: host,\n\t\tPort: port,\n\t}\n\n}", "func sshConfig(useAgent bool, username, privateKeyPath, password string) func(multistep.StateBag) (*ssh.ClientConfig, error) {\n\treturn func(state multistep.StateBag) (*ssh.ClientConfig, error) {\n\n\t\tif useAgent {\n\t\t\tlog.Println(\"Configuring SSH agent.\")\n\n\t\t\tauthSock := os.Getenv(\"SSH_AUTH_SOCK\")\n\t\t\tif authSock == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"SSH_AUTH_SOCK is not set\")\n\t\t\t}\n\n\t\t\tsshAgent, err := net.Dial(\"unix\", authSock)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Cannot connect to SSH Agent socket %q: %s\", authSock, err)\n\t\t\t}\n\n\t\t\treturn &ssh.ClientConfig{\n\t\t\t\tUser: username,\n\t\t\t\tAuth: []ssh.AuthMethod{\n\t\t\t\t\tssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers),\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\n\t\thasKey := privateKeyPath != \"\"\n\n\t\tif hasKey {\n\t\t\tlog.Printf(\"Configuring SSH private key '%s'.\", privateKeyPath)\n\n\t\t\tprivateKeyBytes, err := ioutil.ReadFile(privateKeyPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Unable to read SSH private key: %s\", err)\n\t\t\t}\n\n\t\t\tsigner, err := ssh.ParsePrivateKey(privateKeyBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Error setting up SSH config: %s\", err)\n\t\t\t}\n\n\t\t\treturn &ssh.ClientConfig{\n\t\t\t\tUser: username,\n\t\t\t\tAuth: []ssh.AuthMethod{\n\t\t\t\t\tssh.PublicKeys(signer),\n\t\t\t\t},\n\t\t\t}, nil\n\t\t} else {\n\t\t\tlog.Println(\"Configuring SSH keyboard interactive.\")\n\n\t\t\treturn &ssh.ClientConfig{\n\t\t\t\tUser: username,\n\t\t\t\tAuth: 
[]ssh.AuthMethod{\n\t\t\t\t\tssh.Password(password),\n\t\t\t\t\tssh.KeyboardInteractive(\n\t\t\t\t\t\tpackerssh.PasswordKeyboardInteractive(password)),\n\t\t\t\t}}, nil\n\t\t}\n\t}\n}", "func NewConfSSH() *ConfSSH {\n\treturn &ConfSSH{\n\t\tBindPort: 22,\n\t\tBindHost: \"[::]\",\n\t\tServerConfig: &ssh.ServerConfig{\n\t\t\tServerVersion: \"SSH-2.0-OpenSSH_7.6p1\",\n\t\t},\n\t}\n}", "func NewClient(sshc *ssh.Client) (cl *Client, err error) {\n\tlogp := \"New\"\n\tcl = &Client{}\n\n\tcl.sess, err = sshc.NewSession()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: NewSession: %w\", logp, err)\n\t}\n\n\tcl.pipeIn, err = cl.sess.StdinPipe()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: StdinPipe: %w\", logp, err)\n\t}\n\tcl.pipeOut, err = cl.sess.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: StdoutPipe: %w\", logp, err)\n\t}\n\tcl.pipeErr, err = cl.sess.StderrPipe()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: StderrPipe: %w\", logp, err)\n\t}\n\n\terr = cl.sess.RequestSubsystem(subsystemNameSftp)\n\tif err != nil {\n\t\tif err.Error() == \"ssh: subsystem request failed\" {\n\t\t\treturn nil, ErrSubsystem\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s: RequestSubsystem: %w\", logp, err)\n\t}\n\n\tcl.requestId = uint32(time.Now().Unix())\n\n\terr = cl.init()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %w\", logp, err)\n\t}\n\n\treturn cl, nil\n}", "func (e *EdgeSwitchClient) newClient() (*ssh.Client, error) {\n\tsshConfig := &ssh.ClientConfig{\n\t\tUser: DEFAULT_USER,\n\t\tAuth: []ssh.AuthMethod{ssh.Password(DEFAULT_USER)},\n\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(),\n\t}\n\n\tclient, err := ssh.Dial(\"tcp\", e.ipaddress, sshConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ExecuteBatch execute bach commands
func (c *SSHClient) ExecuteBatch(commands []string, prependSudo bool) error { fmt.Printf("**** Host: %s ****\n", c.client.RemoteAddr().String()) for _, cmd := range commands { fmt.Printf("** Execute command: %s **\n", cmd) stdout, stderr, err := c.ExecuteOne(cmd, prependSudo) if err != nil { return err } if len(stdout) > 0 { fmt.Println(stdout) } if len(stderr) > 0 { fmt.Printf("Error: %s\n", stderr) } fmt.Println() } return nil }
[ "func (_BaseContentSpace *BaseContentSpaceTransactor) ExecuteBatch(opts *bind.TransactOpts, _v []uint8, _r [][32]byte, _s [][32]byte, _from []common.Address, _dest []common.Address, _value []*big.Int, _ts []*big.Int) (*types.Transaction, error) {\n\treturn _BaseContentSpace.contract.Transact(opts, \"executeBatch\", _v, _r, _s, _from, _dest, _value, _ts)\n}", "func (s *Stmt) ExecBatch(args [][]interface{}) (Result, error) {\n\treturn s.execBatchContext(context.Background(), &args)\n}", "func (tb *Batch) ExecuteBatch(session *gocql.Session) error {\n\tspan := tb.newChildSpan(tb.ctx)\n\terr := session.ExecuteBatch(tb.Batch)\n\ttb.finishSpan(span, err)\n\treturn err\n}", "func (c *cassandra) ExecuteBatch(batchType gocql.BatchType, queries []string, params [][]interface{}) error {\n\tcount := len(queries)\n\n\t// quick sanity check\n\tif count != len(params) {\n\t\treturn errors.New(\"Amount of queries and params does not match\")\n\t}\n\n\tbatch := c.session.NewBatch(batchType)\n\tbatch.Cons = c.wcl\n\tfor idx := 0; idx < count; idx++ {\n\t\tbatch.Query(queries[idx], params[idx]...)\n\t}\n\n\treturn c.session.ExecuteBatch(batch)\n}", "func (runner *TestRunner) executeBatch (batchResultId string, batchParams BatchExecParams, testCasePaths []string, stopRequest <-chan struct{}, executionLogAppend chan<- string) {\n\n\ttype BatchExecutionStatus int\n\tconst (\n\t\tBATCH_EXEC_STATUS_RUNNING BatchExecutionStatus = iota\n\t\tBATCH_EXEC_STATUS_DONE\n\t\tBATCH_EXEC_STATUS_LINK_ERROR\n\t\tBATCH_EXEC_STATUS_GENERIC_ERROR\n\t)\n\n\t// Start running batch result.\n\n\t{\n\t\topSet := rtdb.NewOpSet()\n\t\topSet.Call(typeBatchResult, batchResultId, \"SetStatus\", BATCH_STATUS_CODE_RUNNING)\n\t\terr := runner.rtdbServer.ExecuteOpSet(opSet)\n\t\tif err != nil { panic(err) }\n\t}\n\n\tvar appendExecutionLog = func(content string) {\n\t\texecutionLogAppend <- content\n\t}\n\n\tvar appendTestInfoLog = func(content string) {\n\t\twrappedContent := \"**** Test binary info log chunk 
begins ****\\n\"\n\t\twrappedContent += content\n\t\twrappedContent += \"**** Test binary info log chunk ends ****\\n\"\n\t\tappendExecutionLog(wrappedContent)\n\t}\n\n\tvar appendCherryLogLine = func(content string) {\n\t\tappendExecutionLog(content + \"\\n\")\n\t}\n\n\tvar appendRunnerLogLine = func(content string) {\n\t\tlog.Printf(\"[runner] %s\\n\", content)\n\t\tappendCherryLogLine(content)\n\t}\n\n\tappendRunnerLogLine(fmt.Sprintf(\"Starting test batch execution at %v\", time.Now().Format(defaultHumanReadableTimeFormat)))\n\n\tvar deviceConfig DeviceConfig\n\tadbOk\t\t\t:= true\n\trunCanceled\t\t:= false\n\n\t{\n\t\terr := runner.rtdbServer.GetObject(batchParams.DeviceId, &deviceConfig)\n\t\tif err != nil { panic(err) }\n\t}\n\n\tif deviceConfig.IsADBDevice {\n\t\terr := LaunchAndroidExecServer(deviceConfig.ADBSerialNumber, batchParams.TargetPort)\n\t\tif err != nil {\n\t\t\tappendRunnerLogLine(fmt.Sprintf(\"Failed to launch ExecServer on Android via ADB: %v\", err))\n\t\t\tadbOk = false\n\t\t}\n\t}\n\n\tif adbOk {\n\t\t// Processed cases (to avoid re-executing them).\n\t\tprocessedCasePaths := make(map[string]bool)\n\n\t\t// Spawn execution as long as more cases to handle.\n\t\tappendRunnerLogLine(fmt.Sprintf(\"Execute %d tests...\", len(testCasePaths)))\n\t\tfor len(testCasePaths) > 0 {\n\t\t\t// Choose next batch to execute & encode case list trie.\n\t\t\tpackageName, launchCaseList := getNextTestCaseBatch(testCasePaths)\n\t\t\tencodedCaseList := prefixEncode(launchCaseList)\n\t\t\tappendRunnerLogLine(fmt.Sprintf(\"Launch %d cases from package '%s'...\", len(launchCaseList), packageName))\n\t\t\ttestPackage := runner.testPackages[packageName]\n\n\t\t\tdidProgress := false\n\t\t\tvar executionStatus BatchExecutionStatus = BATCH_EXEC_STATUS_RUNNING\n\n\t\t\t// Try a few times (in case of connection errors).\n\t\t\tfor tryNdx := 0; tryNdx < 3; tryNdx++ {\n\t\t\t\tif tryNdx > 0 {\n\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"Try again: %d\", 
tryNdx))\n\t\t\t\t\ttime.Sleep((time.Duration)(tryNdx) * 500 * time.Millisecond)\n\t\t\t\t}\n\n\t\t\t\t// If previous error was link error, relaunch execserver just to be sure\n\t\t\t\tif executionStatus == BATCH_EXEC_STATUS_LINK_ERROR && deviceConfig.IsADBDevice {\n\t\t\t\t\tappendRunnerLogLine(\"Relaunching execserver\")\n\t\t\t\t\terr := RelaunchAndroidExecServer(deviceConfig.ADBSerialNumber, batchParams.TargetPort)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"Failed to relaunch ExecServer on Android via ADB: %v\", err))\n\t\t\t\t\t\tcontinue // Just try again, if tries left\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Create link to target.\n\t\t\t\tlinkParams := CommLinkParams {\n\t\t\t\t\tSpawnProcessPath:\tbatchParams.SpawnLocalProcess,\n\t\t\t\t\tTargetAddress:\t\tbatchParams.TargetAddress,\n\t\t\t\t\tTargetPort:\t\t\tbatchParams.TargetPort,\n\t\t\t\t}\n\t\t\t\tlink := NewCommLinkTcpIp(linkParams, appendCherryLogLine)\n\t\t\t\terr := link.Start()\n\t\t\t\tif err != nil {\n\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"WARNING: failed to start link: %s\", err))\n\t\t\t\t\tcontinue // Just try again, if tries left\n\t\t\t\t}\n\n\t\t\t\t// Execute test case on target device.\n\t\t\t\texecEventChan := make(chan TestExecutorEvent, 4)\n\t\t\t\tlinkStopRequest := make(chan struct{}, 1)\n\t\t\t\texecParams := CommLinkExecParams {\n\t\t\t\t\tbinaryName:\t\tstrings.Replace(batchParams.TestBinaryName, \"${TestPackageName}\", testPackage.binaryName, -1),\n\t\t\t\t\tcommandLine:\tbatchParams.TestBinaryCommandLine,\n\t\t\t\t\tworkingDir:\t\tstrings.Replace(batchParams.TestBinaryWorkingDir, \"${TestPackageDir}\", testPackage.binaryDir, -1),\n\t\t\t\t\ttestCasePaths:\tencodedCaseList,\n\t\t\t\t}\n\t\t\t\terr = link.Execute(execParams, execEventChan, linkStopRequest)\n\t\t\t\tif err != nil {\n\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"WARNING: connecting to target device failed: %s\", 
err))\n\t\t\t\t\tlink.Stop()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcurrentlyRunningCases := make(map[string]bool) // Paths of the test cases currently running.\n\n\t\t\t\t// Handle all events from comm link, as well as stop requests.\n\t\t\t\texecutionStatus = BATCH_EXEC_STATUS_RUNNING\n\t\t\t\tfor executionStatus == BATCH_EXEC_STATUS_RUNNING {\n\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-stopRequest:\n\t\t\t\t\t\t\trunCanceled = true\n\t\t\t\t\t\t\tappendRunnerLogLine(\"Got stop request\")\n\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\t\tcase linkStopRequest <- struct{}{}:\n\t\t\t\t\t\t\t\t\tappendRunnerLogLine(\"Sent stop request to comm link\")\n\t\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\t\tappendRunnerLogLine(\"Stop request already sent to comm link\")\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase event := <-execEventChan:\n\t\t\t\t\t\t\tswitch event.(type) {\n\t\t\t\t\t\t\t\tcase EventSessionInfoRead:\n\t\t\t\t\t\t\t\t\tappendRunnerLogLine(\"Session info received\")\n\t\t\t\t\t\t\t\t\topSet := rtdb.NewOpSet()\n\t\t\t\t\t\t\t\t\topSet.Call(typeBatchResult, batchResultId, \"SetSessionInfo\", event.(EventSessionInfoRead).sessionInfo)\n\t\t\t\t\t\t\t\t\terr := runner.rtdbServer.ExecuteOpSet(opSet)\n\t\t\t\t\t\t\t\t\tif err != nil { panic(err) }\n\n\t\t\t\t\t\t\t\tcase EventInfoLogRead:\n\t\t\t\t\t\t\t\t\tlogContent := event.(EventInfoLogRead).infoLog\n\t\t\t\t\t\t\t\t\tappendTestInfoLog(logContent)\n\n\t\t\t\t\t\t\t\tcase EventTestCaseStarted:\n\t\t\t\t\t\t\t\t\ttestCasePath := event.(EventTestCaseStarted).testCasePath\n\n\t\t\t\t\t\t\t\t\tif _, isAlreadyProcessed := processedCasePaths[testCasePath]; isAlreadyProcessed {\n\t\t\t\t\t\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"WARNING: got EventTestCaseStarted for already-processed test case '%s'; ignoring\", testCasePath))\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\trunner.setTestCaseStatus(batchResultId, testCasePath, TEST_STATUS_CODE_RUNNING)\n\t\t\t\t\t\t\t\t\t\tcurrentlyRunningCases[testCasePath] = 
true\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tcase EventTestCaseFinished:\n\t\t\t\t\t\t\t\t\ttestCaseInfo\t:= event.(EventTestCaseFinished)\n\t\t\t\t\t\t\t\t\tpath\t\t\t:= testCaseInfo.path\n\n\t\t\t\t\t\t\t\t\tif _, isCurrentlyRunning := currentlyRunningCases[path]; !isCurrentlyRunning {\n\t\t\t\t\t\t\t\t\t\tif _, isAlreadyProcessed := processedCasePaths[path]; !isAlreadyProcessed {\n\t\t\t\t\t\t\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"WARNING: got EventTestCaseFinished for test case '%s' that isn't running; ignoring\", path))\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tdelete(currentlyRunningCases, path)\n\t\t\t\t\t\t\t\t\t\tprocessedCasePaths[path] = true\n\t\t\t\t\t\t\t\t\t\trunner.finishTestCase(batchResultId, testCaseInfo) // upload to rtdb\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tcase EventProcessStarted:\n\t\t\t\t\t\t\t\t\tappendRunnerLogLine(\"Test process started\")\n\n\t\t\t\t\t\t\t\tcase EventProcessLaunchFailed:\n\t\t\t\t\t\t\t\t\tlaunchFailed := event.(EventProcessLaunchFailed)\n\t\t\t\t\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"Process launch failed: %s\", launchFailed.reason))\n\t\t\t\t\t\t\t\t\texecutionStatus = BATCH_EXEC_STATUS_GENERIC_ERROR\n\n\t\t\t\t\t\t\t\tcase EventExecutionFinished:\n\t\t\t\t\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"Test execution finished with status %#v\", event.(EventExecutionFinished).status))\n\t\t\t\t\t\t\t\t\tswitch (event.(EventExecutionFinished).status) {\n\t\t\t\t\t\t\t\t\t\tcase EXEC_STATUS_DONE:\n\t\t\t\t\t\t\t\t\t\t\texecutionStatus = BATCH_EXEC_STATUS_DONE\n\t\t\t\t\t\t\t\t\t\tcase EXEC_STATUS_LINK_ERROR:\n\t\t\t\t\t\t\t\t\t\t\texecutionStatus = BATCH_EXEC_STATUS_LINK_ERROR\n\t\t\t\t\t\t\t\t\t\tcase EXEC_STATUS_TIMEOUT:\n\t\t\t\t\t\t\t\t\t\t\texecutionStatus = BATCH_EXEC_STATUS_GENERIC_ERROR\n\t\t\t\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"WARNING: unknown end status received: %#v\", 
event.(EventExecutionFinished).status))\n\t\t\t\t\t\t\t\t\t\t\texecutionStatus = BATCH_EXEC_STATUS_GENERIC_ERROR\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"WARNING: unknown execute event received: %#v\", event))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Disconnect from target.\n\t\t\t\t// \\todo [petri] keep link active for longer?\n\t\t\t\tlink.Stop()\n\n\t\t\t\t// Reset unfinished (running) cases to pending, so they can be re-run in the future.\n\t\t\t\tfor testCasePath, _ := range currentlyRunningCases {\n\t\t\t\t\trunner.setTestCaseStatus(batchResultId, testCasePath, TEST_STATUS_CODE_PENDING)\n\t\t\t\t}\n\n\t\t\t\t// Remove processed cases from the list\n\t\t\t\tdstNdx := 0\n\t\t\t\tfor srcNdx := 0; srcNdx < len(testCasePaths); srcNdx++ {\n\t\t\t\t\tcasePath := testCasePaths[srcNdx]\n\t\t\t\t\tif _, ok := processedCasePaths[casePath]; !ok {\n\t\t\t\t\t\ttestCasePaths[dstNdx] = testCasePaths[srcNdx]\n\t\t\t\t\t\tdstNdx++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnumProcessed := len(testCasePaths) - dstNdx\n\t\t\t\tif numProcessed > 0 {\n\t\t\t\t\tappendRunnerLogLine(fmt.Sprintf(\"%d test case(s) processed\", numProcessed))\n\t\t\t\t\ttestCasePaths = testCasePaths[0:dstNdx]\n\t\t\t\t\tdidProgress = true\n\t\t\t\t}\n\n\t\t\t\tif runCanceled {\n\t\t\t\t\tappendRunnerLogLine(\"Run canceled\")\n\t\t\t\t}\n\n\t\t\t\tif runCanceled || didProgress {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tappendRunnerLogLine(\"WARNING: no test cases processed\")\n\t\t\t}\n\n\t\t\t// Exit loop if run was stopped or no progress was made.\n\t\t\tif runCanceled || !didProgress {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// Mark the batch inactive and set its status.\n\tvar batchStatus BatchStatusCode\n\tif runCanceled {\n\t\tbatchStatus = BATCH_STATUS_CODE_CANCELED\n\t} else if len(testCasePaths) > 0 {\n\t\tbatchStatus = BATCH_STATUS_CODE_INTERRUPTED\n\t} else {\n\t\tbatchStatus = BATCH_STATUS_CODE_FINISHED\n\t}\n\n\t// Write 
status of batch result (in batchResult itself and in list of active batchResults).\n\t{\n\t\topSet := rtdb.NewOpSet()\n\t\topSet.Call(typeBatchResult, batchResultId, \"SetStatus\", batchStatus)\n\t\topSet.Call(typeActiveBatchResultList, \"activeBatchResultList\", \"Remove\", batchResultId)\n\t\terr := runner.rtdbServer.ExecuteOpSet(opSet)\n\t\tif err != nil { panic(err) }\n\t}\n\n\tif deviceConfig.IsADBDevice {\n\t\terr := RemoveADBPortForward(deviceConfig.ADBSerialNumber, batchParams.TargetPort)\n\t\tif err != nil {\n\t\t\tappendRunnerLogLine(fmt.Sprintf(\"WARNING: Failed to remove ADB port forward: %v\", err))\n\t\t}\n\t}\n\n\tappendRunnerLogLine(fmt.Sprintf(\"Ending test batch execution at %v\", time.Now().Format(defaultHumanReadableTimeFormat)))\n}", "func (_BaseContentFactoryExt *BaseContentFactoryExtTransactor) ExecuteAccessBatch(opts *bind.TransactOpts, _opCodes []uint32, _contentAddrs []common.Address, _userAddrs []common.Address, _ctxHashes [][32]byte, _ts []*big.Int, _amt []*big.Int) (*types.Transaction, error) {\n\treturn _BaseContentFactoryExt.contract.Transact(opts, \"executeAccessBatch\", _opCodes, _contentAddrs, _userAddrs, _ctxHashes, _ts, _amt)\n}", "func (_ExecutorContract *ExecutorContractSession) ExecuteCipherBatch(batchIndex uint64, cipherBatchHash [32]byte, transactions [][]byte, keyperIndex uint64) (*types.Transaction, error) {\n\treturn _ExecutorContract.Contract.ExecuteCipherBatch(&_ExecutorContract.TransactOpts, batchIndex, cipherBatchHash, transactions, keyperIndex)\n}", "func (_ExecutorContract *ExecutorContractTransactorSession) ExecuteCipherBatch(batchIndex uint64, cipherBatchHash [32]byte, transactions [][]byte, keyperIndex uint64) (*types.Transaction, error) {\n\treturn _ExecutorContract.Contract.ExecuteCipherBatch(&_ExecutorContract.TransactOpts, batchIndex, cipherBatchHash, transactions, keyperIndex)\n}", "func (self *SawtoothClient) ExecutePayloadBatch(payloads []SawtoothPayload) (string, error) {\n\ttransactions := 
make([]*transaction_pb2.Transaction, len(payloads))\n\n\tfor i, payload := range payloads {\n\t\ttransaction, err := self.CreateTransaction(payload)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Error while creating transaction for payload %d: %s\", i, err)\n\t\t}\n\t\ttransactions[i] = transaction\n\t}\n\n\tbatch, err := self.CreateBatch(transactions)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbatchId := batch.HeaderSignature\n\n\tbatches := []*batch_pb2.Batch{batch}\n\tbatchList, err := self.CreateBatchList(batches)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = self.Transport.SubmitBatchList(batchList)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn batchId, nil\n}", "func (s *Strategy) ExecBatch(handle HandleFunc) []error {\n\tcurrent := s.gen.Next()\n\tvar (\n\t\twg sync.WaitGroup\n\t\tmErr sync.Mutex\n\t\texecErrs []error\n\t)\n\twg.Add(current)\n\tfor i := 0; i < current; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := handle()\n\t\t\tif err != nil {\n\t\t\t\tmErr.Lock()\n\t\t\t\tdefer mErr.Unlock()\n\n\t\t\t\texecErrs = append(execErrs, err)\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\n\treturn execErrs\n}", "func SafeExecuteBatch(b *storage.TableBatch) error {\n\treturn retryable.RetryWithOpts(b.ExecuteBatch, retryableOptions...)\n}", "func (bw *blockWriter) batchExecute(db *sql.DB, tableNum int) {\n\t// buffer values\n\tfor i := 0; i < blockWriterBatchSize; i++ {\n\t\tblockID := bw.rand.Int63()\n\t\tblockData := bw.randomBlock()\n\t\tbw.blockCount++\n\t\tbw.values[i] = fmt.Sprintf(\"(%d,'%s',%d,'%s')\", blockID, bw.id, bw.blockCount, blockData)\n\t}\n\tstart := time.Now()\n\tvar (\n\t\terr error\n\t\tindex string\n\t)\n\n\tif bw.index > 0 {\n\t\tindex = fmt.Sprintf(\"%d\", bw.index)\n\t}\n\t_, err = db.Exec(\n\t\tfmt.Sprintf(\n\t\t\t\"INSERT INTO block_writer%s (block_id, writer_id, block_num, raw_bytes) VALUES %s\",\n\t\t\tindex, strings.Join(bw.values, \",\")),\n\t)\n\n\tif err != nil 
{\n\t\tblockWriteFailedCounter.Inc()\n\t\tlog.Errorf(\"[block writer] insert err %v\", err)\n\t\treturn\n\t}\n\tbw.index = (bw.index + 1) % tableNum\n\tblockBatchWriteDuration.Observe(time.Since(start).Seconds())\n}", "func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {\n\tif c.batchTracer != nil {\n\t\tctx = c.batchTracer.TraceBatchStart(ctx, c, TraceBatchStartData{Batch: b})\n\t\tdefer func() {\n\t\t\terr := br.(interface{ earlyError() error }).earlyError()\n\t\t\tif err != nil {\n\t\t\t\tc.batchTracer.TraceBatchEnd(ctx, c, TraceBatchEndData{Err: err})\n\t\t\t}\n\t\t}()\n\t}\n\n\tif err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {\n\t\treturn &batchResults{ctx: ctx, conn: c, err: err}\n\t}\n\n\tmode := c.config.DefaultQueryExecMode\n\n\tfor _, bi := range b.queuedQueries {\n\t\tvar queryRewriter QueryRewriter\n\t\tsql := bi.query\n\t\targuments := bi.arguments\n\n\toptionLoop:\n\t\tfor len(arguments) > 0 {\n\t\t\tswitch arg := arguments[0].(type) {\n\t\t\tcase QueryRewriter:\n\t\t\t\tqueryRewriter = arg\n\t\t\t\targuments = arguments[1:]\n\t\t\tdefault:\n\t\t\t\tbreak optionLoop\n\t\t\t}\n\t\t}\n\n\t\tif queryRewriter != nil {\n\t\t\tvar err error\n\t\t\tsql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)\n\t\t\tif err != nil {\n\t\t\t\treturn &batchResults{ctx: ctx, conn: c, err: fmt.Errorf(\"rewrite query failed: %v\", err)}\n\t\t\t}\n\t\t}\n\n\t\tbi.query = sql\n\t\tbi.arguments = arguments\n\t}\n\n\tif mode == QueryExecModeSimpleProtocol {\n\t\treturn c.sendBatchQueryExecModeSimpleProtocol(ctx, b)\n\t}\n\n\t// All other modes use extended protocol and thus can use prepared statements.\n\tfor _, bi := range b.queuedQueries {\n\t\tif sd, ok := c.preparedStatements[bi.query]; ok {\n\t\t\tbi.sd = sd\n\t\t}\n\t}\n\n\tswitch mode {\n\tcase QueryExecModeExec:\n\t\treturn c.sendBatchQueryExecModeExec(ctx, b)\n\tcase QueryExecModeCacheStatement:\n\t\treturn c.sendBatchQueryExecModeCacheStatement(ctx, 
b)\n\tcase QueryExecModeCacheDescribe:\n\t\treturn c.sendBatchQueryExecModeCacheDescribe(ctx, b)\n\tcase QueryExecModeDescribeExec:\n\t\treturn c.sendBatchQueryExecModeDescribeExec(ctx, b)\n\tdefault:\n\t\tpanic(\"unknown QueryExecMode\")\n\t}\n}", "func (_ExecutorContract *ExecutorContractTransactorSession) ExecutePlainBatch(batchIndex uint64, transactions [][]byte) (*types.Transaction, error) {\n\treturn _ExecutorContract.Contract.ExecutePlainBatch(&_ExecutorContract.TransactOpts, batchIndex, transactions)\n}", "func (_ExecutorContract *ExecutorContractSession) ExecutePlainBatch(batchIndex uint64, transactions [][]byte) (*types.Transaction, error) {\n\treturn _ExecutorContract.Contract.ExecutePlainBatch(&_ExecutorContract.TransactOpts, batchIndex, transactions)\n}", "func runBatchMode(ctx *sql.Context, se *engine.SqlEngine, input io.Reader, continueOnErr bool) error {\n\tscanner := NewSqlStatementScanner(input)\n\n\tvar query string\n\tfor scanner.Scan() {\n\t\tif fileReadProg != nil {\n\t\t\tupdateFileReadProgressOutput()\n\t\t\tfileReadProg.setReadBytes(int64(len(scanner.Bytes())))\n\t\t}\n\t\tquery += scanner.Text()\n\t\tif len(query) == 0 || query == \"\\n\" {\n\t\t\tcontinue\n\t\t}\n\t\tif err := processBatchQuery(ctx, query, se); err != nil {\n\t\t\t// TODO: this line number will not be accurate for errors that occur when flushing a batch of inserts (as opposed\n\t\t\t// to processing the query)\n\t\t\tverr := formatQueryError(fmt.Sprintf(\"error on line %d for query %s\", scanner.statementStartLine, query), err)\n\t\t\tcli.PrintErrln(verr.Verbose())\n\t\t\t// If continueOnErr is set keep executing the remaining queries but print the error out anyway.\n\t\t\tif !continueOnErr {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tquery = \"\"\n\t}\n\n\tupdateBatchEditOutput()\n\tcli.Println() // need a newline after all updates are executed\n\n\tif err := scanner.Err(); err != nil {\n\t\tcli.Println(err.Error())\n\t}\n\n\treturn flushBatchedEdits(ctx, se)\n}", "func (s 
*rpcServer) BatchCommands(ss tikvpb.Tikv_BatchCommandsServer) error {\n\tdefer func() {\n\t\tif v := recover(); v != nil {\n\t\t\tlogutil.BgLogger().Error(\"panic when RPC server handing batch commands\", zap.Reflect(\"r\", v),\n\t\t\t\tzap.Stack(\"stack trace\"))\n\t\t}\n\t}()\n\tfor {\n\t\treqs, err := ss.Recv()\n\t\tif err != nil {\n\t\t\tlogutil.BgLogger().Error(\"RPC server batch commands receive fail\", zap.Error(err))\n\t\t\treturn err\n\t\t}\n\n\t\tresponses := make([]*tikvpb.BatchCommandsResponse_Response, 0, len(reqs.Requests))\n\t\tfor _, req := range reqs.Requests {\n\t\t\tvar response *tikvpb.BatchCommandsResponse_Response\n\t\t\tswitch request := req.Cmd.(type) {\n\t\t\tcase *tikvpb.BatchCommandsRequest_Request_Coprocessor:\n\t\t\t\tcop := request.Coprocessor\n\t\t\t\tresp, err := s.Coprocessor(context.Background(), cop)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tresponse = &tikvpb.BatchCommandsResponse_Response{\n\t\t\t\t\tCmd: &tikvpb.BatchCommandsResponse_Response_Coprocessor{\n\t\t\t\t\t\tCoprocessor: resp,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\tcase *tikvpb.BatchCommandsRequest_Request_Empty:\n\t\t\t\tresponse = &tikvpb.BatchCommandsResponse_Response{\n\t\t\t\t\tCmd: &tikvpb.BatchCommandsResponse_Response_Empty{\n\t\t\t\t\t\tEmpty: &tikvpb.BatchCommandsEmptyResponse{\n\t\t\t\t\t\t\tTestId: request.Empty.TestId,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlogutil.BgLogger().Info(\"RPC server batch commands receive unknown request\", zap.Any(\"req\", request))\n\t\t\t\tresponse = &tikvpb.BatchCommandsResponse_Response{\n\t\t\t\t\tCmd: &tikvpb.BatchCommandsResponse_Response_Empty{\n\t\t\t\t\t\tEmpty: &tikvpb.BatchCommandsEmptyResponse{},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t\tresponses = append(responses, response)\n\t\t}\n\n\t\terr = ss.Send(&tikvpb.BatchCommandsResponse{\n\t\t\tResponses: responses,\n\t\t\tRequestIds: reqs.GetRequestIds(),\n\t\t})\n\t\tif err != nil {\n\t\t\tlogutil.BgLogger().Error(\"RPC 
server batch commands send fail\", zap.Error(err))\n\t\t\treturn err\n\t\t}\n\t}\n}", "func execlist(tx migration.LimitedTx, stms []string) error {\n\tvar err error\n\tfor _, s := range stms {\n\t\t_, err = tx.Exec(s)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}", "func (s *Stmt) ExecBatchContext(ctx context.Context, args [][]interface{}) (Result, error) {\n\treturn s.execBatchContext(ctx, &args)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ExecuteOne executes one command
func (c *SSHClient) ExecuteOne(cmd string, prependSudo bool) (stdout, stderr string, err error) { sess, err := c.client.NewSession() if err != nil { return "", "", err } defer sess.Close() var stdoutBuf, stderrBuf bytes.Buffer sess.Stdout = &stdoutBuf sess.Stderr = &stderrBuf if prependSudo { cmd = "sudo " + cmd } _ = sess.Run(cmd) return strings.TrimSuffix(stdoutBuf.String(), "\n"), strings.TrimSuffix(stderrBuf.String(), "\n"), nil }
[ "func (db *DB) ExecOne(query interface{}, params ...interface{}) (orm.Result, error) {\n\tvar res orm.Result\n\tvar err error\n\ttracing.Trace(db.inner.Context(), query, func() error {\n\t\tif db.tx != nil {\n\t\t\tres, err = db.tx.ExecOne(query, params...)\n\t\t} else {\n\t\t\tres, err = db.inner.ExecOne(query, params...)\n\t\t}\n\t\treturn err\n\t})\n\treturn res, err\n}", "func dbExecSingle(ctx Ctx, conn DbConn, queryStr string, args []interface{}) error {\n\tquery := SqlQueryOrd(queryStr, args...)\n\tquery.Append(`returning true`)\n\n\tvar ok bool\n\treturn query.Query(ctx, conn, &ok)\n}", "func (c *DockerClient) ExecOne(containerID string, cmd []string, output io.Writer) error {\n\texec, err := c.CreateExec(docker.CreateExecOptions{\n\t\tAttachStdin: false,\n\t\tAttachStdout: true,\n\t\tAttachStderr: true,\n\t\tTty: false,\n\t\tCmd: cmd,\n\t\tContainer: containerID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.StartExec(exec.ID, docker.StartExecOptions{\n\t\tOutputStream: output,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (q *Query) ExecSingle(dest interface{}) *Result {\n\tsingle := func(db *sqlx.DB, r *Result) error {\n\t\terr := db.Get(dest, q.SQL, q.Args...)\n\t\tif err == nil {\n\t\t\tr.RowsReturned = 1\n\t\t}\n\t\treturn err\n\t}\n\n\treturn execDB(q, single)\n}", "func ExecOne(tx *sql.Tx, query string, args ...interface{}) (int64, error) {\n\tresult, execErr := tx.Exec(query, args...)\n\tif execErr != nil {\n\t\treturn 0, fmt.Errorf(\"failed to execute: %w\", execErr)\n\t}\n\n\tcount, countErr := result.RowsAffected()\n\tif countErr != nil {\n\t\treturn 0, fmt.Errorf(\"failed to count affected rows: %w\", countErr)\n\t}\n\n\treturn count, countErr\n}", "func queryOne(exec executer, d dialect.Dialect, stmt Executable, dest interface{}) error {\n\tresult, err := query(exec, d, stmt)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Close the result rows or sqlite3 will open another connection\n\tdefer 
result.Close()\n\treturn result.One(dest)\n}", "func (q taskQuery) One(exec boil.Executor) (*Task, error) {\n\to := &Task{}\n\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Bind(nil, exec, o)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"db: failed to execute a one query for tasks\")\n\t}\n\n\tif err := o.doAfterSelectHooks(exec); err != nil {\n\t\treturn o, err\n\t}\n\n\treturn o, nil\n}", "func executeSingle(ctx devspacecontext.Context, extraEnv map[string]string, event string) error {\n\tconfig := ctx.Config()\n\tif config == nil {\n\t\treturn nil\n\t}\n\n\tc := config.Config()\n\tif c.Hooks != nil && len(c.Hooks) > 0 {\n\t\thooksToExecute := []*latest.HookConfig{}\n\n\t\t// Gather all hooks we should execute\n\t\tfor _, hook := range c.Hooks {\n\t\t\tfor _, e := range hook.Events {\n\t\t\t\tif e == event {\n\t\t\t\t\thooksToExecute = append(hooksToExecute, hook)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Execute hooks\n\t\tfor _, hookConfig := range hooksToExecute {\n\t\t\tif hookConfig.Disabled || !command.ShouldExecuteOnOS(hookConfig.OperatingSystem) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr := runHook(ctx, hookConfig, extraEnv, event)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (q taskQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Task, error) {\n\to := &Task{}\n\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Bind(ctx, exec, o)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"models: failed to execute a one query for tasks\")\n\t}\n\n\tif err := o.doAfterSelectHooks(ctx, exec); err != nil {\n\t\treturn o, err\n\t}\n\n\treturn o, nil\n}", "func (c *Redigo) exec(commandName string, args ...any) (reply any, err error) {\n\tif len(args) < 1 {\n\t\treturn nil, errors.New(\"missing required 
arguments\")\n\t}\n\n\tconn := c.pool.Get()\n\tdefer conn.Close()\n\n\tif c.IsDebug() {\n\t\tst := time.Now()\n\t\treply, err = conn.Do(commandName, args...)\n\t\tc.Logf(\n\t\t\t\"operate redis cache. command: %s, key: %v, elapsed time: %.03f\\n\",\n\t\t\tcommandName, args[0], time.Since(st).Seconds()*1000,\n\t\t)\n\t\treturn\n\t}\n\n\treturn conn.Do(commandName, args...)\n}", "func (db *PgDB) namedExecOne(query string, arg interface{}) error {\n\tres, err := db.sql.NamedExec(query, arg)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error in query %v \\narg %v\", query, arg)\n\t}\n\tnum, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn errors.Wrapf(\n\t\t\terr,\n\t\t\t\"error checking rows affected for query %v\\n arg %v\",\n\t\t\tquery, arg)\n\t}\n\tif num != 1 {\n\t\treturn errors.Errorf(\"error: %v rows affected on query %v \\narg %v\", num, query, arg)\n\t}\n\treturn nil\n}", "func (db *DB) QueryOne(stmt Executable, i interface{}) error {\n\tresult, err := db.Query(stmt)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Close the result rows or sqlite3 will open another connection\n\tdefer result.rows.Close()\n\treturn result.One(i)\n}", "func (e *executor) Execute() error {\n\tif len(e.executables) < 1 {\n\t\treturn errors.New(\"nothing to Work\")\n\t}\n\n\tlog(e.id).Infof(\"processing %d item(s)\", len(e.executables))\n\treturn nil\n}", "func (q utxoQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Utxo, error) {\n\to := &Utxo{}\n\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Bind(ctx, exec, o)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"models: failed to execute a one query for utxo\")\n\t}\n\n\tif err := o.doAfterSelectHooks(ctx, exec); err != nil {\n\t\treturn o, err\n\t}\n\n\treturn o, nil\n}", "func (r *Runner) runSingle(ctx context.Context) (*Result, error) {\n\tif err := r.server.Stop(ctx); err != nil {\n\t\ttesting.ContextLog(ctx, 
\"Failed to stop Iperf server, err: \", err)\n\t}\n\n\terr := r.server.Start(ctx, r.config)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to start Iperf server\")\n\t}\n\tdefer r.server.Stop(ctx)\n\n\tresult, err := r.client.Start(ctx, r.config)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to run Iperf client\")\n\t}\n\n\tif r.config.FetchServerResults {\n\t\tresult, err = r.server.FetchResult(ctx, r.config)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to fetch results from Iperf server\")\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (q *Query) RunOne() (interface{}, error) {\n\t// optimize the query by limiting number of models to one\n\toldLimit := q.limit\n\tq.limit = 1\n\n\tresult, err := q.Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// set the limit back to the old limit in case the query will be run again\n\tq.limit = oldLimit\n\n\t// get the first item, if any\n\tresultVal := reflect.ValueOf(result)\n\tif resultVal.Len() == 0 {\n\t\treturn nil, NewModelNotFoundError()\n\t}\n\tfirst := resultVal.Index(0)\n\treturn first.Interface(), nil\n}", "func (tx *TX) QueryOne(stmt Executable, i interface{}) error {\n\tresult, err := tx.Query(stmt)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Close the result rows or sqlite3 will open another connection\n\tdefer result.rows.Close()\n\treturn result.One(i)\n}", "func (q synchronizationQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Synchronization, error) {\n\to := &Synchronization{}\n\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Bind(ctx, exec, o)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"models: failed to execute a one query for Synchronization\")\n\t}\n\n\treturn o, nil\n}", "func (accessor *DbAccessor) Execute(ctx context.Context, command string, args ...interface{}) (sql.Result, error) {\n\tvar r sql.Result\n\tvar err 
error\n\tLogOperation(ctx, \"ExecuteCommand\", func() error {\n\t\tr, err = accessor.db.ExecContext(ctx, command, args...)\n\t\treturn err\n\t})\n\n\treturn r, err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewServicesHandler creates a new handler to return all specs.
func NewServicesHandler(storage storage.Storage) func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { specs, err := storage.GetSpecs() if err != nil { http.Error(w, fmt.Sprintf("Error getting the specs from the storage %s", err), http.StatusInternalServerError) return } json, err := json.Marshal(specs) if err != nil { http.Error(w, fmt.Sprintf("Error converting the specs to json %s", err), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(json) } }
[ "func (h *ManagerHandler) newServiceHandler(ctx *fasthttp.RequestCtx) {\n\tdetail, err := models.ExtractCtxRequestDetail(ctx)\n\n\tservice := models.ApronService{}\n\terr = json.Unmarshal(detail.RequestBody, &service)\n\tinternal.CheckError(err)\n\n\tif h.storageManager.IsKeyExistingInBucket(internal.ServiceBucketName, service.Id) {\n\t\tctx.SetStatusCode(fasthttp.StatusBadRequest)\n\t\tctx.WriteString(\"duplicated service name\")\n\t} else {\n\t\tbinaryService, err := proto.Marshal(&service)\n\t\tinternal.CheckError(err)\n\t\terr = h.storageManager.SaveBinaryKeyData(internal.ServiceBucketName, service.Id, binaryService)\n\t\tinternal.CheckError(err)\n\n\t\tctx.SetStatusCode(fasthttp.StatusCreated)\n\t}\n}", "func NewHandler(ctx context.Context, endpoint string) (Handler, error) {\n\tif len(endpoint) == 0 {\n\t\treturn nil, errors.New(\"endpoint is empty\")\n\t}\n\n\t// Get the client\n\tcfg := openapi.NewConfiguration()\n\tapiClient := openapi.NewAPIClient(cfg)\n\tif endpoint != \"localhost/cnwan\" {\n\t\tapiClient.ChangeBasePath(strings.Replace(cfg.BasePath, \"localhost/cnwan\", endpoint, 1))\n\t}\n\n\treturn &servicesHandler{\n\t\tclient: apiClient,\n\t\tmainCtx: ctx,\n\t}, nil\n}", "func NewHandlerService(getService *repository.GetService, storeService *repository.StoreService) *HandlerService {\n\tif len(getService.GetLast(1)) == 0 { // if there is no even genesis yet\n\t\tgo func() {\n\t\t\tgenesisBlock := repository.GetGenesisBlock()\n\t\t\tspew.Dump(genesisBlock)\n\t\t\tstoreService.Store(genesisBlock)\n\t\t}()\n\t}\n\n\treturn &HandlerService{getService, storeService}\n}", "func NewServiceHandler(r *mux.Router, mw middleware.Middleware, cfg Config) {\n\n\tctx := logger.NewContext(context.Background(), nil)\n\tlog := logger.WithContext(ctx)\n\n\tlog.Infof(\"%s:> init service routes\", logPrefix)\n\n\thandler := &Handler{\n\t\tConfig: cfg,\n\t}\n\n\tr.Handle(\"/namespace/{namespace}/service\", 
h.Handle(mw.Authenticate(handler.ServiceCreateH))).Methods(http.MethodPost)\n\tr.Handle(\"/namespace/{namespace}/service\", h.Handle(mw.Authenticate(handler.ServiceListH))).Methods(http.MethodGet)\n\tr.Handle(\"/namespace/{namespace}/service/{service}\", h.Handle(mw.Authenticate(handler.ServiceInfoH))).Methods(http.MethodGet)\n\tr.Handle(\"/namespace/{namespace}/service/{service}\", h.Handle(mw.Authenticate(handler.ServiceUpdateH))).Methods(http.MethodPut)\n\tr.Handle(\"/namespace/{namespace}/service/{service}\", h.Handle(mw.Authenticate(handler.ServiceRemoveH))).Methods(http.MethodDelete)\n\tr.Handle(\"/namespace/{namespace}/service/{service}/logs\", h.Handle(mw.Authenticate(handler.ServiceLogsH))).Methods(http.MethodGet)\n}", "func NewServices(t mockConstructorTestingTNewServices) *Services {\n\tmock := &Services{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New(serviceID, serviceName string, tt timetable.Timetable, em event.Manager, nm *store.NodesMap) *ServiceHandler {\n\tsh := &ServiceHandler{\n\t\tServiceID: serviceID,\n\t\tServiceName: serviceName,\n\t\tTimetable: tt,\n\t\tNodesMap: nm,\n\t\tEventManager: em,\n\t}\n\tsh.StartStopper = startstopper.NewGo(startstopper.RunnerFunc(sh.run))\n\tsh.eventLoop = startstopper.NewGo(startstopper.RunnerFunc(sh.runEventLoop))\n\tsh.periodLoop = startstopper.NewGo(startstopper.RunnerFunc(sh.runPeriodLoop))\n\n\treturn sh\n}", "func (s *Server) HandleGetServices() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\tdefer utils.CloseTheCloser(r.Body)\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\tdefer cancel()\n\n\t\t// Verify token\n\t\t_, err := s.auth.VerifyToken(utils.GetToken(r))\n\t\tif err != nil {\n\t\t\t_ = helpers.Logger.LogError(helpers.GetRequestID(ctx), \"Failed to apply service\", err, nil)\n\t\t\t_ = helpers.Response.SendErrorResponse(ctx, w, http.StatusUnauthorized, 
err)\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tprojectID := vars[\"project\"]\n\t\tserviceID, serviceIDExists := r.URL.Query()[\"serviceId\"]\n\t\tversion, versionExists := r.URL.Query()[\"version\"]\n\n\t\tservices, err := s.driver.GetServices(ctx, projectID)\n\t\tif err != nil {\n\t\t\t_ = helpers.Logger.LogError(helpers.GetRequestID(ctx), \"Failed to apply service\", err, nil)\n\t\t\t_ = helpers.Response.SendErrorResponse(ctx, w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tvar result []*model.Service\n\t\tif serviceIDExists && versionExists {\n\t\t\tfor _, val := range services {\n\t\t\t\tif val.ProjectID == projectID && val.ID == serviceID[0] && val.Version == version[0] {\n\t\t\t\t\tresult = append(result, val)\n\t\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\t\t_ = json.NewEncoder(w).Encode(model.Response{Result: result})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_ = json.NewEncoder(w).Encode(map[string]string{\"error\": fmt.Sprintf(\"serviceID(%s) or version(%s) not present in state\", serviceID[0], version[0])})\n\t\t\treturn\n\t\t}\n\n\t\tif serviceIDExists && !versionExists {\n\t\t\tfor _, val := range services {\n\t\t\t\tif val.ID == serviceID[0] {\n\t\t\t\t\tresult = append(result, val)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_ = json.NewEncoder(w).Encode(model.Response{Result: result})\n\t\t\treturn\n\t\t}\n\n\t\tresult = services\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(model.Response{Result: result})\n\t}\n}", "func CreateServiceHandlers(lbc *controller.LoadBalancerController) cache.ResourceEventHandlerFuncs {\n\treturn 
cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tsvc := obj.(*api_v1.Service)\n\t\t\tif lbc.IsExternalServiceForStatus(svc) {\n\t\t\t\tlbc.AddSyncQueue(svc)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tglog.V(3).Infof(\"Adding service: %v\", svc.Name)\n\t\t\tlbc.EnqueueIngressForService(svc)\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tsvc, isSvc := obj.(*api_v1.Service)\n\t\t\tif !isSvc {\n\t\t\t\tdeletedState, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tglog.V(3).Infof(\"Error received unexpected object: %v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tsvc, ok = deletedState.Obj.(*api_v1.Service)\n\t\t\t\tif !ok {\n\t\t\t\t\tglog.V(3).Infof(\"Error DeletedFinalStateUnknown contained non-Service object: %v\", deletedState.Obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif lbc.IsExternalServiceForStatus(svc) {\n\t\t\t\tlbc.AddSyncQueue(svc)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tglog.V(3).Infof(\"Removing service: %v\", svc.Name)\n\t\t\tlbc.EnqueueIngressForService(svc)\n\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tif !reflect.DeepEqual(old, cur) {\n\t\t\t\tsvc := cur.(*api_v1.Service)\n\t\t\t\tif lbc.IsExternalServiceForStatus(svc) {\n\t\t\t\t\tlbc.AddSyncQueue(svc)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tglog.V(3).Infof(\"Service %v changed, syncing\", svc.Name)\n\t\t\t\tlbc.EnqueueIngressForService(svc)\n\t\t\t}\n\t\t},\n\t}\n}", "func NewHandler(srv Service) http.Handler {\n\th := handlers{srv}\n\tr := chi.NewRouter()\n\tr.Get(\"/\", h.handleGetDemoSample)\n\tr.Get(\"/error_demo\", h.handleGetErrorDemoSample)\n\treturn r\n}", "func NewHandler(service Service) Handler {\n\treturn Handler{service: service}\n}", "func ServiceHandlers(base *gin.RouterGroup) {\n\t// Services endpoints\n\tservices := base.Group(\"/services\")\n\t{\n\t\tservices.POST(\"\", perm.MustPlatformAdmin(), middleware.Payload(), service.CreateService)\n\t\tservices.GET(\"\", perm.MustRead(), service.ListServices)\n\n\t\t// 
Service endpoints\n\t\ts := services.Group(\"/:service\", smiddleware.Establish())\n\t\t{\n\t\t\ts.GET(\"\", perm.MustRead(), service.GetService)\n\t\t\ts.PUT(\"\", perm.MustBuildAccess(), middleware.Payload(), service.UpdateService)\n\t\t\ts.DELETE(\"\", perm.MustPlatformAdmin(), service.DeleteService)\n\n\t\t\t// Log endpoints\n\t\t\tLogServiceHandlers(s)\n\t\t} // end of service endpoints\n\t} // end of services endpoints\n}", "func ServiceHandlers(base *gin.RouterGroup) {\n\t// Services endpoints\n\tservices := base.Group(\"/services\")\n\t{\n\t\tservices.POST(\"\", perm.MustPlatformAdmin(), middleware.Payload(), api.CreateService)\n\t\tservices.GET(\"\", perm.MustRead(), api.GetServices)\n\n\t\t// Service endpoints\n\t\tservice := services.Group(\"/:service\", service.Establish())\n\t\t{\n\t\t\tservice.GET(\"\", perm.MustRead(), api.GetService)\n\t\t\tservice.PUT(\"\", perm.MustPlatformAdmin(), middleware.Payload(), api.UpdateService)\n\t\t\tservice.DELETE(\"\", perm.MustPlatformAdmin(), api.DeleteService)\n\n\t\t\t// Log endpoints\n\t\t\tLogServiceHandlers(service)\n\t\t} // end of service endpoints\n\t} // end of services endpoints\n}", "func New(impl api.Service, swaggerFilePath string) http.Handler {\n\tbaseSrv := newBaseServer(impl)\n\tcontainersSrv := newContainersServer(impl)\n\n\tcontainer := restful.NewContainer()\n\tcontainer.Add(baseSrv.WebService)\n\tcontainer.Add(containersSrv.WebService)\n\n\tswaggerConf := swagger.Config{\n\t\tWebServices: container.RegisteredWebServices(),\n\t\tApiPath: \"/docs/apidocs.json\",\n\t\tSwaggerPath: \"/docs/swagger/\",\n\t\tSwaggerFilePath: swaggerFilePath,\n\t}\n\tswagger.RegisterSwaggerService(swaggerConf, container)\n\n\treturn container\n}", "func newHandler(t *testing.T) handler {\n\tt.Helper()\n\n\tapp, err := application.New()\n\trequire.NoError(t, err)\n\treturn handler{app}\n}", "func New(service storage.Service) http.Handler {\n\tr := mux.NewRouter()\n\n\th := 
handler{service}\n\n\tr.HandleFunc(\"/health\", health)\n\tr.HandleFunc(\"/insfile\", responseJSONWithModules(h.insfileWithModules)).Methods(http.MethodPost)\n\tr.HandleFunc(\"/insfile/text\", responseTextWithModules(h.insfileWithModules)).Methods(http.MethodPost)\n\tr.HandleFunc(\"/insfile/traverse\", responseJSON(h.insfile)).Methods(http.MethodPost)\n\tr.HandleFunc(\"/insfile/traverse/text\", responseText(h.insfile)).Methods(http.MethodPost)\n\n\treturn r\n}", "func NewServices(driver storagedriver.StorageDriver) *Services {\n\tlayerUploadStore, err := newTemporaryLocalFSLayerUploadStore()\n\n\tif err != nil {\n\t\t// TODO(stevvooe): This failure needs to be understood in the context\n\t\t// of the lifecycle of the services object, which is uncertain at this\n\t\t// point.\n\t\tpanic(\"unable to allocate layerUploadStore: \" + err.Error())\n\t}\n\n\treturn &Services{\n\t\tdriver: driver,\n\t\tpathMapper: &pathMapper{\n\t\t\t// TODO(sday): This should be configurable.\n\t\t\troot: \"/docker/registry/\",\n\t\t\tversion: storagePathVersion,\n\t\t},\n\t\tlayerUploadStore: layerUploadStore,\n\t}\n}", "func NewHandlers(logger logging.Logging, db *pg.DB, authServerArg *server.Server) *Handlers {\n\tproductCategoryRepository = NewRepository(db)\n\tproductCategoryService = NewService(productCategoryRepository)\n\tproductCategoryHandlerLogging = logger\n\tauthServer = authServerArg\n\n\treturn &Handlers{}\n}", "func newHandler(store *Store, handler storeHandler) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\thandler(store, w, req)\n\t})\n}", "func NewHandler(service service.Interface) *Handler {\n\treturn &Handler{service: service}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewRPCHandler creates a new handler to return all specs.
func (h *Handler) NewRPCHandler() func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { serviceName := r.PostFormValue("service") procedure := r.PostFormValue("procedure") contentType := r.PostFormValue("contentType") timeout, _ := strconv.Atoi(r.PostFormValue("timeout")) body := r.PostFormValue("body") client, err := h.getClient(serviceName) if err != nil { http.Error(w, fmt.Sprintf("Error creating porthos client %s", err), http.StatusInternalServerError) return } fmt.Printf("RPC Call to Service %s, Procedure: %s, ContentType: %s", serviceName, procedure, contentType) // call the remote method. response, err := client.Call(procedure). WithTimeout(time.Duration(timeout)*time.Second). WithBodyContentType([]byte(body), contentType). Sync() if err != nil { http.Error(w, fmt.Sprintf("Error performing rpc request %s", err), http.StatusInternalServerError) return } var responseBody interface{} if response.ContentType == "application/json" { response.UnmarshalJSONTo(&responseBody) } else { responseBody = string(response.Content) } json, err := json.Marshal(responseToClient{ StatusCode: response.StatusCode, ContentType: response.ContentType, Body: responseBody, }) if err != nil { http.Error(w, fmt.Sprintf("Error converting the response to json %s", err), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(json) } }
[ "func NewRPCHandler() *RPCHandler {\n\treturn &RPCHandler{}\n}", "func NewRPCHandler(svc types.TokenAuthService) *RPCHandler {\n\treturn &RPCHandler{\n\t\tTokenAuthService: svc,\n\t}\n}", "func NewHandler(k keeper.Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {\n\t\tctx = ctx.WithEventManager(sdk.NewEventManager())\n\t\tswitch msg := msg.(type) {\n\t\t// this line is used by starport scaffolding # 1\n\t\t// \treturn handleMsg<Action>(ctx, k, msg)\n\t\tcase types.MsgCreateRegualtor:\n\t\t\treturn handleMsgCreateRegulator(ctx, msg, k)\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"unrecognized %s message type: %T\", types.ModuleName, msg)\n\t\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, errMsg)\n\t\t}\n\t}\n}", "func NewJSONRPCHandler(params HandlerParams) (Handler, error) {\n\treturn Handler{\n\t\tbridge: jhttp.NewBridge(handler.Map{\n\t\t\t\"getHealth\": methods.NewHealthCheck(),\n\t\t\t\"getAccount\": methods.NewAccountHandler(params.AccountStore),\n\t\t}, nil),\n\t\tlogger: params.Logger,\n\t}, nil\n}", "func NewJSONRPCHandler(srvcs ...interface{}) (http.Handler, error) {\n\ts := rpc.NewServer()\n\tfor _, srvc := range srvcs {\n\t\tif err := s.Register(srvc); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &JSONRPCHandler{s}, nil\n}", "func New(secret string, rcvr interface{}) *Handler {\n\tif secret == \"\" {\n\t\tpanic(\"webhook: called New with empty secret\")\n\t}\n\treturn &Handler{\n\t\tsecret: secret,\n\t\trcvr: reflect.ValueOf(rcvr),\n\t\tmethod: payloadMethods(reflect.TypeOf(rcvr)),\n\t}\n}", "func NewHandler(p rinq.Peer) rinq.CommandHandler {\n\treturn func(\n\t\tctx context.Context,\n\t\treq rinq.Request,\n\t\tres rinq.Response,\n\t) {\n\t\tswitch req.Command {\n\n\t\tcase \"command.success\":\n\t\t\tSuccess(ctx, req, res, p)\n\t\tcase \"command.fail\":\n\t\t\tFail(ctx, req, res, p)\n\t\tcase \"command.fail-payload\":\n\t\t\tFailWithPayload(ctx, req, res, p)\n\t\tcase 
\"command.error\":\n\t\t\tError(ctx, req, res, p)\n\t\tcase \"command.sleep\":\n\t\t\tSleep(ctx, req, res, p)\n\n\t\tcase \"notify.notify-me\":\n\t\t\tNotifyMe(ctx, req, res, p)\n\n\t\tdefault:\n\t\t\tres.Fail(\"unknown-command\", \"no such command: %s\", req.Command)\n\t\t\treq.Payload.Close()\n\t\t}\n\t}\n}", "func newHandler(sigs ...os.Signal) *signalHandler {\n\treturn &signalHandler{\n\t\tcallbacks: []func(){},\n\t\tchDisable: make(chan bool, 1),\n\t\tchSignals: make(chan os.Signal, 1),\n\t\tsignals: sigs,\n\t}\n}", "func NewHandler(k Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) sdk.Result {\n\t\tswitch msg := msg.(type) {\n\t\tcase MsgRecordReputation:\n\t\t\treturn handleMsgRecordReputation(ctx, k, msg)\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"unrecognized %s message type: %T\", types.ModuleName, msg)\n\t\t\treturn sdk.ErrUnknownRequest(errMsg).Result()\n\t\t}\n\t}\n}", "func (am AppModule) NewHandler() sdk.Handler { return nil }", "func NewHandler(c *HandlerConfig) func(http.ResponseWriter, *http.Request) {\n\n\t// pushback receives the push request and writes it into a file\n\t// according to a mapping provided by a json configuration\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tt := r.Header.Get(\"Content-Type\")\n\n\t\tif t != \"binary/octet-stream\" {\n\t\t\tlog.Printf(\"Wrong Content-Type %s\", t)\n\t\t\tw.Write([]byte(fmt.Sprintf(\"%s is not a supported Content-Type\", t)))\n\t\t\treturn\n\t\t}\n\n\t\t// Open test file\n\t\tf, err := os.Create(fmt.Sprintf(\"%s/%s.pushback\", c.Path, \"test\"))\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not open file %e\", err)\n\t\t}\n\n\t\tn, err := io.Copy(f, r.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could only receive %d\", n)\n\t\t}\n\n\t\tw.Write([]byte(\"OK\"))\n\t}\n}", "func (s *Server) newHandler() http.Handler {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/register\", s.wrapMiddleware(registerHandler)).Methods(\"POST\")\n\tr.HandleFunc(\"/session/{id}\", 
s.wrapMiddleware(getHandler)).Methods(\"GET\")\n\tr.HandleFunc(\"/session\", s.wrapMiddleware(createHandler)).Methods(\"POST\")\n\tr.HandleFunc(\"/readiness\", predis.NewReadinessCheck(s.pool))\n\n\treturn r\n}", "func NewHandler(keeper Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) sdk.Result {\n\t\tswitch msg := msg.(type) {\n\t\tcase MsgCreateGroup:\n\t\t\treturn handleMsgCreateGroup(ctx, keeper, msg)\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"Unrecognized data Msg type: %v\", msg.Type())\n\t\t\treturn sdk.ErrUnknownRequest(errMsg).Result()\n\t\t}\n\t}\n}", "func NewHandler(k Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {\n\t\tctx = ctx.WithEventManager(sdk.NewEventManager())\n\t\tswitch msg := msg.(type) {\n\t\tcase MsgCreateCDP:\n\t\t\treturn handleMsgCreateCDP(ctx, k, msg)\n\t\tcase MsgDeposit:\n\t\t\treturn handleMsgDeposit(ctx, k, msg)\n\t\tcase MsgWithdraw:\n\t\t\treturn handleMsgWithdraw(ctx, k, msg)\n\t\tcase MsgDrawDebt:\n\t\t\treturn handleMsgDrawDebt(ctx, k, msg)\n\t\tcase MsgRepayDebt:\n\t\t\treturn handleMsgRepayDebt(ctx, k, msg)\n\t\tdefault:\n\t\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, \"unrecognized %s message type: %T\", ModuleName, msg)\n\t\t}\n\t}\n}", "func New(handler http.Handler) backend.CallResourceHandler {\n\treturn &httpResourceHandler{\n\t\thandler: handler,\n\t}\n}", "func newHandler(t *testing.T) handler {\n\tt.Helper()\n\n\tapp, err := application.New()\n\trequire.NoError(t, err)\n\treturn handler{app}\n}", "func newPprofHandler(svr *server.Server, rd *render.Render) *pprofHandler {\n\treturn &pprofHandler{\n\t\tsvr: svr,\n\t\trd: rd,\n\t}\n}", "func NewHandler(k Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {\n\t\tctx = ctx.WithEventManager(sdk.NewEventManager())\n\t\tswitch msg := msg.(type) {\n\t\tcase MsgCreate:\n\t\t\treturn handleMsgCreate(ctx, k, &msg)\n\t\tcase MsgDelete:\n\t\t\treturn 
handleMsgDelete(ctx, k, &msg)\n\t\tdefault:\n\t\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, \"unrecognized %s message type: %T\", types.ModuleName, msg)\n\t\t}\n\t}\n}", "func newHandlerRunner() *handlerRunner {\n\tfr := handlerRunner{}\n\treturn &fr\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize TodoServer and create a gin router
func NewTodoServer(store store.TodoStore) *TodoServer { t := new(TodoServer) t.Store = store t.Router = gin.Default() // Project routes t.Router.GET("/projects/:projectName", t.GetProject) t.Router.POST("/projects/", t.PostProject) t.Router.GET("/projects/", t.GetAllProjects) t.Router.PUT("/projects/:projectName", t.PutProject) t.Router.DELETE("/projects/:projectName", t.DeleteProject) t.Router.DELETE("/projects/:projectName/archive", t.ArchiveProject) t.Router.PUT("/projects/:projectName/archive", t.ArchiveProject) // Task routes t.Router.POST("projects/:projectName/tasks", t.PostTask) t.Router.GET("projects/:projectName/tasks/:taskName", t.GetTask) t.Router.GET("projects/:projectName/tasks", t.GetAllTasks) t.Router.PUT("projects/:projectName/tasks/:taskName", t.PutTask) t.Router.DELETE("projects/:projectName/tasks/:taskName", t.DeleteTask) t.Router.PUT("/projects/:projectName/tasks/:taskName/complete", t.CompleteTask) t.Router.DELETE("/projects/:projectName/tasks/:taskName/complete", t.CompleteTask) return t }
[ "func (s *Server) InitGin() {\n\trouter := gin.New()\n\trouter.Use(cors.Middleware(cors.Config{\n\t\tOrigins: \"*\",\n\t\tMethods: s.Config.WebServer.AllowedMethods,\n\t\tRequestHeaders: s.Config.WebServer.AllowedHeaders,\n\t\tExposedHeaders: \"\",\n\t\tMaxAge: 50 * time.Second,\n\t\tCredentials: true,\n\t\tValidateHeaders: false,\n\t}))\n\trouter.Use(gin.Recovery()) //skip logger setup, since we already have one\n\trouter.Use(VerifyHeader())\n\n\trouter.StaticFS(fmt.Sprintf(\"/%s\", s.Config.WebServer.SiteURL), http.Dir(s.Config.WebServer.StaticFilesLocation))\n\n\trouter.GET(\"/api/alive\", s.IsAlive())\n\trouter.GET(\"/api/master\", s.GetOwnerInfo())\n\trouter.POST(\"/api/master\", s.UpdateOwner())\n\trouter.GET(\"/api/network\", s.GetNetworkInfo())\n\trouter.GET(\"/api/start\", s.Sniff())\n\trouter.GET(\"/api/profiles\", s.GetAllProfiles())\n\trouter.POST(\"/api/profiles\", s.CreateOrUpdateProfile())\n\trouter.Run(fmt.Sprintf(\"%s:%d\", s.Config.WebServer.URL, s.Config.WebServer.Port))\n}", "func Server() {\n datastore.Start()\n router := gin.Default()\n web.ExposeRoutes(router)\n restAPI := router.Group(\"/api\")\n api.ExposeRoutes(restAPI)\n router.Run(fmt.Sprintf(\":%v\", config.GetEnvVar(\"PORT\")))\n}", "func (c Routes) StartGin() {\n\tr := gin.Default()\n\tr.Use(cors.Default())\n\tapi := r.Group(\"/api\")\n\t{\n\t\tapi.GET(\"/\", welcome)\n\t\tapi.GET(tasksResource, task.GetTasks)\n\t\tapi.GET(taskResource, task.GetTask)\n\t\tapi.POST(taskResource, task.CreateTask)\n\t\tapi.PATCH(taskResource, task.UpdateTaskStatus)\n\t\tapi.DELETE(taskResource, task.DeleteTask)\n\t}\n\n\tr.Run(\":8000\")\n}", "func RegisterTodoHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TodoServer) error {\n\n\tmux.Handle(\"POST\", pattern_Todo_CreateTask_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = 
grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_Todo_CreateTask_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Todo_CreateTask_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"GET\", pattern_Todo_ReadTask_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_Todo_ReadTask_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Todo_ReadTask_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"PUT\", pattern_Todo_UpdateTask_0, func(w http.ResponseWriter, req *http.Request, 
pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_Todo_UpdateTask_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Todo_UpdateTask_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"DELETE\", pattern_Todo_DeleteTask_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_Todo_DeleteTask_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Todo_DeleteTask_0(ctx, mux, 
outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"GET\", pattern_Todo_ListTasksStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\terr := status.Error(codes.Unimplemented, \"streaming calls are not yet supported in the in-process transport\")\n\t\t_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\treturn\n\t})\n\n\tmux.Handle(\"GET\", pattern_Todo_ListTasks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_Todo_ListTasks_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Todo_ListTasks_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\treturn nil\n}", "func GinServer() {\n\t// Set Gin to production mode\n\tgin.SetMode(gin.ReleaseMode)\n\n\t// Set the router as the default one provided by Gin\n\trouter = gin.Default()\n\n\t// Process the templates at the start so that they don't have to be loaded\n\t// from the disk again. 
This makes serving HTML pages very fast.\n\trouter.LoadHTMLGlob(\"static/templates/*\")\n\n\t// Initialize the routes\n\tinitializeRoutes()\n\n\thttp.Handle(\"/\", router)\n}", "func InitRequestHandler() {\n\tr := gin.Default()\n\n\tv1 := r.Group(\"/api/v1\")\n\t{\n\n\t\tv1.GET(\"/healthz\", func(c *gin.Context) {\n\t\t\tc.String(200, \"OK\")\n\t\t})\n\n\t\tv1.POST(\"/payments\", func(c *gin.Context) {\n\t\t\tc.JSON(200, gin.H{\"message\": \"pong\"})\n\t\t})\n\n\t}\n\n\tr.Run()\n}", "func (c Routes) StartGin() {\n\tr := gin.Default()\n\tapi := r.Group(\"/api\")\n\t{\n\t\tapi.GET(\"/\", welcome)\n\t\tapi.GET(\"/users\", user.GetAllUsers)\n\t\tapi.POST(\"/users\", user.CreateUser)\n\t}\n\tr.Run(\":8000\")\n}", "func init() {\n\t_ = router.Register(\"httprouter\", New)\n}", "func InitServer(useMiddleware bool) *Server {\n\tserver := Server{}\n\n\t// Make Redis connection\n\tredisConn, err := redis.Dial(\"tcp\", LocalConfig.redisPort)\n\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tserver.Redis = redisConn\n\n\t// Register handlers\n\tgin.SetMode(gin.ReleaseMode)\n\tserver.Router = gin.New()\n\n\tif useMiddleware {\n\t\tserver.Router.Use(ValidateToken(&server), EnsureJSONBody(&server))\n\t}\n\n\tserver.Router.POST(\"/device\", GenerateToken(&server))\n\tserver.Router.POST(\"/close\", CloseSession(&server))\n\n\tserver.Router.POST(\"/store\", StoreData(&server))\n\tserver.Router.POST(\"/retrieve\", RetrieveData(&server))\n\n\treturn &server\n}", "func New(sto store.Service) *server {\n\ts := &server{sto: sto}\n\n\trouter := mux.NewRouter()\n\n\trouter.Handle(\"/todo\", allowedMethods(\n\t\t[]string{\"OPTIONS\", \"GET\", \"POST\"},\n\t\thandlers.MethodHandler{\n\t\t\t\"GET\": http.HandlerFunc(s.getTodos),\n\t\t\t\"POST\": http.HandlerFunc(s.createTodo),\n\t\t}))\n\n\trouter.Handle(\"/todo/{id}\", idMiddleware(allowedMethods(\n\t\t[]string{\"OPTIONS\", \"GET\", \"PUT\", \"PATCH\", \"DELETE\"},\n\t\thandlers.MethodHandler{\n\t\t\t\"GET\": 
http.HandlerFunc(s.getTodo),\n\t\t\t\"PUT\": http.HandlerFunc(s.putTodo),\n\t\t\t\"PATCH\": http.HandlerFunc(s.patchTodo),\n\t\t\t\"DELETE\": http.HandlerFunc(s.deleteTodo),\n\t\t})))\n\n\ts.handler = limitBody(defaultHeaders(router))\n\n\treturn s\n}", "func main() {\n\tfmt.Println(\"Go Demo with net/http server\")\n\n\t// initialize empty itemStore\n\titemStore := store.InitializeStore()\n\tserver.StartRouter(itemStore)\n}", "func New(e *todo.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tGetH: NewGetHandler(e.Get, uh),\n\t\tListH: NewListHandler(e.List, uh),\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t\tRemoveH: NewRemoveHandler(e.Remove, uh),\n\t}\n}", "func InitializeServer(host string) (server *network.WebServer) {\n\trand.Seed(time.Now().UTC().UnixNano())\n\t// Make sure folders exist that we want:\n\tif err := ensureBindDirs(); err != nil {\n\t\tLog.Error(\"Failed to have home working dir to put the files into at ~/Desktop/bind, err: \", err)\n\t} else {\n\t\tLog.Info(\"bind dirs ensured!\")\n\t}\n\tif os.Args[0] != \"d\" { //development mode\n\t\tgin.SetMode(gin.ReleaseMode)\n\t}\n\tr := gin.New()\n\tr.LoadHTMLGlob(\"public/tmpl/*.html\")\n\tr.StaticFS(\"/videos\", http.Dir(basePath+\"/videos\"))\n\tr.StaticFS(\"/frames\", http.Dir(basePath+\"/frames\"))\n\tr.Static(\"/public\", \"./public\")\n\tr.GET(\"/\", getIndex)\n\tr.POST(\"/g\", postIndex)\n\tr.GET(\"/g\", getIndex)\n\tr.GET(\"/about\", getAbout)\n\tr.GET(\"/jobs\", getJobs)\n\tr.GET(\"/code\", getCode)\n\tmel = melody.New() // melody middleware\n\n\t// websocket route\n\tr.GET(\"/ws\",func(ctx *gin.Context){\n\t\t// handle request with Melody\n\t\tmel.HandleRequest(ctx.Writer,ctx.Request)\n\t})\n\n\t// Melody message handler\n\tmel.HandleMessage(func(ses *melody.Session,msg []byte){\n\t\t// broadcast message to connected sockets\n\t\tmel.Broadcast(msg)\n\t})\n\n\n\tr.GET(\"/openframes\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/frames\")\n\t})\n\tr.GET(\"/openvideos\", 
func(c *gin.Context) {\n\t\topen.Run(basePath + \"/videos\")\n\t})\n\tr.GET(\"/openlogs\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/logs\")\n\t})\n\tr.GET(\"/toggleClipYt\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/logs\")\n\t})\n\t// go requests(mel)\n\t// go jobUpdates(mel)\n\n\treturn network.InitializeWebServer(r, host)\n}", "func SetupRouters(r *gin.Engine) {\n\tv1 := r.Group(\"/v1\")\n\tv1.GET(\"/ping\", func(context *gin.Context) {\n\t\tcontext.JSON(200, \"ok\")\n\t})\n\tv1.POST(\"/webhook\", PushEvent)\n\tv1.POST(\"/task_config\", CreateTaskConfig)\n\tv1.POST(\"/task_history\", CreateTaskHistory)\n\tv1.StaticFS(\"/code_explorer\", http.Dir(\"/user/local/docker/deploy\"))\n}", "func InitServer() *http.Server {\n\tengine := gin.New()\n\tengine.Use(\n\t\tgin.Recovery(),\n\t\tginrus.Ginrus(log.StandardLogger(), time.RFC3339, true),\n\t)\n\n\t// CORS support\n\tcors := cors.Middleware(cors.Options{\n\t\tAllowOrigins: []string{\"*\"},\n\t\tAllowCredentials: false,\n\t\tAllowMethods: nil, // Use default (see defaultAllowMethods).\n\t\tAllowHeaders: []string{}, // Copy from Access-Control-Allow-Headers.\n\t\tExposeHeaders: []string{},\n\t\tMaxAge: 10 * time.Minute,\n\t})\n\tengine.Use(cors)\n\tengine.OPTIONS(\"/*cors\", cors)\n\n\t// Static files and templates.\n\t_, filepath, _, _ := runtime.Caller(1)\n\tbase := path.Dir(filepath)\n\tengine.LoadHTMLGlob(path.Join(base, \"static/templates/*\"))\n\tengine.Static(\"/css\", path.Join(base, \"static/css\"))\n\tengine.Static(\"/js\", path.Join(base, \"static/js\"))\n\n\t// Routes\n\tengine.GET(\"/\", Index)\n\tengine.POST(\"/algorithm\", NewAlgorithm)\n\tengine.GET(\"/worker/*prefix\", NewWorker)\n\n\tserver := http.Server{\n\t\tAddr: Addr,\n\t\tHandler: engine,\n\t}\n\n\treturn &server\n}", "func initRouter() *gin.Engine {\n\tr := gin.Default()\n\tv := r.Group(\"v\" + conf.APIVersion)\n\n\tfor _, app := range conf.Apps {\n\t\tappR := v.Group(app.PathPrefix)\n\n\t\tappR.POST(\"/register\", 
registerHandler(app))\n\t\tappR.POST(\"/login\", loginHandler(app))\n\t}\n\n\treturn r\n}", "func (srv *Server) InIt() {\n\tsrv.router = mux.NewRouter()\n\tsrv.router.HandleFunc(\"/components\", srv.getComponents).Methods(\"GET\")\n\tsrv.router.HandleFunc(\"/chain\", srv.addChain).Methods(\"POST\")\n\tsrv.router.PathPrefix(\"/web\").Handler(http.StripPrefix(\"/web\", http.FileServer(http.Dir(\"web/console\"))))\n\n\tgo http.ListenAndServe(\":8020\", srv.router)\n}", "func InitRoutes(taskRouter *mux.Router) {\n\t// ---- Task Listing ---- //\n\ttaskRouter.HandleFunc(\"\", TaskIndexHandler).Methods(\"GET\")\n\ttaskRouter.HandleFunc(\"/\", TaskIndexHandler).Methods(\"GET\")\n\t// ---- Task Creation ---- //\n\ttaskRouter.HandleFunc(\"\", TaskCreateHandler).Methods(\"POST\")\n\ttaskRouter.HandleFunc(\"/\", TaskCreateHandler).Methods(\"POST\")\n\t// ---- Task View ---- //\n\ttaskRouter.HandleFunc(\"/{taskId}\", TaskViewHandler).Methods(\"GET\")\n\ttaskRouter.HandleFunc(\"/{taskId}/\", TaskViewHandler).Methods(\"GET\")\n\t// ---- Task Deletion ---- //\n\ttaskRouter.HandleFunc(\"/{taskId}\", TaskDeleteHandler).Methods(\"DELETE\")\n\ttaskRouter.HandleFunc(\"/{taskId}/\", TaskDeleteHandler).Methods(\"DELETE\")\n\t// ---- Task Update ---- //\n\ttaskRouter.HandleFunc(\"/{taskId}\", TaskUpdateHandler).Methods(\"PATCH\")\n\ttaskRouter.HandleFunc(\"/{taskId}/\", TaskUpdateHandler).Methods(\"PATCH\")\n}", "func setupRouter() *gin.Engine {\n\trouter := gin.Default()\n\trouter.GET(\"/drivers/:id/locations\", getLocations)\n\treturn router\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewDataCollectionEndpointsClient creates a new instance of DataCollectionEndpointsClient with the specified values.
func NewDataCollectionEndpointsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *DataCollectionEndpointsClient { cp := arm.ClientOptions{} if options != nil { cp = *options } if len(cp.Host) == 0 { cp.Host = arm.AzurePublicCloud } return &DataCollectionEndpointsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)} }
[ "func NewDataCollectionEndpointsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *DataCollectionEndpointsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Endpoint) == 0 {\n\t\tcp.Endpoint = arm.AzurePublicCloud\n\t}\n\tclient := &DataCollectionEndpointsClient{\n\t\tsubscriptionID: subscriptionID,\n\t\thost: string(cp.Endpoint),\n\t\tpl: armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, &cp),\n\t}\n\treturn client\n}", "func New(endpoints ...string) *Client {\n\treturn &Client{\n\t\tendpoints: newEndpoints(endpoints...),\n\t\tMaxTries: len(endpoints),\n\t\tEndpointTimeout: defaultTimeout,\n\t}\n}", "func NewEndpointsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*EndpointsClient, error) {\n\tcl, err := arm.NewClient(moduleName+\".EndpointsClient\", moduleVersion, credential, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &EndpointsClient{\n\t\tsubscriptionID: subscriptionID,\n\t\tinternal: cl,\n\t}\n\treturn client, nil\n}", "func NewClient(userList, getUser, createUser, updateUser, deleteUser goa.Endpoint) *Client {\n\treturn &Client{\n\t\tUserListEndpoint: userList,\n\t\tGetUserEndpoint: getUser,\n\t\tCreateUserEndpoint: createUser,\n\t\tUpdateUserEndpoint: updateUser,\n\t\tDeleteUserEndpoint: deleteUser,\n\t}\n}", "func newEndpoints() *Endpoints {\n\treturn &Endpoints{\n\t\tBackends: map[string]service.PortConfiguration{},\n\t}\n}", "func NewCloudEndpointsClient(subscriptionID string) CloudEndpointsClient {\n\treturn NewCloudEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID)\n}", "func NewClientWithEndpoints(endpoints []string) (c *Client, err error) {\n\t// Create etcd client\n\tclient, err := clientv3.New(clientv3.Config{\n\t\tEndpoints: endpoints,\n\t\tDialTimeout: DialTimeout,\n\t})\n\tif err != nil {\n\t\tlogrus.Error(\"Can't create etcd client\", 
err)\n\t\treturn nil, err\n\t}\n\n\t// Create Client\n\tc = &Client{\n\t\tclientv3.NewKV(client),\n\t}\n\treturn c, nil\n}", "func NewCloudEndpointsClientWithBaseURI(baseURI string, subscriptionID string) CloudEndpointsClient {\n\treturn CloudEndpointsClient{NewWithBaseURI(baseURI, subscriptionID)}\n}", "func NewClient(refresh, refreshAll, catalogError goa.Endpoint) *Client {\n\treturn &Client{\n\t\tRefreshEndpoint: refresh,\n\t\tRefreshAllEndpoint: refreshAll,\n\t\tCatalogErrorEndpoint: catalogError,\n\t}\n}", "func (flags Etcd) CreateClientEndpoints(sctx *ServiceContext) string {\n\treturn flags.createEndpoints(sctx, defaultEtcdClientPort, nil)\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tCreateUserdata: NewCreateUserdataEndpoint(s),\n\t\tUpdateUserdata: NewUpdateUserdataEndpoint(s),\n\t\tGetUserdata: NewGetUserdataEndpoint(s),\n\t}\n}", "func NewClient(c *rpc.Client) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\tvar (\n\t\t\terrs = make(chan error, 1)\n\t\t\tresponses = make(chan interface{}, 1)\n\t\t)\n\t\tgo func() {\n\t\t\tvar response reqrep.AddResponse\n\t\t\tif err := c.Call(\"addsvc.Add\", request, &response); err != nil {\n\t\t\t\terrs <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresponses <- response\n\t\t}()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, context.DeadlineExceeded\n\t\tcase err := <-errs:\n\t\t\treturn nil, err\n\t\tcase response := <-responses:\n\t\t\treturn response, nil\n\t\t}\n\t}\n}", "func New(c rest.Interface) *Clientset {\n\tvar cs Clientset\n\tcs.autoscalingV1alpha1 = autoscalingv1alpha1.New(c)\n\tcs.clusterV1alpha1 = clusterv1alpha1.New(c)\n\tcs.configV1alpha1 = configv1alpha1.New(c)\n\tcs.networkingV1alpha1 = networkingv1alpha1.New(c)\n\tcs.policyV1alpha1 = policyv1alpha1.New(c)\n\tcs.searchV1alpha1 = searchv1alpha1.New(c)\n\tcs.workV1alpha1 = workv1alpha1.New(c)\n\tcs.workV1alpha2 = workv1alpha2.New(c)\n\n\tcs.DiscoveryClient = 
discovery.NewDiscoveryClient(c)\n\treturn &cs\n}", "func NewClient(companyList, getCompany, createCompany, updateCompany, deleteCompany goa.Endpoint) *Client {\n\treturn &Client{\n\t\tCompanyListEndpoint: companyList,\n\t\tGetCompanyEndpoint: getCompany,\n\t\tCreateCompanyEndpoint: createCompany,\n\t\tUpdateCompanyEndpoint: updateCompany,\n\t\tDeleteCompanyEndpoint: deleteCompany,\n\t}\n}", "func NewEndpoints(c Configuration, alternate func() (Endpoints, error)) (Endpoints, error) {\n\tif endpoints := c.endpoints(); len(endpoints) > 0 {\n\t\treturn ParseURLs(endpoints...)\n\t}\n\n\tif alternate != nil {\n\t\treturn alternate()\n\t}\n\n\treturn nil, errNoConfiguredEndpoints\n}", "func NewDataClient() *DataClient {\n\treturn &DataClient{}\n}", "func NewEndpoints() Endpoints {\n\treturn Endpoints{\n\t\tendpoints: make([]*Endpoint, 0),\n\t\tmapUUID: make(map[string]int),\n\t}\n}", "func NewEndpoints(s Service) *Endpoints {\n\t// Casting service to Auther interface\n\ta := s.(Auther)\n\treturn &Endpoints{\n\t\tDataEventsEndpoint: NewDataEventsEndpointEndpoint(s, a.JWTAuth),\n\t\tAddDataEvent: NewAddDataEventEndpoint(s, a.JWTAuth),\n\t\tUpdateDataEvent: NewUpdateDataEventEndpoint(s, a.JWTAuth),\n\t\tDeleteDataEvent: NewDeleteDataEventEndpoint(s, a.JWTAuth),\n\t}\n}", "func NewClient(healthcheck, listDevices, createDevice, updateCharge, getChargeHistory, updateDevice goa.Endpoint) *Client {\n\treturn &Client{\n\t\tHealthcheckEndpoint: healthcheck,\n\t\tListDevicesEndpoint: listDevices,\n\t\tCreateDeviceEndpoint: createDevice,\n\t\tUpdateChargeEndpoint: updateCharge,\n\t\tGetChargeHistoryEndpoint: getChargeHistory,\n\t\tUpdateDeviceEndpoint: updateDevice,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
createHandleResponse handles the Create response.
func (client *DataCollectionEndpointsClient) createHandleResponse(resp *http.Response) (DataCollectionEndpointsCreateResponse, error) { result := DataCollectionEndpointsCreateResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResource); err != nil { return DataCollectionEndpointsCreateResponse{}, runtime.NewResponseError(err, resp) } return result, nil }
[ "func (client *CustomAssessmentAutomationsClient) createHandleResponse(resp *http.Response) (CustomAssessmentAutomationsCreateResponse, error) {\n\tresult := CustomAssessmentAutomationsCreateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomation); err != nil {\n\t\treturn CustomAssessmentAutomationsCreateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CustomAssessmentAutomationsClient) createHandleResponse(resp *http.Response) (CustomAssessmentAutomationsClientCreateResponse, error) {\n\tresult := CustomAssessmentAutomationsClientCreateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomation); err != nil {\n\t\treturn CustomAssessmentAutomationsClientCreateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ActionsClient) createOrUpdateHandleResponse(resp *http.Response) (ActionsClientCreateOrUpdateResponse, error) {\n\tresult := ActionsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ActionResponse); err != nil {\n\t\treturn ActionsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PipelineClient) createPipelineRunHandleResponse(resp *azcore.Response) (CreateRunResponseResponse, error) {\n\tvar val *CreateRunResponse\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn CreateRunResponseResponse{}, err\n\t}\n\treturn CreateRunResponseResponse{RawResponse: resp.Response, CreateRunResponse: val}, nil\n}", "func (client *PipelinesClient) createRunHandleResponse(resp *http.Response) (PipelinesClientCreateRunResponse, error) {\n\tresult := PipelinesClientCreateRunResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CreateRunResponse); err != nil {\n\t\treturn PipelinesClientCreateRunResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) createKeyHandleResponse(resp *http.Response) (KeyVaultClientCreateKeyResponse, error) 
{\n\tresult := KeyVaultClientCreateKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientCreateKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) createKeyHandleResponse(resp *http.Response) (KeyVaultClientCreateKeyResponse, error) {\n\tresult := KeyVaultClientCreateKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientCreateKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) createOrReplaceHandleResponse(resp *http.Response) (OutputsClientCreateOrReplaceResponse, error) {\n\tresult := OutputsClientCreateOrReplaceResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsClientCreateOrReplaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OperationsClient) createOrUpdateHandleResponse(resp *http.Response) (OperationsClientCreateOrUpdateResponse, error) {\n\tresult := OperationsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OperationsContent); err != nil {\n\t\treturn OperationsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServerVulnerabilityAssessmentClient) createOrUpdateHandleResponse(resp *http.Response) (ServerVulnerabilityAssessmentClientCreateOrUpdateResponse, error) {\n\tresult := ServerVulnerabilityAssessmentClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerVulnerabilityAssessment); err != nil {\n\t\treturn ServerVulnerabilityAssessmentClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RoleAssignmentsClient) createByIDHandleResponse(resp *http.Response) (RoleAssignmentsClientCreateByIDResponse, error) {\n\tresult := RoleAssignmentsClientCreateByIDResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.RoleAssignment); err != nil {\n\t\treturn RoleAssignmentsClientCreateByIDResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) createOrReplaceHandleResponse(resp *http.Response) (OutputsCreateOrReplaceResponse, error) {\n\tresult := OutputsCreateOrReplaceResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsCreateOrReplaceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createDeploymentHandleResponse(resp *http.Response) (WebAppsCreateDeploymentResponse, error) {\n\tresult := WebAppsCreateDeploymentResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Deployment); err != nil {\n\t\treturn WebAppsCreateDeploymentResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func CreateResponse(result interface{}, err error) *Response {\n\tif err == nil {\n\t\treturn CreateSuccessResponse(result)\n\t}\n\treturn CreateErrorResponse(err)\n}", "func (c *WSCodec) CreateResponse(id interface{}, reply interface{}) interface{} {\n\treturn &jsonSuccessResponse{Version: jsonrpcVersion, Id: id, Result: reply}\n}", "func (v *Vault) HandleCreate(e APIRequestEvent) {\n\treturn //No need of actions in case of create requests\n}", "func (pH *PhotoHandler) HandleCreate(w http.ResponseWriter, r *http.Request) {\n\tfmt.Printf(\"Entered HandleCreate\\n\")\n\n\tp := dtos.Photo{}\n\tjson.NewDecoder(r.Body).Decode(&p)\n\n\tresult := pH.photoController.Create(&p)\n\n\tuj, _ := json.Marshal(result)\n\tpH.writeSuccessResponse(uj, 201, w)\n}", "func NewCreateResponse(input string) CreateResponse {\n\tvar id string\n\ttmp := strings.Split(input, \"/\")\n\tif len(tmp) > 0 {\n\t\tid = tmp[len(tmp)-1]\n\t}\n\treturn CreateResponse{\n\t\tID: id,\n\t\tBody: input,\n\t}\n}", "func (client 
*IncidentsClient) createTeamHandleResponse(resp *http.Response) (IncidentsClientCreateTeamResponse, error) {\n\tresult := IncidentsClientCreateTeamResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TeamInformation); err != nil {\n\t\treturn IncidentsClientCreateTeamResponse{}, err\n\t}\n\treturn result, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
listBySubscriptionHandleResponse handles the ListBySubscription response.
func (client *DataCollectionEndpointsClient) listBySubscriptionHandleResponse(resp *http.Response) (DataCollectionEndpointsListBySubscriptionResponse, error) { result := DataCollectionEndpointsListBySubscriptionResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil { return DataCollectionEndpointsListBySubscriptionResponse{}, runtime.NewResponseError(err, resp) } return result, nil }
[ "func (client *RedisClient) listBySubscriptionHandleResponse(resp *http.Response) (RedisListBySubscriptionResponse, error) {\n\tresult := RedisListBySubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisListResult); err != nil {\n\t\treturn RedisListBySubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RulesClient) listBySubscriptionsHandleResponse(resp *azcore.Response) (RulesListBySubscriptionsResponse, error) {\n\tresult := RulesListBySubscriptionsResponse{RawResponse: resp.Response}\n\tif err := resp.UnmarshalAsJSON(&result.RuleListResult); err != nil {\n\t\treturn RulesListBySubscriptionsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionsClient) listByTopicHandleResponse(resp *azcore.Response) (SubscriptionsListByTopicResponse, error) {\n\tresult := SubscriptionsListByTopicResponse{RawResponse: resp.Response}\n\tif err := resp.UnmarshalAsJSON(&result.SBSubscriptionListResult); err != nil {\n\t\treturn SubscriptionsListByTopicResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TriggersClient) listByShareSubscriptionHandleResponse(resp *http.Response) (TriggersClientListByShareSubscriptionResponse, error) {\n\tresult := TriggersClientListByShareSubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TriggerList); err != nil {\n\t\treturn TriggersClientListByShareSubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PrivateCloudsClient) listInSubscriptionHandleResponse(resp *http.Response) (PrivateCloudsClientListInSubscriptionResponse, error) {\n\tresult := PrivateCloudsClientListInSubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PrivateCloudList); err != nil {\n\t\treturn PrivateCloudsClientListInSubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client ServicesClient) ListBySubscriptionResponder(resp *http.Response) (result 
ServiceResourceList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client LabsClient) ListBySubscriptionResponder(resp *http.Response) (result PagedLabs, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client LabClient) ListBySubscriptionResponder(resp *http.Response) (result ResponseWithContinuationLab, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client Client) ListBySubscriptionResponder(resp *http.Response) (result DelegatedControllers, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *ProviderShareSubscriptionsClient) listByShareHandleResponse(resp *http.Response) (ProviderShareSubscriptionsClientListByShareResponse, error) {\n\tresult := ProviderShareSubscriptionsClientListByShareResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProviderShareSubscriptionList); err != nil {\n\t\treturn ProviderShareSubscriptionsClientListByShareResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyEventsClient) listQueryResultsForSubscriptionHandleResponse(resp *http.Response) (PolicyEventsClientListQueryResultsForSubscriptionResponse, error) {\n\tresult := 
PolicyEventsClientListQueryResultsForSubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyEventsQueryResults); err != nil {\n\t\treturn PolicyEventsClientListQueryResultsForSubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client ServicesClient) ListBySubscription(ctx context.Context) (result ServiceResourceListPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ServicesClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.srl.Response.Response != nil {\n\t\t\t\tsc = result.srl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.srl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.srl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.srl.hasNextLink() && result.srl.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (client IotHubResourceClient) ListBySubscription(ctx context.Context) (result IotHubDescriptionListResultPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/IotHubResourceClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.ihdlr.Response.Response != nil {\n\t\t\t\tsc = 
result.ihdlr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.ihdlr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.ihdlr, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.ihdlr.hasNextLink() && result.ihdlr.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (client LabClient) ListBySubscription(ctx context.Context, filter string, top *int32, orderBy string) (result ResponseWithContinuationLabPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/LabClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.rwcl.Response.Response != nil {\n\t\t\t\tsc = result.rwcl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx, filter, top, orderBy)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.rwcl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", 
\"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.rwcl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (client AccountClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n return client.Send(req, azure.DoRetryWithRegistration(client.Client))\n }", "func (client ServicesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client Client) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client LabsClient) ListBySubscription(ctx context.Context, filter string) (result PagedLabsPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/LabsClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.pl.Response.Response != nil {\n\t\t\t\tsc = result.pl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"labservices.LabsClient\", \"ListBySubscription\", err.Error())\n\t}\n\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx, filter)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"labservices.LabsClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.pl.Response = 
autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"labservices.LabsClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.pl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"labservices.LabsClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.pl.hasNextLink() && result.pl.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (client LabsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push reads supporter and donation records and pushes them downstream. Note that the records are maps of strings. Downstream will convert them to useful data.
func (rt *Runtime) Push(d chan map[string]string) (err error) { t := rt.API.NewTable("supporter(supporter_KEY)donation") offset := int32(0) count := 500 c := "donation.RESULT IN 0,-1" for count == 500 { m, err := t.LeftJoinMap(offset, count, c) if err != nil { return err } for _, r := range m { d <- r } count = len(m) log.Printf("Push: read %d from offset %d\n", count, offset) offset += int32(count) } close(d) return err }
[ "func Read(reader io.Reader) (rj RecordJar, err error) {\n\n\tb := bufio.NewReader(reader)\n\tr := make(Record)\n\n\tcurrentHeader := \"\"\n\tline := \"\"\n\nRECORDS:\n\tfor {\n\n\t\t// If we have a record on the go store it and allocate a new one\n\t\tif len(r) != 0 {\n\t\t\trj = append(rj, r)\n\t\t\tr = make(Record)\n\t\t}\n\n\t\t// Exit at EOF\n\t\tif err == io.EOF {\n\t\t\tbreak RECORDS\n\t\t}\n\n\t\t// Process record header lines\n\tHEADERS:\n\t\tfor {\n\t\t\tline, err = b.ReadString('\\n')\n\t\t\tline = strings.TrimSpace(line)\n\n\t\t\tswitch {\n\t\t\tcase line == HS:\n\t\t\t\tbreak HEADERS\n\t\t\tcase line == RS:\n\t\t\t\tcontinue RECORDS\n\t\t\tcase len(line) > 1 && line[0:2] == REM:\n\t\t\t\tcontinue HEADERS\n\t\t\t}\n\n\t\t\ttokens := splitHeader.FindStringSubmatch(line)\n\t\t\tnewHeader, data := strings.ToLower(tokens[1]), tokens[2]\n\n\t\t\tif newHeader != \"\" {\n\t\t\t\tcurrentHeader = newHeader\n\t\t\t}\n\n\t\t\tif _, ok := r[currentHeader]; ok {\n\t\t\t\tr[currentHeader] += \" \"\n\t\t\t}\n\t\t\tr[currentHeader] += data\n\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tcontinue RECORDS\n\t\t\t}\n\t\t}\n\n\t\t// Process free format data lines - between header separator (HS) and\n\t\t// record separator / EOF (RS).\n\t\tjoiner := \"\"\n\t\tfor {\n\t\t\tline, err = b.ReadString('\\n')\n\t\t\tline = strings.TrimRightFunc(line, unicode.IsSpace)\n\n\t\t\tif line == RS || line == \"\" && err == io.EOF {\n\t\t\t\tcontinue RECORDS\n\t\t\t}\n\n\t\t\tif line == \"\" {\n\t\t\t\tr[\":data:\"] += \"\\n\"\n\t\t\t\tjoiner = \"\\n\"\n\t\t\t} else {\n\t\t\t\tr[\":data:\"] += joiner + line\n\t\t\t\tjoiner = \" \"\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tcontinue RECORDS\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rj, nil\n}", "func (s *server) pushRecord(ctx context.Context, id thread.ID, lid peer.ID, rec core.Record) error {\n\t// Collect known 
writers\n\taddrs := make([]ma.Multiaddr, 0)\n\tinfo, err := s.threads.store.ThreadInfo(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, l := range info.Logs {\n\t\taddrs = append(addrs, l.Addrs...)\n\t}\n\n\t// Serialize and sign the record for transport\n\tpbrec, err := cbor.RecordToProto(ctx, s.threads, rec)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpayload, err := pbrec.Marshal()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsk := s.threads.getPrivKey()\n\tif sk == nil {\n\t\treturn fmt.Errorf(\"private key for host not found\")\n\t}\n\tsig, err := sk.Sign(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := &pb.PushRecordRequest{\n\t\tHeader: &pb.PushRecordRequest_Header{\n\t\t\tFrom: &pb.ProtoPeerID{ID: s.threads.host.ID()},\n\t\t\tSignature: sig,\n\t\t\tKey: &pb.ProtoPubKey{PubKey: sk.GetPublic()},\n\t\t},\n\t\tThreadID: &pb.ProtoThreadID{ID: id},\n\t\tLogID: &pb.ProtoPeerID{ID: lid},\n\t\tRecord: pbrec,\n\t}\n\n\t// Push to each address\n\twg := sync.WaitGroup{}\n\tfor _, addr := range addrs {\n\t\twg.Add(1)\n\t\tgo func(addr ma.Multiaddr) {\n\t\t\tdefer wg.Done()\n\t\t\tp, err := addr.ValueForProtocol(ma.P_P2P)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpid, err := peer.Decode(p)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif pid.String() == s.threads.host.ID().String() {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlog.Debugf(\"pushing record to %s...\", p)\n\n\t\t\tcctx, cancel := context.WithTimeout(ctx, reqTimeout)\n\t\t\tdefer cancel()\n\t\t\tconn, err := s.dial(cctx, pid, grpc.WithInsecure())\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"dial %s failed: %s\", p, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tclient := pb.NewServiceClient(conn)\n\t\t\tif _, err = client.PushRecord(cctx, req); err != nil {\n\t\t\t\tif status.Convert(err).Code() == codes.NotFound {\n\t\t\t\t\tlog.Debugf(\"pushing log %s to %s...\", lid.String(), p)\n\n\t\t\t\t\t// Send the missing log\n\t\t\t\t\tl, err := 
s.threads.store.LogInfo(id, lid)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tlreq := &pb.PushLogRequest{\n\t\t\t\t\t\tHeader: &pb.PushLogRequest_Header{\n\t\t\t\t\t\t\tFrom: &pb.ProtoPeerID{ID: s.threads.host.ID()},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tThreadID: &pb.ProtoThreadID{ID: id},\n\t\t\t\t\t\tLog: logToProto(l),\n\t\t\t\t\t}\n\t\t\t\t\tif _, err = client.PushLog(cctx, lreq); err != nil {\n\t\t\t\t\t\tlog.Warnf(\"push log to %s failed: %s\", p, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Warnf(\"push record to %s failed: %s\", p, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(addr)\n\t}\n\n\t// Finally, publish to the thread's topic\n\tif err = s.publish(id, req); err != nil {\n\t\tlog.Error(err)\n\t}\n\n\twg.Wait()\n\treturn nil\n}", "func ReadSupporters(e *goengage.Environment, c1 chan goengage.Segment, done chan bool, id int) (err error) {\n\tlog.Printf(\"ReadSupporters %v: begin\\n\", id)\n\tfor true {\n\t\tr, ok := <-c1\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\t//Create a CSV filename for the group an see if the file exists.\n\t\tfilename := fmt.Sprintf(\"%v.csv\", r.Name)\n\t\tfilename = strings.Replace(filename, \"/\", \"-\", -1)\n\t\t_, err := os.Stat(filename)\n\t\tif err == nil || os.IsExist(err) {\n\t\t\tlog.Printf(\"ReadSupporters %v: %-32v skipped, file exists\\n\", id, r.Name)\n\t\t} else {\n\t\t\tlog.Printf(\"ReadSupporters %v: %-32v start\\n\", id, r.Name)\n\t\t\t// Create a file using the ID and write to it. 
We'll rename it to the group\n\t\t\t// when all of the supporters are gathered.\n\t\t\ttemp := fmt.Sprintf(\"%v.csv\", r.SegmentID)\n\t\t\tf, err := os.Create(temp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw := csv.NewWriter(f)\n\t\t\theaders := []string{\"SegmentID\", \"SegmentName\", \"SupporterID\", \"Email\"}\n\t\t\tw.Write(headers)\n\n\t\t\t// Read all supporters and write info to the group's CSV.\n\t\t\tcount := e.Metrics.MaxBatchSize\n\t\t\toffset := int32(0)\n\t\t\tfor count == e.Metrics.MaxBatchSize {\n\t\t\t\tpayload := goengage.SegmentMembershipRequestPayload{\n\t\t\t\t\tSegmentID: r.SegmentID,\n\t\t\t\t\tOffset: offset,\n\t\t\t\t\tCount: count,\n\t\t\t\t}\n\t\t\t\trqt := goengage.SegmentMembershipRequest{\n\t\t\t\t\tHeader: goengage.RequestHeader{},\n\t\t\t\t\tPayload: payload,\n\t\t\t\t}\n\t\t\t\tvar resp goengage.SegmentMembershipResponse\n\n\t\t\t\tn := goengage.NetOp{\n\t\t\t\t\tHost: e.Host,\n\t\t\t\t\tMethod: goengage.SearchMethod,\n\t\t\t\t\tEndpoint: goengage.SegmentSearchMembers,\n\t\t\t\t\tToken: e.Token,\n\t\t\t\t\tRequest: &rqt,\n\t\t\t\t\tResponse: &resp,\n\t\t\t\t}\n\t\t\t\tok := false\n\t\t\t\tfor !ok {\n\t\t\t\t\terr = n.Do()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"ReadSupporters %v: %-32v %v\\n\", id, r.Name, err)\n\t\t\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tok = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, s := range resp.Payload.Supporters {\n\t\t\t\t\temail := goengage.FirstEmail(s)\n\t\t\t\t\tif email != nil {\n\t\t\t\t\t\ta := []string{r.SegmentID, r.Name, s.SupporterID, *email}\n\t\t\t\t\t\tw.Write(a)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tw.Flush()\n\t\t\t\tcount = resp.Payload.Count\n\t\t\t\toffset += int32(count)\n\t\t\t}\n\t\t\terr = os.Rename(temp, filename)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Printf(\"ReadSupporters %v: %-32v done\\n\", id, r.Name)\n\t\t}\n\t}\n\tdone <- true\n\tlog.Printf(\"ReadSupporters %v: end\\n\", id)\n\treturn nil\n}", "func 
NewPuller(\n\tctx context.Context,\n\ttempDir string,\n\tchunksPerTF int,\n\tsrcCS, sinkCS chunks.ChunkStore,\n\twalkAddrs WalkAddrs,\n\thashes []hash.Hash,\n\tstatsCh chan Stats,\n) (*Puller, error) {\n\t// Sanity Check\n\ths := hash.NewHashSet(hashes...)\n\tmissing, err := srcCS.HasMany(ctx, hs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif missing.Size() != 0 {\n\t\treturn nil, errors.New(\"not found\")\n\t}\n\n\ths = hash.NewHashSet(hashes...)\n\tmissing, err = sinkCS.HasMany(ctx, hs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif missing.Size() == 0 {\n\t\treturn nil, ErrDBUpToDate\n\t}\n\n\tif srcCS.Version() != sinkCS.Version() {\n\t\treturn nil, fmt.Errorf(\"cannot pull from src to sink; src version is %v and sink version is %v\", srcCS.Version(), sinkCS.Version())\n\t}\n\n\tsrcChunkStore, ok := srcCS.(nbs.NBSCompressedChunkStore)\n\tif !ok {\n\t\treturn nil, ErrIncompatibleSourceChunkStore\n\t}\n\n\twr, err := nbs.NewCmpChunkTableWriter(tempDir)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar pushLogger *log.Logger\n\tif dbg, ok := os.LookupEnv(\"PUSH_LOG\"); ok && strings.ToLower(dbg) == \"true\" {\n\t\tlogFilePath := filepath.Join(tempDir, \"push.log\")\n\t\tf, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.ModePerm)\n\n\t\tif err == nil {\n\t\t\tpushLogger = log.New(f, \"\", log.Lmicroseconds)\n\t\t}\n\t}\n\n\tp := &Puller{\n\t\twaf: walkAddrs,\n\t\tsrcChunkStore: srcChunkStore,\n\t\tsinkDBCS: sinkCS,\n\t\thashes: hash.NewHashSet(hashes...),\n\t\ttablefileSema: semaphore.NewWeighted(outstandingTableFiles),\n\t\ttempDir: tempDir,\n\t\twr: wr,\n\t\tchunksPerTF: chunksPerTF,\n\t\tpushLog: pushLogger,\n\t\tstatsCh: statsCh,\n\t\tstats: &stats{},\n\t}\n\n\tif lcs, ok := sinkCS.(chunks.LoggingChunkStore); ok {\n\t\tlcs.SetLogger(p)\n\t}\n\n\treturn p, nil\n}", "func (s *quicHandler) receiveDataFromZipperSenders() {\n\tfor {\n\t\tselect {\n\t\tcase receiver, ok := <-s.zipperReceiver:\n\t\t\tif !ok 
{\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsinks := GetSinks(s.serverlessConfig, &s.connMap)\n\t\t\tif len(sinks) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tgo func() {\n\t\t\t\tfd := decoder.NewFrameDecoder(receiver)\n\t\t\t\tfor {\n\t\t\t\t\tbuf, err := fd.Read(false)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// send data to sinks\n\t\t\t\t\t\tfor _, sink := range sinks {\n\t\t\t\t\t\t\tgo sendDataToSink(sink, buf, \"[Zipper Receiver] sent frame to sink.\", \"❌ [Zipper Receiver] sent frame to sink failed.\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}", "func (pl *Payload) push(x interface{}) {\n\tswitch x.(type) {\n\tcase DNCReport:\n\t\tpl.DNCReports = append(pl.DNCReports, x.(DNCReport))\n\tcase CNIReport:\n\t\tpl.CNIReports = append(pl.CNIReports, x.(CNIReport))\n\tcase NPMReport:\n\t\tpl.NPMReports = append(pl.NPMReports, x.(NPMReport))\n\tcase CNSReport:\n\t\tpl.CNSReports = append(pl.CNSReports, x.(CNSReport))\n\t}\n}", "func collector( ch_name string, rdr *rabbit_hole.Mq_reader, num int, raw_json bool, wg *sync.WaitGroup, sheep *bleater.Bleater ) {\n\n\trh_ch := make( chan amqp.Delivery, 4096 )\t\t\t// our listen channel\n\tcount := 0\n\tdefer rdr.Close()\t\t\t\t\t\t\t\t\t// ensure reader is closed on return\n\n\tsheep.Baa( 1, \"reading from tokay response exchange: %s\", ch_name )\n\t\n\trdr.Start_eating( rh_ch )\n\tfor {\n\t\tmsg := <- rh_ch\t\t\t\t\t\t\t\t\t// wait for next msg from rabbit hole\n\t\tif raw_json {\n\t\t\tfmt.Printf( \"%s\\n\", msg.Body )\n\t\t} else {\n\t\t\tjt, _ := jsontools.Json2tree( msg.Body )\n\t\t\tjt.Pretty_print( os.Stdout )\n\t\t}\n\n\t\tmsg.Body = nil\n\t\tcount++\n\t\tif( num > 0 && count >= num ) {\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\trdr.Stop()\t\t\t\t\t// turn off listner\n\twg.Done()\t\t\t\t\t// dec counter and possibly release main\n\treturn\n}", "func (log *Logger) Records(msg string, fd interface{}) {\n\tlog.pipeline[msg] = fd\n}", "func (ch *ClickHouseExportProcess) 
pushRecordsToFrontOfQueue(records []*flowrecord.FlowRecord) {\n\tch.dequeMutex.Lock()\n\tdefer ch.dequeMutex.Unlock()\n\n\tfor i := len(records) - 1; i >= 0; i-- {\n\t\tif ch.deque.Len() >= ch.queueSize {\n\t\t\tbreak\n\t\t}\n\t\tch.deque.PushFront(records[i])\n\t}\n}", "func TestFlowExporter_sendDataRecord(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\t// Values in the connection are not important. Initializing with 0s.\n\tflow1 := flowexporter.Connection{\n\t\tStartTime: time.Time{},\n\t\tStopTime: time.Time{},\n\t\tOriginalPackets: 0,\n\t\tOriginalBytes: 0,\n\t\tReversePackets: 0,\n\t\tReverseBytes: 0,\n\t\tTupleOrig: flowexporter.Tuple{\n\t\t\tSourceAddress: nil,\n\t\t\tDestinationAddress: nil,\n\t\t\tProtocol: 0,\n\t\t\tSourcePort: 0,\n\t\t\tDestinationPort: 0,\n\t\t},\n\t\tTupleReply: flowexporter.Tuple{\n\t\t\tSourceAddress: nil,\n\t\t\tDestinationAddress: nil,\n\t\t\tProtocol: 0,\n\t\t\tSourcePort: 0,\n\t\t\tDestinationPort: 0,\n\t\t},\n\t\tSourcePodNamespace: \"\",\n\t\tSourcePodName: \"\",\n\t\tDestinationPodNamespace: \"\",\n\t\tDestinationPodName: \"\",\n\t}\n\trecord1 := flowexporter.FlowRecord{\n\t\tConn: &flow1,\n\t\tPrevPackets: 0,\n\t\tPrevBytes: 0,\n\t\tPrevReversePackets: 0,\n\t\tPrevReverseBytes: 0,\n\t}\n\t// Following consists of all elements that are in IANAInfoElements and AntreaInfoElements (globals)\n\t// Need only element name and other are dummys\n\telemList := make([]*ipfixentities.InfoElement, len(IANAInfoElements)+len(IANAReverseInfoElements)+len(AntreaInfoElements))\n\tfor i, ie := range IANAInfoElements {\n\t\telemList[i] = ipfixentities.NewInfoElement(ie, 0, 0, 0, 0)\n\t}\n\tfor i, ie := range IANAReverseInfoElements {\n\t\telemList[i+len(IANAInfoElements)] = ipfixentities.NewInfoElement(ie, 0, 0, ipfixregistry.IANAReversedEnterpriseID, 0)\n\t}\n\tfor i, ie := range AntreaInfoElements {\n\t\telemList[i+len(IANAInfoElements)+len(IANAReverseInfoElements)] = ipfixentities.NewInfoElement(ie, 0, 0, 0, 
0)\n\t}\n\n\tmockIPFIXExpProc := ipfixtest.NewMockIPFIXExportingProcess(ctrl)\n\tmockDataRec := ipfixtest.NewMockIPFIXRecord(ctrl)\n\tmockIPFIXRegistry := ipfixtest.NewMockIPFIXRegistry(ctrl)\n\tflowExp := &flowExporter{\n\t\tnil,\n\t\tmockIPFIXExpProc,\n\t\telemList,\n\t\ttestFlowExportFrequency,\n\t\t0,\n\t\ttestTemplateID,\n\t\tmockIPFIXRegistry,\n\t}\n\t// Expect calls required\n\tvar dataRecord ipfixentities.Record\n\ttempBytes := uint16(0)\n\tfor _, ie := range flowExp.elementsList {\n\t\tswitch ieName := ie.Name; ieName {\n\t\tcase \"flowStartSeconds\", \"flowEndSeconds\":\n\t\t\tmockDataRec.EXPECT().AddInfoElement(ie, uint32(time.Time{}.Unix())).Return(tempBytes, nil)\n\t\tcase \"sourceIPv4Address\", \"destinationIPv4Address\":\n\t\t\tmockDataRec.EXPECT().AddInfoElement(ie, nil).Return(tempBytes, nil)\n\t\tcase \"destinationClusterIPv4\":\n\t\t\tmockDataRec.EXPECT().AddInfoElement(ie, net.IP{0, 0, 0, 0}).Return(tempBytes, nil)\n\t\tcase \"sourceTransportPort\", \"destinationTransportPort\":\n\t\t\tmockDataRec.EXPECT().AddInfoElement(ie, uint16(0)).Return(tempBytes, nil)\n\t\tcase \"protocolIdentifier\":\n\t\t\tmockDataRec.EXPECT().AddInfoElement(ie, uint8(0)).Return(tempBytes, nil)\n\t\tcase \"packetTotalCount\", \"octetTotalCount\", \"packetDeltaCount\", \"octetDeltaCount\", \"reversePacketTotalCount\", \"reverseOctetTotalCount\", \"reversePacketDeltaCount\", \"reverseOctetDeltaCount\":\n\t\t\tmockDataRec.EXPECT().AddInfoElement(ie, uint64(0)).Return(tempBytes, nil)\n\t\tcase \"sourcePodName\", \"sourcePodNamespace\", \"sourceNodeName\", \"destinationPodName\", \"destinationPodNamespace\", \"destinationNodeName\", \"destinationServicePortName\":\n\t\t\tmockDataRec.EXPECT().AddInfoElement(ie, \"\").Return(tempBytes, nil)\n\t\tcase \"ingressNetworkPolicyName\", \"ingressNetworkPolicyNamespace\", \"egressNetworkPolicyName\", \"egressNetworkPolicyNamespace\":\n\t\t\tmockDataRec.EXPECT().AddInfoElement(ie, \"\").Return(tempBytes, 
nil)\n\t\t}\n\t}\n\tmockDataRec.EXPECT().GetRecord().Return(dataRecord)\n\tmockIPFIXExpProc.EXPECT().AddRecordAndSendMsg(ipfixentities.Data, dataRecord).Return(0, nil)\n\n\terr := flowExp.sendDataRecord(mockDataRec, record1)\n\tif err != nil {\n\t\tt.Errorf(\"Error in sending data record: %v\", err)\n\t}\n}", "func cmdProducer(cmdFile string, recordChan chan recWrap) {\n\tdefer wg.Done()\n\tcmds, err := os.Open(cmdFile)\n\tif err != nil{\n\t\tlog.Fatal(err)\n\t}\n\tdefer cmds.Close()\n\n\tr := csv.NewReader(bufio.NewReader(cmds))\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\trecordChan <- recWrap{input: record}\n\t}\n}", "func (p *PGPusher) Push(fetchedResults map[int][]string) error {\n\t// Connect to DB\n\terr := p.ForceConnect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer p.db.Close()\n\t// Create CDR table for import\n\terr = p.CreateCDRTable()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Prepare SQL query\n\terr = p.buildInsertQuery()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Insert in Batch to DB\n\terr = p.BatchInsert(fetchedResults)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"Total number pushed to PostgreSQL:\", p.countPushed)\n\treturn nil\n}", "func (th *DataRequestHandler) pushesToRequests() {\n\tstop := false\n\tpreReadItem := ^Index(0)\n\t// convert pushes into requests\n\tfor {\n\t\t// buffer a bunch of pushes\n\t\ttodoLen := len(th.pushes)\n\t\thitWindow := false\n\t\tcheckWindowHit := func(t Index) {\n\t\t\tif th.lastRequest == nil {\n\t\t\t\tth.lastRequest = &DataRequest{Start: 0, End: defaultWindowSpan}\n\t\t\t}\n\t\t\tif t >= th.lastRequest.Start && t < th.lastRequest.End {\n\t\t\t\thitWindow = true\n\t\t\t}\n\t\t}\n\t\t// we pre-read an item to wait for events, but we don't want to forget about this item\n\t\tif preReadItem != ^Index(0) {\n\t\t\tcheckWindowHit(preReadItem)\n\t\t}\n\t\tfor i := 0; i < todoLen; i++ {\n\t\t\tt, ok := <-th.pushes\n\t\t\tif !ok {\n\t\t\t\t// we just 
closed, stop processing after this\n\t\t\t\tstop = true\n\t\t\t}\n\t\t\tcheckWindowHit(t)\n\t\t}\n\t\t// if there is no work to do, wait for a bit, and check again\n\t\tif !hitWindow {\n\t\t\t// maybe we just need to stop because we can't receive pushes anymore\n\t\t\tif stop {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// no stopping yet, but no work to do either, wait for an update, then wait another second (to batch pushes), then form a request\n\t\t\titem, ok := <-th.pushes\n\t\t\t// it may also be the last item\n\t\t\tif !ok {\n\t\t\t\tstop = true\n\t\t\t}\n\t\t\tpreReadItem = item\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t} else {\n\t\t\t// there are pushes within the client range, repeat the client request.\n\t\t\t// trigger, if it's not already.\n\t\t\tif len(th.gotRequest) == 0 {\n\t\t\t\tth.gotRequest <- true\n\t\t\t}\n\n\t\t\t// maybe we just need to stop because we can't receive pushes anymore\n\t\t\tif stop {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// continue to process more pushes, if any are remaining in the channel\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func Supporter(rt *Runtime, c chan goengage.Fundraise) (err error) {\n\trt.Log.Println(\"Supporter: start\")\n\tfor true {\n\t\tr, ok := <-c\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tif rt.GoodYear(r.ActivityDate) {\n\t\t\trt.Log.Printf(\"%v Supporter\\n\", r.ActivityID)\n\n\t\t\ts := goengage.Supporter{\n\t\t\t\tSupporterID: r.SupporterID,\n\t\t\t}\n\t\t\trt.DB.FirstOrInit(&s, s)\n\n\t\t\t// rt.DB.Where(\"supporter_id = ?\", r.SupporterID).First(&s)\n\t\t\tif s.CreatedDate == nil {\n\t\t\t\tt, err := goengage.FetchSupporter(rt.Env, r.SupporterID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif t == nil {\n\t\t\t\t\tx := time.Now()\n\t\t\t\t\ts.CreatedDate = &x\n\t\t\t\t} else {\n\t\t\t\t\ts = *t\n\t\t\t\t}\n\t\t\t\trt.DB.Create(&s)\n\t\t\t}\n\t\t}\n\t}\n\trt.Log.Println(\"Supporter: end\")\n\treturn nil\n}", "func (s *quicHandler) receiveDataFromSources() {\n\tfor {\n\t\tselect {\n\t\tcase 
item, ok := <-s.source:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// one stream for each flows/sinks.\n\t\t\tflows, sinks := Build(s.serverlessConfig, &s.connMap)\n\t\t\tstream := DispatcherWithFunc(flows, item)\n\n\t\t\tgo func() {\n\t\t\t\tfor customer := range stream.Observe(rxgo.WithErrorStrategy(rxgo.ContinueOnError)) {\n\t\t\t\t\tif customer.Error() {\n\t\t\t\t\t\tfmt.Println(customer.E.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tvalue := customer.V.([]byte)\n\n\t\t\t\t\t// sinks\n\t\t\t\t\tfor _, sink := range sinks {\n\t\t\t\t\t\tgo sendDataToSink(sink, value, \"Zipper sent frame to sink\", \"❌ Zipper sent frame to sink failed.\")\n\t\t\t\t\t}\n\n\t\t\t\t\t// Zipper-Senders\n\t\t\t\t\tfor _, sender := range s.zipperSenders {\n\t\t\t\t\t\tif sender == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tgo sendDataToSink(sender, value, \"[Zipper Sender] sent frame to downstream zipper.\", \"❌ [Zipper Sender] sent frame to downstream zipper failed.\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}", "func convertRecords(screeningRecords []ninox.Record, basicIndex ninox.Index) []*ninox.Record {\n\n\tconst sourceName = \"ICTRP\"\n\n\t// initialize the updates/inserts\n\tupdates := []*ninox.Record{}\n\n\tfor _, s := range screeningRecords {\n\n\t\tsourceID := s.Field(\"TrialID\")\n\n\t\t// skip all records that exist in ninox already\n\t\t_, ok := basicIndex.Get(sourceID)\n\t\tif ok {\n\t\t\tfmt.Printf(\"record exists already: %s, %s\\n\", sourceID)\n\t\t\tcontinue\n\t\t}\n\n\t\t// initialize a new record\n\t\tr := ninox.Record{}\n\t\tr.Fields = make(map[string]interface{})\n\t\tr.Fields[\"source\"] = sourceName\n\t\tr.Fields[\"source_id\"] = sourceID\n\t\tr.Fields[\"review_status\"] = \"prefilled automatically\"\n\n\t\tr.Update(\"entry_type\", \"registration\", nil)\n\n\t\tr.Update(\"url\", s.Field(\"web address\"), nil)\n\n\t\tr.Update(\"title\", s.Field(\"Scientific title\"), nil)\n\n\t\tr.Update(\"corresp_author_lastname\", 
s.Field(\"Contact Lastname\"), nil)\n\t\tr.Update(\"corresp_author_email\", s.Field(\"Contact Email\"), nil)\n\n\t\tr.Update(\"status\", s.Field(\"Recruitment Status\"), toLowerCase)\n\t\tr.Update(\"status_date\", s.Field(\"Last Refreshed On\"), toIsoDate)\n\n\t\tr.Update(\"country\", s.Field(\"Countries\"), func(country string) (interface{}, bool) {\n\t\t\t// country field may contain multiple countries separated by semicolon\n\t\t\t// -> use international if there are multiple countries\n\t\t\t// -> use the country name if it is the same multiple times\n\t\t\tif strings.Contains(country, \";\") == false {\n\t\t\t\treturn country, false\n\t\t\t}\n\n\t\t\titems := strings.Split(country, \"; \")\n\t\t\tfirst := items[0]\n\t\t\tinternational := false\n\t\t\tfor _, c := range items {\n\t\t\t\tif c != first {\n\t\t\t\t\tinternational = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif international {\n\t\t\t\treturn \"international\", true\n\t\t\t}\n\n\t\t\treturn first, true\n\t\t})\n\n\t\tr.Update(\"randomized\", s.Field(\"Study design\"), toLowerCase)\n\n\t\tr.Update(\"population_condition\", s.Field(\"condition\"), nil)\n\n\t\tr.Update(\"intervention_name\", s.Field(\"Intervention\"), nil)\n\n\t\tr.Update(\"out_primary_measure\", s.Field(\"Primary outcome\"), nil)\n\n\t\tr.Update(\"start_date\", s.Field(\"Date enrollement\"), toIsoDate)\n\n\t\t// results_available if a results url is given\n\t\tr.Update(\"results_available\", s.Field(\"results url link\"), func(url string) (interface{}, bool) {\n\t\t\tif url == \"\" {\n\t\t\t\treturn \"no\", true\n\t\t\t}\n\t\t\treturn \"yes\", true\n\t\t})\n\n\t\tr.Update(\"inclusion_criteria\", s.Field(\"Inclusion Criteria\"), nil)\n\t\tr.Update(\"exclusion_criteria\", s.Field(\"Exclusion Criteria\"), nil)\n\n\t\t// nothing to do, if the record was not changed\n\t\tif r.IsUpdated == false {\n\t\t\tcontinue\n\t\t}\n\n\t\tupdates = append(updates, &r)\n\t}\n\n\treturn updates\n\n}", "func (p *RiakPusher) Push(fetchedResults map[int][]string) 
error {\n\t// Connect to DB\n\terr := p.ForceConnect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer riak.Close()\n\n\t// Insert in Batch to DB\n\terr = p.BatchInsert(fetchedResults)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"Total number pushed to Riak:\", p.countPushed)\n\treturn nil\n}", "func Read(delim rune) func(<-chan interface{}, chan<- interface{}, chan<- error) {\n\tif delim == rune(0) {\n\t\tdelim = ','\n\t}\n\treturn func(in <-chan interface{}, out chan<- interface{}, errs chan<- error) {\n\t\tvar header []string\n\n\t\tfor m := range in {\n\t\t\tr := csv.NewReader(strings.NewReader(m.(fmt.Stringer).String()))\n\t\t\tr.Comma = delim\n\t\t\t//r.ReuseRecord = true\n\t\t\tr.LazyQuotes = true\n\n\t\t\trecords, err := r.ReadAll()\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t}\n\n\t\t\tfor _, rec := range records {\n\t\t\t\tif header == nil {\n\t\t\t\t\theader = rec\n\t\t\t\t} else {\n\t\t\t\t\trow := message.NewRecord()\n\t\t\t\t\tfor i, v := range header {\n\t\t\t\t\t\trow.Set(v, rec[i])\n\t\t\t\t\t}\n\t\t\t\t\tout <- row\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t}\n}", "func pushTxReceipts(txReceipts *types.TxReceipts4Subscribe) error {\n\ttxReceiptCh <- txReceipts\n\terr := <-resultCh\n\treturn err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StartService Starts selenium server
func StartService() { opts := []selenium.ServiceOption{ selenium.ChromeDriver(helpers.AutomationPath + driverPath), // Specify the path to GeckoDriver in order to use Firefox. selenium.Output(os.Stderr), // Output debug information to STDERR. } selenium.SetDebug(false) selenium, err := selenium.NewSeleniumService(helpers.AutomationPath+seleniumPath, port, opts...) if err != nil { panic(err) } seleniumService = selenium }
[ "func (s *Service) StartService() {\n\tutils.Logger().Info().Msg(\"Starting explorer service.\")\n\ts.Init(true)\n\ts.server = s.Run()\n}", "func (s JSONHTTPServer) StartService(status chan bool) {\n\tgo s.startInternal(status)\n}", "func StartService(service string) error {\n\treturn doService(service, \"start\")\n}", "func StartSelenium() {\n\tvar err error\n\tcheckWebDriver()\n\tdriver, err = core.Selenium()\n\tcheckFailure(err)\n\tcheckFailure(driver.Start())\n}", "func (rs *RatchetServer) StartService() {\n\tif rs.isStarted {\n\t\treturn\n\t}\n\trs.GenerateKeys()\n\trs.fountain.StartService()\n\trs.ticker = timesource.Clock.NewTicker(time.Minute * 5)\n\tgo func() {\n\t\tfor range rs.ticker.Chan() {\n\t\t\t// Pregenerate.\n\t\t\trs.GenerateKeys()\n\t\t\t// Call persistence.\n\t\t\tif err := rs.persist(); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}()\n}", "func startSeleniumServerOrCrash() *exec.Cmd {\n\tmyLog.Info(\"Starting Selenium server...\")\n\tcmd := exec.Command(\"/opt/bin/entry_point.sh\")\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tmyLog.Fatal(err)\n\t}\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tmyLog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tscanner := bufio.NewScanner(stderr)\n\t\tfor scanner.Scan() {\n\t\t\tmyLog.Error(scanner.Text())\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tmyLog.Fatal(err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tscanner := bufio.NewScanner(stdout)\n\t\tfor scanner.Scan() {\n\t\t\tmyLog.Info(scanner.Text())\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tmyLog.Fatal(err)\n\t\t}\n\t}()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\tmyLog.Fatal(\"Cannot start Selenium process:\", err)\n\t}\n\n\treturn cmd\n}", "func StartTelemetryService() error {\n\tplatform.KillProcessByName(telemetryServiceProcessName)\n\n\ttelemetryLogger.Printf(\"[Telemetry] Starting telemetry service process\")\n\tpath := fmt.Sprintf(\"%v/%v\", cniInstallDir, telemetryServiceProcessName)\n\tif err 
:= common.StartProcess(path); err != nil {\n\t\ttelemetryLogger.Printf(\"[Telemetry] Failed to start telemetry service process :%v\", err)\n\t\treturn err\n\t}\n\n\ttelemetryLogger.Printf(\"[Telemetry] Telemetry service started\")\n\n\tfor attempt := 0; attempt < 5; attempt++ {\n\t\tif checkIfSockExists() {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\n\treturn nil\n}", "func StartService() {\n\tlog.Printf(\"Starting weather web service\\n\")\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/weather\", weatherHandler)\n\tmux.HandleFunc(\"/health-check\", healthCheck)\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", shared.WeatherPort), mux))\n}", "func (s *Service) startService() error {\n\t// Connect to the windows service manager.\n\tserviceManager, err := mgr.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer serviceManager.Disconnect()\n\n\tservice, err := serviceManager.OpenService(s.description.Name)\n\tif err != nil {\n\t\treturn errors.Errorf(\"could not access service: %s\", err)\n\t}\n\tdefer service.Close()\n\n\terr = service.Start(os.Args)\n\tif err != nil {\n\t\treturn errors.Errorf(\"could not start service: %s\", err)\n\t}\n\n\treturn nil\n}", "func (service *linuxUpstartService) Start() error {\n\tcmd := exec.Command(\"start\", service.config.Name)\n\n\treturn cmd.Run()\n}", "func (n *Node) StartService() {\n\tif n.stopServer != nil {\n\t\treturn\n\t}\n\n\tif n.service.listen == nil {\n\t\tn.service.listen = network.TCPListener\n\t}\n\n\tvar genesis hash.Hash\n\tif n.consensus != nil {\n\t\tgenesis = n.consensus.GetGenesisHash()\n\t}\n\n\tbind := n.NetAddrOf(n.host)\n\t_, n.Addr, n.stopServer = api.StartService(bind, n.key, genesis, n, n.Infof, n.service.listen)\n}", "func Start() {\n\tdriver.Main(func(app oswin.App) {\n\t\tatomic.AddInt32(&started, 1)\n\t\t<-quit\n\t})\n}", "func (s *AngularService) Start() (err error) {\n\tif s.options.Cd != \"\" {\n\t\tvar currDir string\n\t\tif currDir, err = os.Getwd(); 
err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = os.Chdir(s.options.Cd); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer os.Chdir(currDir)\n\t}\n\n\tvar ctx context.Context\n\n\tctx, s.ctxCancel = context.WithCancel(context.Background())\n\ts.done = ctx.Done()\n\n\tcmdArgs := []string{\"serve\"}\n\tif s.options.Port > 0 {\n\t\tcmdArgs = append(cmdArgs, \"--port\", strconv.Itoa(s.options.Port))\n\t}\n\n\tif s.options.Args != nil {\n\t\tcmdArgs = append(cmdArgs, s.options.Args...)\n\t}\n\n\tcmd := exec.CommandContext(ctx, \"ng\", cmdArgs...)\n\n\tif s.options.Stdout != nil {\n\t\tcmd.Stdout = s.options.Stdout\n\t}\n\tif s.options.Stderr != nil {\n\t\tcmd.Stderr = s.options.Stderr\n\t}\n\n\treturn cmd.Start()\n}", "func (openrc OpenRCInitSystem) ServiceStart(service string) error {\n\targs := []string{service, \"start\"}\n\treturn exec.Command(\"rc-service\", args...).Run()\n}", "func (sv *Unit) Start() (err error) {\n\te := log.WithField(\"ExecStart\", sv.Definition.Service.ExecStart)\n\n\te.Debug(\"sv.Start\")\n\n\tswitch sv.Definition.Service.Type {\n\tcase \"simple\":\n\t\tif err = sv.Cmd.Start(); err == nil {\n\t\t\tgo sv.Cmd.Wait()\n\t\t}\n\tcase \"oneshot\":\n\t\terr = sv.Cmd.Run()\n\tdefault:\n\t\tpanic(\"Unknown service type\")\n\t}\n\n\te.WithField(\"err\", err).Debug(\"started\")\n\treturn\n}", "func WDInit() selenium.WebDriver {\n\tvar err error\n\n\tops := []selenium.ServiceOption{\n\t\tselenium.ChromeDriver(seleniumPath),\n\t}\n\n\t//service, err := selenium.NewSeleniumService(seleniumPath, port, ops...)\n\tservice, err := selenium.NewChromeDriverService(chromeDriverPath, port, ops...)\n\tif err != nil {\n\t\tlog.Printf(\"Error starting the ChromeDriver server: %v\", err)\n\t}\n\t//Delay service shutdown\n\tdefer service.Stop()\n\n\t//log.Println(\"Service => \", service)\n\n\tcaps := selenium.Capabilities(map[string]interface{}{\"browserName\": \"chrome\"})\n\t//log.Println(\"Capabilities => \", caps)\n\n\tdriver, err := selenium.NewRemote(caps, 
\"\")\n\n\tif err != nil {\n\t\tlog.Println(\"support/base | Error al instanciar el driver de Selenium : \", err.Error())\n\t}\n\t//driver.ResizeWindow(\"note\", 1920, 1080)\n\treturn driver\n}", "func StopService() {\n\tseleniumService.Stop()\n}", "func StartTaskService(brain *brain.Manager, errChan chan error) {\n\tlis, err := net.Listen(\"tcp\", taskServicePort)\n\tif err != nil {\n\t\terrChan <- err\n\t\treturn\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\n\tRegisterTaskServiceServer(grpcServer, TaskService{Manager: brain})\n\n\tlog.LogInfo(\"starting taask-server task service on :3688\")\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\terrChan <- err\n\t}\n}", "func (s *ExtSupervisor) startService(ctx context.Context, name string, runf extension.RunFunc) {\n\textensionCtx, cancel := context.WithCancel(ctx)\n\tsi := &Extension{\n\t\tname: name,\n\t\tversion: \"1\", // TODO\n\t\tctx: extensionCtx,\n\t\tcancel: cancel,\n\t\tdonech: make(chan struct{}),\n\t}\n\ts.log.Info(\"starting extension %s\", si)\n\ts.runmap[name] = si\n\n\t// start the extension goroutine, which calls runf and blocks until it either:\n\t// - returns,\n\t// - panics (logs panic), or\n\t// - times out (si.ctx)\n\tgo func() {\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\ts.log.Error(\"panic in extension %q: %v\\n%s\", si.name, r, debug.Stack())\n\t\t\t\t}\n\t\t\t}()\n\t\t\trunf(si)\n\t\t\tselect {\n\t\t\tcase <-si.ctx.Done(): // ok\n\t\t\t\ts.log.Debug(\"extension %s exited\", si)\n\t\t\tdefault:\n\t\t\t\ts.log.Warn(\"extension %s exited prematurely (did not wait for Done())\", si)\n\t\t\t\tsi.cancel()\n\t\t\t}\n\t\t}()\n\n\t\t// signal to supervisor that the extension has completed shutdown\n\t\tclose(si.donech)\n\t}()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StopService stops selenium server
func StopService() { seleniumService.Stop() }
[ "func StopService() {\n\tserver.Stop()\n}", "func (n *Node) StopService() {\n\tif n.stopServer == nil {\n\t\treturn\n\t}\n\tn.stopServer()\n\tn.stopServer = nil\n}", "func (s *Service) StopService() {\n\tutils.Logger().Info().Msg(\"Shutting down explorer service.\")\n\tif err := s.server.Shutdown(context.Background()); err != nil {\n\t\tutils.Logger().Error().Err(err).Msg(\"Error when shutting down explorer server\")\n\t} else {\n\t\tutils.Logger().Info().Msg(\"Shutting down explorer server successufully\")\n\t}\n}", "func (m *mockService) Stop() {\n\t// m.ctrl.Finish() calls runtime.Goexit() on errors\n\t// put it in defer so cleanup is always done\n\tdefer func() {\n\t\tm.server.Shutdown()\n\t\tm.started = false\n\t}()\n\tm.ctrl.Finish()\n}", "func (htmlServer *HTMLServer) Stop() error {\n\n\tconst timeout = 5 * time.Second\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tlog.Println(Detail(\"SERVER : Service stopping.\"))\n\n\tif e := htmlServer.server.Shutdown(ctx); e != nil {\n\n\t\tif e := htmlServer.server.Close(); e != nil {\n\t\t\tlog.Printf(Warn(\"SERVER : Service stopping : Error=%s\"), e)\n\t\t\treturn e\n\t\t}\n\t}\n\n\thtmlServer.wg.Wait()\n\tlog.Println(Detail(\"SERVER : Stopped\"))\n\treturn nil\n}", "func (i *ServiceInitializer) StopService(s turbo.Servable) {\n}", "func (s *ExtSupervisor) stopService(ctx context.Context, name string) error {\n\t// retrieve and remove extension instance from runmap\n\ts.runmu.Lock()\n\tsi := s.runmap[name]\n\tif si != nil {\n\t\tdelete(s.runmap, name)\n\t}\n\ts.runmu.Unlock()\n\tif si == nil {\n\t\treturn errorf(\"can not find extension %q\", name)\n\t}\n\n\t// cancel the extension and await its shutdown\n\tsi.cancel()\n\tselect {\n\tcase <-si.donech:\n\t\t// ok; extension shut down ok\n\tcase <-ctx.Done():\n\t\t// Context cancelled or timed out\n\t\t// NOTE: There is a possibility that an ill-behaving extension just keeps on truckin' here.\n\t\treturn 
ctx.Err()\n\t}\n\treturn nil\n}", "func (s *TestAPIServer) Stop() {\n\ts.srv.Close()\n}", "func (service *linuxUpstartService) Stop() error {\n\tcmd := exec.Command(\"stop\", service.config.Name)\n\n\treturn cmd.Run()\n}", "func (srv *server) Stop(ctx context.Context) error {\n\tzaplog.Info(\"stopping mqtt server\")\n\n\tdefer func() {\n\t\tzaplog.Info(\"server stopped\")\n\t\t//zaplog.Sync()\n\t}()\n\n\tselect {\n\tcase <-srv.exitChan:\n\t\treturn nil\n\tdefault:\n\t\tclose(srv.exitChan)\n\t}\n\n\tfor _, l := range srv.tcpListener {\n\t\tl.Close()\n\t}\n\tfor _, ws := range srv.websocketServer {\n\t\tws.Server.Shutdown(ctx)\n\t}\n\n\t//关闭所有的client\n\t//closing all idle clients\n\tsrv.mu.Lock()\n\tcloseCompleteSet := make([]<-chan struct{}, len(srv.clients))\n\ti := 0\n\tfor _, c := range srv.clients {\n\t\tcloseCompleteSet[i] = c.Close()\n\t\ti++\n\t}\n\tsrv.mu.Unlock()\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tfor _, v := range closeCompleteSet {\n\t\t\t//等所有的session退出完毕\n\t\t\t//waiting for all sessions to unregister\n\t\t\t<-v\n\t\t}\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tzaplog.Warn(\"server stop timeout, forced exit\", zap.String(\"error\", ctx.Err().Error()))\n\t\treturn ctx.Err()\n\tcase <-done:\n\t\t// server 停止后触发\n\t\tfor _, hooks := range srv.hooks {\n\t\t\tif hooks.OnStop != nil {\n\t\t\t\thooks.OnStop()\n\t\t\t}\n\t\t}\n\n\t\tfor _, v := range srv.plugins {\n\t\t\terr := v.Unload()\n\t\t\tif err != nil {\n\t\t\t\tzaplog.Warn(\"plugin unload error\", zap.String(\"error\", err.Error()))\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func StopTestServer(server *http.Server) error {\n\terror := server.Shutdown(nil)\n\tif error != nil {\n\t\tfmt.Printf(\"Could not stop test websocket server in test_utils: %s\", error.Error())\n\t\treturn error\n\t}\n\treturn nil\n}", "func stopServer(w http.ResponseWriter, r *http.Request) {\n\tgo localServer.Shutdown(context.Background())\n}", "func (h *htrun) stop(rmFromSmap bool) 
{\n\tif rmFromSmap {\n\t\th.unregisterSelf(true)\n\t}\n\tnlog.Warningln(\"Shutting down HTTP\")\n\twg := &sync.WaitGroup{}\n\twg.Add(1)\n\tgo func() {\n\t\ttime.Sleep(time.Second / 2)\n\t\th.stopHTTPServer()\n\t\twg.Done()\n\t}()\n\tentry := xreg.GetRunning(xreg.Flt{})\n\tif entry != nil {\n\t\ttime.Sleep(time.Second)\n\t\tentry = xreg.GetRunning(xreg.Flt{})\n\t\tif entry != nil {\n\t\t\tnlog.Warningf(\"Timed out waiting for %q to finish aborting\", entry.Kind())\n\t\t}\n\t}\n\tif h.si.IsTarget() {\n\t\twg.Wait()\n\t}\n}", "func (openrc OpenRCInitSystem) ServiceStop(service string) error {\n\targs := []string{service, \"stop\"}\n\treturn exec.Command(\"rc-service\", args...).Run()\n}", "func (serv *webService) Stop(ctx context.Context) error {\n\tvar err error\n\tif serv.Server != nil {\n\t\terr = serv.Server.Shutdown(ctx)\n\t\tserv.Server = nil\n\t}\n\tif serv.RPC != nil {\n\t\terr2 := serv.RPC.Stop()\n\t\tif err2 != nil {\n\t\t\terr = err2\n\t\t}\n\t}\n\treturn err\n}", "func (e *E2EServices) Stop() {\n\tdefer func() {\n\t\t// Collect log files.\n\t\te.getLogFiles()\n\t\t// Cleanup the manifest path for kubelet.\n\t\tmanifestPath := framework.TestContext.ManifestPath\n\t\tif manifestPath != \"\" {\n\t\t\terr := os.RemoveAll(manifestPath)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Failed to delete static pod manifest directory %s: %v\", manifestPath, err)\n\t\t\t}\n\t\t}\n\t}()\n\tif e.services != nil {\n\t\tif err := e.services.kill(); err != nil {\n\t\t\tglog.Errorf(\"Failed to stop services: %v\", err)\n\t\t}\n\t}\n\tif e.kubelet != nil {\n\t\tif err := e.kubelet.kill(); err != nil {\n\t\t\tglog.Errorf(\"Failed to stop kubelet: %v\", err)\n\t\t}\n\t}\n}", "func (s Server) stop(ctx context.Context) {\n\ts.grpcServer.Stop()\n\terr := s.httpServer.Shutdown(ctx)\n\tif err != nil {\n\t\tlog.Err(err).Msg(\"error shutting down the http server\")\n\t}\n}", "func StopService(service string) error {\n\treturn doService(service, \"stop\")\n}", "func 
stopServer(grpcServer *grpc.Server, v *Vibranium) error {\n\ttime.Sleep(1 * time.Second) // to aviod \"read: connection reset by peer\"\n\tdefer time.Sleep(1 * time.Second) // to aviod \"bind error\"\n\tgrpcServer.GracefulStop()\n\tlog.Info(\"gRPC server stopped gracefully.\")\n\n\tlog.Info(\"Now check if cluster still have running tasks...\")\n\twait := make(chan interface{})\n\tgo func() {\n\t\tv.Wait()\n\t\twait <- \"\"\n\t}()\n\ttimer := time.NewTimer(time.Second * 30)\n\tselect {\n\tcase <-timer.C:\n\t\t// force quit(terminate all running tasks/goroutines)\n\t\tfor {\n\t\t\tif v.TaskNum == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tv.taskDone(\"\", false)\n\t\t}\n\t\tlog.Info(\"Cluster stopped FORCEFULLY\")\n\tcase <-wait:\n\t\tlog.Info(\"Cluster stopped gracefully\")\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewDriver create new browser driver
func NewDriver(browser string) selenium.WebDriver { StartService() caps := selenium.Capabilities{"browserName": browser} switch browser { case "chrome": chrCaps := chrome.Capabilities{ Args: []string{ "--no-sandbox", }, W3C: true, } if headless { chrCaps.Args = append(chrCaps.Args, "--headless") } caps.AddChrome(chrCaps) case "htmlunit": caps["javascriptEnabled"] = true } wd, err := selenium.NewRemote(caps, fmt.Sprintf("http://localhost:%d/wd/hub", port)) if err != nil { } driver = wd return wd }
[ "func NewDriver() godfish.Driver { return &driver{} }", "func newDriver() *driver {\n\treturn &driver{\n\t\tnetworks: map[string]*bridgeNetwork{},\n\t\tportAllocator: portallocator.Get(),\n\t}\n}", "func RegisterNewDriver(driver string, defaultscopes []string, callback func(client *http.Client, u *models.User), endpoint oauth2.Endpoint, apimap, usermap map[string]string) {\n\tapiMap[driver] = apimap\n\tuserMap[driver] = usermap\n\tendpointMap[driver] = endpoint\n\tcallbackMap[driver] = callback\n\tdefaultScopesMap[driver] = defaultscopes\n}", "func NewDriver(baseURL string, token string) (*Driver, error) {\n\traw, err := hype.New(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td := &Driver{\n\t\traw,\n\t\ttoken,\n\t\thype.NewHeader(\"Accept\", \"application/json\"),\n\t\thype.NewHeader(\"Content-Type\", \"application/json\"),\n\t\thype.NewHeader(\"User-Agent\", \"fbz/0.1.0 (https://github.com/ess/fbz)\"),\n\t}\n\n\treturn d, nil\n}", "func NewDriver(name string) (*App, error) {\n\treturn newApp(\"driver.\" + name)\n}", "func NewDriver() *Driver {\n\treturn &Driver{\n\t\tServer: http.Server{\n\t\t\tReadTimeout: 30 * time.Second,\n\t\t\tWriteTimeout: 30 * time.Second,\n\t\t\tIdleTimeout: 120 * time.Second,\n\t\t},\n\t}\n}", "func NewBrowser(conn *rpcc.Conn) *Browser {\n\treturn &Browser{conn: conn}\n}", "func NewDriver(machineID string, secretData map[string][]byte, classKind string, machineClass interface{}, machineName string) Driver {\n\n\tswitch classKind {\n\tcase \"OpenStackMachineClass\":\n\t\treturn &OpenStackDriver{\n\t\t\tOpenStackMachineClass: machineClass.(*v1alpha1.OpenStackMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\n\tcase \"AWSMachineClass\":\n\t\treturn &AWSDriver{\n\t\t\tAWSMachineClass: machineClass.(*v1alpha1.AWSMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: 
string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\n\tcase \"AzureMachineClass\":\n\t\treturn &AzureDriver{\n\t\t\tAzureMachineClass: machineClass.(*v1alpha1.AzureMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\n\tcase \"GCPMachineClass\":\n\t\treturn &GCPDriver{\n\t\t\tGCPMachineClass: machineClass.(*v1alpha1.GCPMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\n\tcase \"AlicloudMachineClass\":\n\t\treturn &AlicloudDriver{\n\t\t\tAlicloudMachineClass: machineClass.(*v1alpha1.AlicloudMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\tcase \"PacketMachineClass\":\n\t\treturn &PacketDriver{\n\t\t\tPacketMachineClass: machineClass.(*v1alpha1.PacketMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\t}\n\n\treturn NewFakeDriver(\n\t\tfunc() (string, string, error) {\n\t\t\tfakeVMs[\"fake\"] = \"fake_ip\"\n\t\t\treturn \"fake\", \"fake_ip\", nil\n\t\t},\n\t\tfunc(machineID string, machineName string) error {\n\t\t\tfakeVMs[machineID] = machineName\n\t\t\treturn nil\n\t\t},\n\t\tfunc(machineID string) error {\n\t\t\t// delete(fakeVMs, \"fake\")\n\t\t\tdelete(fakeVMs, machineID)\n\t\t\treturn nil\n\t\t},\n\t\tfunc() (string, error) {\n\t\t\treturn \"\", nil\n\t\t},\n\t\tfunc() (VMs, error) {\n\t\t\treturn fakeVMs, nil\n\t\t},\n\t\tfunc([]corev1.PersistentVolumeSpec) ([]string, error) {\n\t\t\treturn []string{}, nil\n\t\t},\n\t\tfunc() string {\n\t\t\treturn \"\"\n\t\t},\n\t\tfunc(string) {\n\t\t\treturn\n\t\t},\n\t)\n}", "func 
NewRemote(capabilities Capabilities, urlPrefix string) (WebDriver, error) {\n\tif urlPrefix == \"\" {\n\t\turlPrefix = DefaultURLPrefix\n\t}\n\n\twd := &remoteWD{\n\t\turlPrefix: urlPrefix,\n\t\tcapabilities: capabilities,\n\t}\n\tif b := capabilities[\"browserName\"]; b != nil {\n\t\twd.browser = b.(string)\n\t}\n\tif _, err := wd.NewSession(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn wd, nil\n}", "func CreateDriver(url string, capabilities map[string]string) *Driver {\n\tnewDriver := &Driver{\n\t\tclient.CreateClient(url),\n\t\tcapabilities,\n\t\t\"\",\n\t}\n\n\treturn newDriver\n}", "func NewDriver(name string, hooks HookType) *Driver {\n\treturn &Driver{name: name, hooks: hooks}\n}", "func NewDriver(dci DCI, n int) *Driver {\n\td := new(Driver)\n\td.dci = dci\n\td.buf = make([]byte, 0, n)\n\treturn d\n}", "func New() (d *Driver) {\n\treturn &Driver{}\n}", "func New(options *types.Options) (*Browser, error) {\n\tdataStore, err := os.MkdirTemp(\"\", \"nuclei-*\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create temporary directory\")\n\t}\n\tpreviousPIDs := processutil.FindProcesses(processutil.IsChromeProcess)\n\n\tchromeLauncher := launcher.New().\n\t\tLeakless(false).\n\t\tSet(\"disable-gpu\", \"true\").\n\t\tSet(\"ignore-certificate-errors\", \"true\").\n\t\tSet(\"ignore-certificate-errors\", \"1\").\n\t\tSet(\"disable-crash-reporter\", \"true\").\n\t\tSet(\"disable-notifications\", \"true\").\n\t\tSet(\"hide-scrollbars\", \"true\").\n\t\tSet(\"window-size\", fmt.Sprintf(\"%d,%d\", 1080, 1920)).\n\t\tSet(\"mute-audio\", \"true\").\n\t\tSet(\"incognito\", \"true\").\n\t\tDelete(\"use-mock-keychain\").\n\t\tUserDataDir(dataStore)\n\n\tif MustDisableSandbox() {\n\t\tchromeLauncher = chromeLauncher.NoSandbox(true)\n\t}\n\n\texecutablePath, err := os.Executable()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// if musl is used, most likely we are on alpine linux which is not supported by go-rod, so we fallback to default 
chrome\n\tuseMusl, _ := fileutil.UseMusl(executablePath)\n\tif options.UseInstalledChrome || useMusl {\n\t\tif chromePath, hasChrome := launcher.LookPath(); hasChrome {\n\t\t\tchromeLauncher.Bin(chromePath)\n\t\t} else {\n\t\t\treturn nil, errors.New(\"the chrome browser is not installed\")\n\t\t}\n\t}\n\n\tif options.ShowBrowser {\n\t\tchromeLauncher = chromeLauncher.Headless(false)\n\t} else {\n\t\tchromeLauncher = chromeLauncher.Headless(true)\n\t}\n\tif types.ProxyURL != \"\" {\n\t\tchromeLauncher = chromeLauncher.Proxy(types.ProxyURL)\n\t}\n\n\tfor k, v := range options.ParseHeadlessOptionalArguments() {\n\t\tchromeLauncher.Set(flags.Flag(k), v)\n\t}\n\n\tlauncherURL, err := chromeLauncher.Launch()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbrowser := rod.New().ControlURL(launcherURL)\n\tif browserErr := browser.Connect(); browserErr != nil {\n\t\treturn nil, browserErr\n\t}\n\tcustomAgent := \"\"\n\tfor _, option := range options.CustomHeaders {\n\t\tparts := strings.SplitN(option, \":\", 2)\n\t\tif len(parts) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.EqualFold(parts[0], \"User-Agent\") {\n\t\t\tcustomAgent = parts[1]\n\t\t}\n\t}\n\n\thttpclient, err := newHttpClient(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tengine := &Browser{\n\t\ttempDir: dataStore,\n\t\tcustomAgent: customAgent,\n\t\tengine: browser,\n\t\thttpclient: httpclient,\n\t\toptions: options,\n\t}\n\tengine.previousPIDs = previousPIDs\n\treturn engine, nil\n}", "func NewDriver(endpoint, driverName, nodeID string) *Driver {\n\tglog.Infof(\"NewDriver for CHDFS, driverName: %v version: %v nodeID: %v\", driverName, version, nodeID)\n\n\tcsiDriver := csicommon.NewCSIDriver(driverName, version, nodeID)\n\tcsiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{\n\t\tcsi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,\n\t})\n\n\treturn &Driver{\n\t\tcsiDriver: csiDriver,\n\t\tendpoint: endpoint,\n\t}\n}", "func (m *Manager) 
GetNewDriver(machineName string) (Driver, error) {\n\tif driverFactory, ok := m.drivers[machineName]; ok {\n\t\treturn driverFactory(), nil\n\t}\n\n\treturn nil, errors.New(\"No such driver: \" + machineName)\n}", "func NewDriver(options *DriverOptions) CSIDriver {\n\tif !*useDriverV2 {\n\t\treturn newDriverV1(options)\n\t} else {\n\t\treturn newDriverV2(options)\n\t}\n}", "func NewDriver(name string, loader func(interface{}) error) (Driver, error) {\n\tfactorysMu.RLock()\n\tfactoryi, ok := factories[name]\n\tfactorysMu.RUnlock()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"file: unknown driver %q (forgotten import?)\", name)\n\t}\n\treturn factoryi(loader)\n}", "func New(s sender.Interface) (d *driver) {\n\td = &driver{}\n\td.sender = s\n\td.next = d.newLogin()\n\td.Process(\"\")\n\treturn d\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Address will create a new Address service that can interact with the Swyftx addresses endpoints The asset code is required for the Deposit, Withdraw and CheckDeposit endpoints
func (c *Client) Address(assetCode ...string) *AddressService { if len(assetCode) == 0 { assetCode[0] = "" } return &AddressService{service{c}, assetCode[0]} }
[ "func (as *AddressService) Create(name string) (*Address, error) {\n\tif isEmptyStr(as.assetCode) {\n\t\treturn nil, errAssetCode\n\t}\n\n\tvar (\n\t\taddresses []*Address\n\t\tbody struct {\n\t\t\tAddress struct {\n\t\t\t\tName string `json:\"name\"`\n\t\t\t} `json:\"address\"`\n\t\t}\n\t)\n\tbody.Address.Name = name\n\n\tif err := as.client.Post(buildString(\"address/deposit/\", as.assetCode), &body, &addresses); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn addresses[0], nil\n}", "func (as *ApiService) CreateDepositAddress(currency string) (*ApiResponse, error) {\n\treq := NewRequest(http.MethodPost, \"/api/v1/deposit-addresses\", map[string]string{\"currency\": currency})\n\treturn as.Call(req)\n}", "func ExampleAddDepositAddress() {}", "func CreateAddress() *addresspb.Address {\n\ta := addresspb.Address{\n\t\tCorrespondanceAddr: &addresspb.Location{\n\t\t\tLocation: \"loc 1\",\n\t\t\tCity: &addresspb.City{\n\t\t\t\tName: \"Mumbai\",\n\t\t\t\tZipCode: \"400005\",\n\t\t\t\tRegion: addresspb.Division_WEST,\n\t\t\t},\n\t\t},\n\n\t\tAdditionalAddr: []*addresspb.Location{\n\t\t\t{\n\t\t\t\tLocation: \"loc 2\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Srinagar\",\n\t\t\t\t\tZipCode: \"190001\",\n\t\t\t\t\tRegion: addresspb.Division_NORTH,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 3\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Imphal\",\n\t\t\t\t\tZipCode: \"795001\",\n\t\t\t\t\tRegion: addresspb.Division_EAST,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 4\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Mysore\",\n\t\t\t\t\tZipCode: \"570001\",\n\t\t\t\t\tRegion: addresspb.Division_SOUTH,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &a\n}", "func (c *client) CreateAddress(accountID int64) (string, error) {\n\turlStr := fmt.Sprintf(\"%s/accounts/%d/addresses/\", c.url, accountID)\n\treq, err := http.NewRequest(\"POST\", urlStr, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq.SetBasicAuth(c.user, 
c.pass)\n\treq.Header.Set(\"Accept\", \"application/json\")\n\t_, payload, err := c.do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\taddrs := []address{}\n\tif err := json.Unmarshal(payload, &addrs); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(addrs) != 1 {\n\t\treturn \"\", errors.New(\"expected one address\")\n\t}\n\treturn addrs[0].Address, nil\n}", "func (a *APIClient) GenerateAddress(currency string, ipnURL string, description string) (address APIAddress, err error) {\n\tdata := map[string]interface{}{\n\t\t\"currency\": currency,\n\t\t\"ipn_url\": ipnURL,\n\t\t\"description\": description,\n\t}\n\terr = a.Fetch(\"POST\", \"/address/generate\", data, &address)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = checkAPIErrors(address.Error)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (wc *rpcClient) address(aType string) (btcutil.Address, error) {\n\tvar addrStr string\n\targs := anylist{\"\"}\n\tif !wc.omitAddressType {\n\t\targs = append(args, aType)\n\t}\n\terr := wc.call(methodNewAddress, args, &addrStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn wc.decodeAddr(addrStr, wc.chainParams) // we should consider returning a string\n}", "func (c *Client) CreateAddress(user api.UserPass) (string, error) {\n\tres := &api.JSONAddress{}\n\terr := c.requester.SendRequest(\"createAddress\", &user, res)\n\treturn res.Address, err\n}", "func (client BaseClient) CreateAddress(ctx context.Context, addressName string, resourceGroupName string, addressResource AddressResource) (result CreateAddressFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/BaseClient.CreateAddress\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.FutureAPI != nil && result.FutureAPI.Response() != nil {\n\t\t\t\tsc = result.FutureAPI.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: addressName,\n\t\t\tConstraints: 
[]validation.Constraint{{Target: \"addressName\", Name: validation.MaxLength, Rule: 24, Chain: nil},\n\t\t\t\t{Target: \"addressName\", Name: validation.MinLength, Rule: 3, Chain: nil},\n\t\t\t\t{Target: \"addressName\", Name: validation.Pattern, Rule: `^[-\\w\\.]+$`, Chain: nil}}},\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: addressResource,\n\t\t\tConstraints: []validation.Constraint{{Target: \"addressResource.AddressProperties\", Name: validation.Null, Rule: true,\n\t\t\t\tChain: []validation.Constraint{{Target: \"addressResource.AddressProperties.ShippingAddress\", Name: validation.Null, Rule: false,\n\t\t\t\t\tChain: []validation.Constraint{{Target: \"addressResource.AddressProperties.ShippingAddress.StreetAddress1\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t\t{Target: \"addressResource.AddressProperties.ShippingAddress.Country\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t}},\n\t\t\t\t\t{Target: \"addressResource.AddressProperties.ContactDetails\", Name: validation.Null, Rule: true,\n\t\t\t\t\t\tChain: []validation.Constraint{{Target: \"addressResource.AddressProperties.ContactDetails.ContactName\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t\t\t{Target: \"addressResource.AddressProperties.ContactDetails.Phone\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t\t\t{Target: \"addressResource.AddressProperties.ContactDetails.EmailList\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t\t}},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"edgeorder.BaseClient\", 
\"CreateAddress\", err.Error())\n\t}\n\n\treq, err := client.CreateAddressPreparer(ctx, addressName, resourceGroupName, addressResource)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"edgeorder.BaseClient\", \"CreateAddress\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateAddressSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"edgeorder.BaseClient\", \"CreateAddress\", result.Response(), \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func (id *Public) CreateAddress(version, stream uint64) {\n\tid.Address.Version = version\n\tid.Address.Stream = stream\n\tcopy(id.Address.Ripe[:], id.hash())\n}", "func CreateAddress(amount int) (string, []Wallet) {\n\n\twallets := []Wallet{}\n\tfor i := 0; i < amount; i++ {\n\t\twif, _ := network.CreatePrivateKey()\n\t\taddress, _ := network.GetAddress(wif)\n\t\tvar wallet = Wallet{ADDRESS: address.EncodeAddress(), PRIVKEY: wif.String()}\n\t\twallets = append(wallets, wallet)\n\t}\n\n\tjson := ConvertToJSON(&wallets)\n\n\tlog.Println(\"Generated\", amount, \"addresses\")\n\n\treturn json, wallets\n\n}", "func (uc UsersController) CreateAddress(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprintf(w, \"User.CreateAddress\")\n}", "func (lu *litUiClient) Address() (string, error) {\n\n\t// cointype of 0 means default, not mainnet.\n\t// this is ugly but does prevent mainnet use for now.\n\n\tvar cointype, numadrs uint32\n\n\t// if no arguments given, generate 1 new address.\n\t// if no cointype given, assume type 1 (testnet)\n\n\tnumadrs = 0\n\n\treply := new(litrpc.AddressReply)\n\n\targs := new(litrpc.AddressArgs)\n\targs.CoinType = cointype\n\targs.NumToMake = numadrs\n\n\tfmt.Printf(\"adr cointye: %d num:%d\\n\", args.CoinType, args.NumToMake)\n\terr := lu.rpccon.Call(\"LitRPC.Address\", args, reply)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse := 
reply.WitAddresses[len(reply.WitAddresses)-1]\n\treturn response, nil\n}", "func (a *AddressApiService) ClientAddressCreate(ctx _context.Context, clientUuid string) ApiClientAddressCreateRequest {\n\treturn ApiClientAddressCreateRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tclientUuid: clientUuid,\n\t}\n}", "func CreateAddress(b types.Address, nonce *big.Int) types.Address {\n\tdata, _ := rlp.EncodeToBytes([]interface{}{b, nonce})\n\treturn types.BytesToAddress(keccak.Keccak256(data)[12:])\n}", "func (e Endpoints) PostAddress(ctx context.Context, profileID string, a Address) error {\n\n\t// TODO: Create detailed ref spec\n\trequest := postAddressRequest{ProfileID: profileID, Address: a}\n\n\tresponse, err := e.PostAddressEndpoint(ctx, request)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp := response.(postAddressResponse)\n\n\treturn resp.Err\n}", "func (dcr *ExchangeWallet) NewAddress() (string, error) {\n\treturn dcr.DepositAddress()\n}", "func (client BaseClient) CreateAddressResponder(resp *http.Response) (result AddressResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateAddress(b common.Address, nonce uint64) common.Address {\n\tdata, _ := rlp.EncodeToBytes([]interface{}{b, nonce})\n\taddr := common.BytesToAddress(Keccak512(data)[:])\n\treturn common.DarmaAddressToContractAddress(addr)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create will create a new address for a specific asset and return the newly created address
func (as *AddressService) Create(name string) (*Address, error) { if isEmptyStr(as.assetCode) { return nil, errAssetCode } var ( addresses []*Address body struct { Address struct { Name string `json:"name"` } `json:"address"` } ) body.Address.Name = name if err := as.client.Post(buildString("address/deposit/", as.assetCode), &body, &addresses); err != nil { return nil, err } return addresses[0], nil }
[ "func (c *client) CreateAddress(accountID int64) (string, error) {\n\turlStr := fmt.Sprintf(\"%s/accounts/%d/addresses/\", c.url, accountID)\n\treq, err := http.NewRequest(\"POST\", urlStr, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq.SetBasicAuth(c.user, c.pass)\n\treq.Header.Set(\"Accept\", \"application/json\")\n\t_, payload, err := c.do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\taddrs := []address{}\n\tif err := json.Unmarshal(payload, &addrs); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(addrs) != 1 {\n\t\treturn \"\", errors.New(\"expected one address\")\n\t}\n\treturn addrs[0].Address, nil\n}", "func (c *Client) CreateAddress(user api.UserPass) (string, error) {\n\tres := &api.JSONAddress{}\n\terr := c.requester.SendRequest(\"createAddress\", &user, res)\n\treturn res.Address, err\n}", "func CreateAddress() *addresspb.Address {\n\ta := addresspb.Address{\n\t\tCorrespondanceAddr: &addresspb.Location{\n\t\t\tLocation: \"loc 1\",\n\t\t\tCity: &addresspb.City{\n\t\t\t\tName: \"Mumbai\",\n\t\t\t\tZipCode: \"400005\",\n\t\t\t\tRegion: addresspb.Division_WEST,\n\t\t\t},\n\t\t},\n\n\t\tAdditionalAddr: []*addresspb.Location{\n\t\t\t{\n\t\t\t\tLocation: \"loc 2\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Srinagar\",\n\t\t\t\t\tZipCode: \"190001\",\n\t\t\t\t\tRegion: addresspb.Division_NORTH,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 3\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Imphal\",\n\t\t\t\t\tZipCode: \"795001\",\n\t\t\t\t\tRegion: addresspb.Division_EAST,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 4\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Mysore\",\n\t\t\t\t\tZipCode: \"570001\",\n\t\t\t\t\tRegion: addresspb.Division_SOUTH,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &a\n}", "func (client BaseClient) CreateAddress(ctx context.Context, addressName string, resourceGroupName string, addressResource AddressResource) (result CreateAddressFuture, err error) {\n\tif 
tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/BaseClient.CreateAddress\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.FutureAPI != nil && result.FutureAPI.Response() != nil {\n\t\t\t\tsc = result.FutureAPI.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: addressName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"addressName\", Name: validation.MaxLength, Rule: 24, Chain: nil},\n\t\t\t\t{Target: \"addressName\", Name: validation.MinLength, Rule: 3, Chain: nil},\n\t\t\t\t{Target: \"addressName\", Name: validation.Pattern, Rule: `^[-\\w\\.]+$`, Chain: nil}}},\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: addressResource,\n\t\t\tConstraints: []validation.Constraint{{Target: \"addressResource.AddressProperties\", Name: validation.Null, Rule: true,\n\t\t\t\tChain: []validation.Constraint{{Target: \"addressResource.AddressProperties.ShippingAddress\", Name: validation.Null, Rule: false,\n\t\t\t\t\tChain: []validation.Constraint{{Target: \"addressResource.AddressProperties.ShippingAddress.StreetAddress1\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t\t{Target: \"addressResource.AddressProperties.ShippingAddress.Country\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t}},\n\t\t\t\t\t{Target: \"addressResource.AddressProperties.ContactDetails\", Name: validation.Null, Rule: true,\n\t\t\t\t\t\tChain: []validation.Constraint{{Target: \"addressResource.AddressProperties.ContactDetails.ContactName\", Name: 
validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t\t\t{Target: \"addressResource.AddressProperties.ContactDetails.Phone\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t\t\t{Target: \"addressResource.AddressProperties.ContactDetails.EmailList\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t\t}},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"edgeorder.BaseClient\", \"CreateAddress\", err.Error())\n\t}\n\n\treq, err := client.CreateAddressPreparer(ctx, addressName, resourceGroupName, addressResource)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"edgeorder.BaseClient\", \"CreateAddress\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateAddressSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"edgeorder.BaseClient\", \"CreateAddress\", result.Response(), \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func (as *ApiService) CreateDepositAddress(currency string) (*ApiResponse, error) {\n\treq := NewRequest(http.MethodPost, \"/api/v1/deposit-addresses\", map[string]string{\"currency\": currency})\n\treturn as.Call(req)\n}", "func (c *Client) Create(ctx context.Context, arg *ngrok.ReservedAddrCreate) (*ngrok.ReservedAddr, error) {\n\tif arg == nil {\n\t\targ = new(ngrok.ReservedAddrCreate)\n\t}\n\tvar res ngrok.ReservedAddr\n\tvar path bytes.Buffer\n\tif err := template.Must(template.New(\"create_path\").Parse(\"/reserved_addrs\")).Execute(&path, arg); err != nil {\n\t\tpanic(err)\n\t}\n\tvar (\n\t\tapiURL = &url.URL{Path: path.String()}\n\t\tbodyArg interface{}\n\t)\n\tapiURL.Path = path.String()\n\tbodyArg = arg\n\n\tif err := c.apiClient.Do(ctx, \"POST\", apiURL, bodyArg, &res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}", "func (id *Public) CreateAddress(version, stream uint64) {\n\tid.Address.Version = version\n\tid.Address.Stream = stream\n\tcopy(id.Address.Ripe[:], id.hash())\n}", "func (uc 
UsersController) CreateAddress(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprintf(w, \"User.CreateAddress\")\n}", "func CreateAddress(id ID, owner []byte) (swarm.Address, error) {\n\tsum, err := hash(id, owner)\n\tif err != nil {\n\t\treturn swarm.ZeroAddress, err\n\t}\n\treturn swarm.NewAddress(sum), nil\n}", "func (client BaseClient) CreateAddressResponder(resp *http.Response) (result AddressResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (c *Client) Address(assetCode ...string) *AddressService {\n\tif len(assetCode) == 0 {\n\t\tassetCode[0] = \"\"\n\t}\n\n\treturn &AddressService{service{c}, assetCode[0]}\n}", "func CreateAddress(amount int) (string, []Wallet) {\n\n\twallets := []Wallet{}\n\tfor i := 0; i < amount; i++ {\n\t\twif, _ := network.CreatePrivateKey()\n\t\taddress, _ := network.GetAddress(wif)\n\t\tvar wallet = Wallet{ADDRESS: address.EncodeAddress(), PRIVKEY: wif.String()}\n\t\twallets = append(wallets, wallet)\n\t}\n\n\tjson := ConvertToJSON(&wallets)\n\n\tlog.Println(\"Generated\", amount, \"addresses\")\n\n\treturn json, wallets\n\n}", "func resourceCreate(d *schema.ResourceData, m interface{}) error {\n\t address := d.Get(\"address\").(string)\n d.SetId(address)\n return resourceRead(d, m)\n}", "func createAddress(creator *Account) Word256 {\n\tnonce := creator.Nonce\n\tcreator.Nonce += 1\n\ttemp := make([]byte, 32+8)\n\tcopy(temp, creator.Address[:])\n\tPutUint64BE(temp[32:], nonce)\n\treturn LeftPadWord256(sha3.Sha3(temp)[:20])\n}", "func CreateAddrEntry(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar address Address\n\terr := json.NewDecoder(r.Body).Decode(&address)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\t_, ok := 
GetEntry(address.FirstName, address.LastName)\n\tif ok {\n\t\thttp.Error(w, fmt.Sprintf(\"Duplicate entry for firstName: %s, lastName: %s\", address.FirstName, address.LastName), 400)\n\t\treturn\n\t}\n\tUpdateEntry(address)\n\tjson.NewEncoder(w).Encode(address)\n}", "func (service AccountsService) Create(a Account) (*Response, Account, error) {\n\treq, err := service.client.newRequest(\"POST\", \"accounts\", nil, a)\n\tif err != nil {\n\t\treturn nil, Account{}, err\n\t}\n\n\tvar dest Account\n\tres, err := service.client.do(req, &dest)\n\n\tdest.BillingInfo = nil\n\n\treturn res, dest, err\n}", "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToAddressScopeCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\tresp, err := client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}", "func CreateAddress(b types.Address, nonce *big.Int) types.Address {\n\tdata, _ := rlp.EncodeToBytes([]interface{}{b, nonce})\n\treturn types.BytesToAddress(keccak.Keccak256(data)[12:])\n}", "func (a *AddressApiService) ClientAddressCreate(ctx _context.Context, clientUuid string) ApiClientAddressCreateRequest {\n\treturn ApiClientAddressCreateRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tclientUuid: clientUuid,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetActive will get all active addresses for an asset
func (as *AddressService) GetActive() ([]*Address, error) { return as.getAddresses("deposit") }
[ "func (c *AccountsListCall) Active(active bool) *AccountsListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (w *Wallet) activeData(dbtx walletdb.ReadTx) ([]btcutil.Address, []wtxmgr.Credit, er.R) {\n\taddrmgrNs := dbtx.ReadBucket(waddrmgrNamespaceKey)\n\ttxmgrNs := dbtx.ReadBucket(wtxmgrNamespaceKey)\n\n\tvar addrs []btcutil.Address\n\terr := w.Manager.ForEachActiveAddress(addrmgrNs, func(addr btcutil.Address) er.R {\n\t\taddrs = append(addrs, addr)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tunspent, err := w.TxStore.GetUnspentOutputs(txmgrNs)\n\treturn addrs, unspent, err\n}", "func (c *AdsListCall) Active(active bool) *AdsListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (c *CreativesListCall) Active(active bool) *CreativesListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (a *Account) ActivePaymentAddresses() map[string]struct{} {\n\tinfos := a.KeyStore.ActiveAddresses()\n\n\taddrs := make(map[string]struct{}, len(infos))\n\tfor _, info := range infos {\n\t\taddrs[info.Address().EncodeAddress()] = struct{}{}\n\t}\n\n\treturn addrs\n}", "func (a *Account) ActivePaymentAddresses() map[string]struct{} {\n\tinfos := a.ActiveAddresses()\n\n\taddrs := make(map[string]struct{}, len(infos))\n\tfor _, info := range infos {\n\t\taddrs[info.Address().EncodeAddress()] = struct{}{}\n\t}\n\n\treturn addrs\n}", "func ActiveAddresses() map[string]net.Interface {\n\tresult := make(map[string]net.Interface)\n\tif iFaces, err := net.Interfaces(); err == nil {\n\t\tfor _, iFace := range iFaces {\n\t\t\tconst interesting = net.FlagUp | net.FlagBroadcast\n\t\t\tif iFace.Flags&interesting == interesting {\n\t\t\t\tif name := Address(iFace); name != \"\" {\n\t\t\t\t\tresult[name] = iFace\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}", "func (s *Storage) FetchAllActive() ([]web.Advert, error) {\n\tvar ad web.Advert\n\tvar allEntries 
[]web.Advert\n\trows, err := s.Query(\"SELECT * FROM active\")\n\tif err != nil {\n\t\treturn nil, errWithLog(err.Error(), \"FetchAllActive\")\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr = rows.Scan(&ad.Date, &ad.Source, &ad.AdNum, &ad.Cat, &ad.Desc, &ad.Link, &ad.Contact, &ad.Rate)\n\t\tif err != nil {\n\t\t\tif err == sql.ErrNoRows {\n\t\t\t\t// table is empty just return empty map\n\t\t\t\treturn []web.Advert{}, nil\n\t\t\t}\n\t\t\treturn nil, errWithLog(err.Error(), \"FetchAllActive\")\n\t\t}\n\t\tallEntries = append(allEntries, ad)\n\t}\n\treturn allEntries, nil\n}", "func (k Keeper) GetActiveValidatorList(ctx sdk.Context) ([]sdk.AccAddress, error) {\n\tvar result []sdk.AccAddress\n\tstore := ctx.KVStore(k.dataStoreKey)\n\tit := store.Iterator(nil, nil)\n\tdefer it.Close()\n\tfor ; it.Valid(); it.Next() {\n\t\tvar value types.Info\n\t\tif err := proto.Unmarshal(it.Value(), &value); err != nil {\n\t\t\tpanic(errors.Wrap(err, \"cannot unmarshal info\"))\n\t\t}\n\t\tif !value.IsActive() {\n\t\t\tcontinue\n\t\t}\n\t\taddr := sdk.AccAddress(it.Key())\n\t\tresult = append(result, addr)\n\t}\n\n\treturn result, nil\n}", "func (_Activatable *ActivatableCaller) Active(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Activatable.contract.Call(opts, out, \"active\")\n\treturn *ret0, err\n}", "func (c *RemarketingListsListCall) Active(active bool) *RemarketingListsListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (eth *Eth) ActiveAddress() string {\n\tkeypair := eth.keyManager.KeyPair()\n\taddr := ethutil.Bytes2Hex(keypair.Address())\n\treturn addr\n}", "func (c *TargetableRemarketingListsListCall) Active(active bool) *TargetableRemarketingListsListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (am *AccountManager) RescanActiveAddresses() error {\n\tvar job *RescanJob\n\tfor _, a := range am.AllAccounts() {\n\t\tacctJob, err := 
a.RescanActiveJob()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif job == nil {\n\t\t\tjob = acctJob\n\t\t} else {\n\t\t\tjob.Merge(acctJob)\n\t\t}\n\t}\n\tif job != nil {\n\t\t// Submit merged job and block until rescan completes.\n\t\tjobFinished := am.rm.SubmitJob(job)\n\t\t<-jobFinished\n\t}\n\n\treturn nil\n}", "func Active(ctx context.Context) []string {\n\temails := []string{}\n\n\tq := datastore.NewQuery(\"Subscription\").KeysOnly()\n\tt := q.Run(ctx)\n\tfor {\n\t\tkey, err := t.Next(nil)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\temails = append(emails, key.StringID())\n\t}\n\n\treturn emails\n}", "func (v Account) GetActiveOffers(params AccountGetActiveOffersParams) (*AccountGetActiveOffersResponse, error) {\n\tr, err := v.API.Request(\"account.getActiveOffers\", params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp AccountGetActiveOffersResponse\n\terr = json.Unmarshal(r, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (mod *MonkRpcModule) ActiveAddress() string {\n\tkeypair := mod.keyManager.KeyPair()\n\taddr := monkutil.Bytes2Hex(keypair.Address())\n\treturn addr\n}", "func (am *AccountManager) RescanActiveAddresses() {\n\tfor _, account := range am.AllAccounts() {\n\t\taccount.RescanActiveAddresses()\n\t}\n}", "func (_NodeSpace *NodeSpaceCaller) ActiveNodeAddresses(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) {\n\tvar out []interface{}\n\terr := _NodeSpace.contract.Call(opts, &out, \"activeNodeAddresses\", arg0)\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetSaved will get all saved addresses for an asset
func (as *AddressService) GetSaved() ([]*Address, error) { return as.getAddresses("withdraw") }
[ "func (s *Storage) GetAll() ([]*common.ReserveAddress, int64, error) {\n\tvar (\n\t\tlogger = s.sugar.With(\"func\", caller.GetCurrentFunctionName())\n\t\tstored []*ReserveAddress\n\t\tresults []*common.ReserveAddress\n\t\tqueryStmt = `SELECT id, address, type, description, timestamp\nFROM addresses`\n\t\tqueryVersionStmt = `SELECT version FROM addresses_version WHERE id = 1`\n\t\tversion int64\n\t)\n\n\tlogger.Debug(\"querying all stored reserve addresses\")\n\tif err := s.db.Select(&stored, queryStmt); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tfor _, r := range stored {\n\t\tresult, err := r.Common()\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\tresults = append(results, result)\n\t}\n\tif len(results) > 0 {\n\t\tif err := s.db.Get(&version, queryVersionStmt); err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t}\n\treturn results, version, nil\n}", "func Save() ([]byte, Cache) {\n\tcache := make(Cache)\n\n\tatomLock.RLock()\n\n\tfor hash, atom := range atomCache {\n\t\tcache[hash] = atom\n\t}\n\n\tatomLock.RUnlock()\n\n\treturn atomData.Bytes(), cache\n}", "func (d *database) GetSavedPeers() (savedAddresses []multiaddr.Multiaddr, err error) {\n\t// retrieve the saved addresses\n\terr = d.db.Update(func(tx *bbolt.Tx) error {\n\t\tsavedBucket := tx.Bucket(peersDbKey)\n\t\terr = savedBucket.ForEach(func(k, v []byte) error {\n\t\t\taddr, err := multiaddr.NewMultiaddrBytes(v)\n\t\t\tif err == nil {\n\t\t\t\tpeerID, err := peer.AddrInfoFromP2pAddr(addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// if the saved peer cannot be validated, delete\n\t\t\t\t\t_ = savedBucket.Delete(k)\n\t\t\t\t} else {\n\t\t\t\t\tisBanned, err := d.IsPeerBanned(peerID.ID)\n\t\t\t\t\tif !isBanned && err == nil {\n\t\t\t\t\t\tsavedAddresses = append(savedAddresses, addr)\n\t\t\t\t\t}\n\t\t\t\t\t// if saved peer is banned, delete\n\t\t\t\t\tif isBanned && err == nil {\n\t\t\t\t\t\terr = savedBucket.Delete(k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t})\n\t\treturn 
err\n\t})\n\treturn\n}", "func (m *BookingCustomer) GetAddresses()([]PhysicalAddressable) {\n val, err := m.GetBackingStore().Get(\"addresses\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]PhysicalAddressable)\n }\n return nil\n}", "func GetAddresses(id string) ([]string, error) {\n\treturn gWallets.getAddresses(id)\n}", "func (svc *inmemService) GetAddresses(ctx context.Context, profileID string) ([]Address, error) {\n\n\t// Get a Read Lock on the svc for atomic read access to the datastore\n\tsvc.mtx.RLock()\n\n\t// Immediately set up a lock release to occur when the function finishes\n\tdefer svc.mtx.RUnlock()\n\n\t// Check to make sure there is a profile that corresponds to the passed in profile and save the found profile to a profile variable\n\tprofile, ok := svc.profiles[profileID]\n\n\t// If no profile was found for the passed in ID\n\tif !ok {\n\n\t\t// Return error informing the caller that the profile to which the addresses should have been associated was not found\n\t\treturn nil, ErrNotFound\n\t}\n\n\t// Return all addresses associated with the profile that was passed in and a nil error value\n\treturn profile.Addresses, nil\n}", "func (w *XPubWallet) GetAddresses() []cipher.Addresser {\n\treturn w.Entries.getAddresses()\n}", "func (as *AddressService) GetActive() ([]*Address, error) {\n\treturn as.getAddresses(\"deposit\")\n}", "func (mock *StoreServiceMock) SaveCalls() []struct {\n\tEntry ytfeed.Entry\n} {\n\tvar calls []struct {\n\t\tEntry ytfeed.Entry\n\t}\n\tmock.lockSave.RLock()\n\tcalls = mock.calls.Save\n\tmock.lockSave.RUnlock()\n\treturn calls\n}", "func GetSavedConfigs() (fileNames []string) {\n\tfileNames, err := getSavedConfigs()\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn\n}", "func (account *Account) Saved() *SavedMedia {\r\n\treturn &SavedMedia{\r\n\t\tinst: account.inst,\r\n\t\tendpoint: urlFeedSaved,\r\n\t\terr: nil,\r\n\t}\r\n}", "func (s *Store) GetBulk(suffixes []string) ([]string, error) 
{\n\tanchorBytes, err := s.store.GetBulk(suffixes...)\n\tif err != nil {\n\t\treturn nil, orberrors.NewTransient(fmt.Errorf(\"failed to get did anchor reference: %w\", err))\n\t}\n\n\tanchors := make([]string, len(suffixes))\n\n\tfor i, a := range anchorBytes {\n\t\tif a == nil {\n\t\t\tanchors[i] = \"\"\n\t\t} else {\n\t\t\tanchors[i] = string(a)\n\t\t}\n\t}\n\n\tlogger.Debugf(\"retrieved latest anchors%s for suffixes%s\", anchors, suffixes)\n\n\treturn anchors, nil\n}", "func (pg *PGStorage) GetAddresses(sql string, args ...interface{}) ([]*Address, error) {\n\tsql = \"SELECT id, updated_at, hash, income, outcome, ballance FROM address \" + sql\n\n\trows, err := pg.con.Query(sql, args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddresses := make([]*Address, 0)\n\n\tfor rows.Next() {\n\t\ta := &Address{}\n\t\tif err := rows.Scan(\n\t\t\t&a.ID,\n\t\t\t&a.UpdatedAt,\n\t\t\t&a.Hash,\n\t\t\t&a.Income,\n\t\t\t&a.Outcome,\n\t\t\t&a.Ballance,\n\t\t); err != nil {\n\t\t\treturn addresses, err\n\t\t}\n\t\taddresses = append(addresses, a)\n\t}\n\treturn addresses, err\n}", "func (s *Service) GetAddresses(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tsizeStr := r.FormValue(\"size\")\n\tprefix := r.FormValue(\"prefix\")\n\tif sizeStr == \"\" {\n\t\tsizeStr = defaultPageSize\n\t}\n\tdata := &Data{}\n\tdefer func() {\n\t\tif err := json.NewEncoder(w).Encode(data.Addresses); err != nil {\n\t\t\tutils.Logger().Warn().Err(err).Msg(\"cannot JSON-encode addresses\")\n\t\t}\n\t}()\n\n\tsize, err := strconv.Atoi(sizeStr)\n\tif err != nil || size > maxAddresses {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tdata.Addresses, err = s.Storage.GetAddresses(size, prefix)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tutils.Logger().Warn().Err(err).Msg(\"wasn't able to fetch addresses from storage\")\n\t\treturn\n\t}\n}", "func (u *Unmarshal) AssetsExportGet(uuid string, chunk string) 
([]byte, error) {\n\ts := u.NewService()\n\traw, err := s.AssetsExportGet(uuid, chunk)\n\treturn raw, err\n}", "func (s *StoreImpl) Save(dir string) error {\n\tassetMap := make(map[string]Asset)\n\tfor k, v := range s.assets {\n\t\tassetMap[k.String()] = v.asset\n\t}\n\tdata, err := json.MarshalIndent(&assetMap, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpath := filepath.Join(dir, stateFileName)\n\tif err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(path, data, 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (mp *Map) Save() ([]datastore.Property, error) {\n\tvar d []datastore.Property\n\tfor k, v := range *mp {\n\t\td = append(d, datastore.Property{\n\t\t\tName: k,\n\t\t\tValue: v,\n\t\t\t// Force property to not be indexed. This allows more freedom of the\n\t\t\t// property name, i.e. may contain \".\".\n\t\t\tNoIndex: true,\n\t\t})\n\t}\n\treturn d, nil\n}", "func (m *Drive) GetBundles()([]DriveItemable) {\n return m.bundles\n}", "func (s *Storage) Get(id uint64) (*common.ReserveAddress, error) {\n\tvar (\n\t\tlogger = s.sugar.With(\"func\", caller.GetCurrentFunctionName(),\n\t\t\t\"id\", id,\n\t\t)\n\t\taddr = &ReserveAddress{}\n\t\tqueryStmt = `SELECT id, address, type, description, timestamp\nFROM addresses\nWHERE id = $1`\n\t)\n\n\tif err := s.db.Get(addr, queryStmt, id); err == sql.ErrNoRows {\n\t\tlogger.Infow(\"no record found in database\")\n\t\treturn nil, storage.ErrNotExists\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\tra, err := addr.Common()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ra, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove will remove a withdrawal adddress given the id of the address
func (as *AddressService) Remove(addressID int) error { if err := as.client.Delete(buildString("address/withdraw/", strconv.Itoa(addressID))); err != nil { return err } return nil }
[ "func ExampleRemoveDepositAddress() {}", "func Delete(a Address) Edit { return change{Address: a, op: 'd'} }", "func (f *wsClientFilter) removeAddress(a btcutil.Address) {\n\tswitch a := a.(type) {\n\tcase *btcutil.AddressPubKeyHash:\n\t\tdelete(f.pubKeyHashes, *a.Hash160())\n\t\treturn\n\tcase *btcutil.AddressScriptHash:\n\t\tdelete(f.scriptHashes, *a.Hash160())\n\t\treturn\n\tcase *btcutil.AddressPubKey:\n\t\tserializedPubKey := a.ScriptAddress()\n\t\tswitch len(serializedPubKey) {\n\t\tcase 33: // compressed\n\t\t\tvar compressedPubKey [33]byte\n\t\t\tcopy(compressedPubKey[:], serializedPubKey)\n\t\t\tdelete(f.compressedPubKeys, compressedPubKey)\n\t\t\treturn\n\t\tcase 65: // uncompressed\n\t\t\tvar uncompressedPubKey [65]byte\n\t\t\tcopy(uncompressedPubKey[:], serializedPubKey)\n\t\t\tdelete(f.uncompressedPubKeys, uncompressedPubKey)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdelete(f.otherAddresses, a.EncodeAddress())\n}", "func DeleteAddressByID(ID int)error{\n\tsql := \"delete from address where id = ?\"\n\t_,err := utils.Db.Exec(sql,ID)\n\tif err != nil{\n\t\treturn err\n\t}\n\treturn nil\n}", "func (ua *UserAddress) Delete(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif ua._deleted {\n\t\treturn nil\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetUserAddressTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//1\n\n\t// sql query with composite primary key\n\tsqlstr := `UPDATE ` + tableName + ` SET is_del = 1 WHERE uaid = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, ua.Uaid)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, ua.Uaid)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, ua.Uaid)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set deleted\n\tua._deleted = true\n\n\treturn 
nil\n}", "func (f *wsClientFilter) removeAddressStr(s string, params *chaincfg.Params) {\n\ta, err := btcutil.DecodeAddress(s, params)\n\tif err == nil {\n\t\tf.removeAddress(a)\n\t} else {\n\t\tdelete(f.otherAddresses, s)\n\t}\n}", "func Remove(id string) error {\n\treturn gWallets.remove(id)\n}", "func DelUserByID(w http.ResponseWriter, r *http.Request) {\n\t// Get Parameters From URI\n\tparamID := chi.URLParam(r, \"id\")\n\n\t// Get ID Parameters From URI Then Convert it to Integer\n\tuserID, err := strconv.Atoi(paramID)\n\tif err != nil {\n\t\trouter.ResponseInternalError(w, err.Error())\n\t\treturn\n\t}\n\n\t// Check if Requested Data in User Array Range\n\tif userID <= 0 || userID > len(model.Users) {\n\t\trouter.ResponseBadRequest(w, \"invalid array index\")\n\t\treturn\n\t}\n\n\t// Delete User Data from Users Array\n\tmodel.Users = append(model.Users[:userID-1], model.Users[userID:]...)\n\n\trouter.ResponseSuccess(w, \"\")\n}", "func (twd *TCPWaveDriver) ReleaseAddress(conf NetConfig, ip string, mac string) (string,error){\n glog.Infof(\"Ip delete request with ip = %s\", ip)\n err := twd.ObjMgr.DeleteIPAddress(ip, \"\", conf.IPAM.Org)\n if err!=nil{\n glog.Error(err)\n return \"\", err\n }\n return ip,nil\n}", "func (s *svcBook) DelByID(pId *uint) (uint, error) {\n\treturn (*s.pRepo).DelByID(pId)\n}", "func DeleteAddress(id int32) error {\n\ta, err := GetAddress(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresult := db.DB().Delete(a)\n\treturn result.Error\n}", "func (o *Address) Delete(exec boil.Executor) error {\n\tif o == nil {\n\t\treturn errors.New(\"sqlboiler: no Address provided for delete\")\n\t}\n\n\tif err := o.doBeforeDeleteHooks(exec); err != nil {\n\t\treturn err\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), addressPrimaryKeyMapping)\n\tsql := \"DELETE FROM `address` WHERE `address_id`=?\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, 
args...)\n\t}\n\n\t_, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"sqlboiler: unable to delete from address\")\n\t}\n\n\tif err := o.doAfterDeleteHooks(exec); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t *ToDoList) Remove(id string) {\n\tidx := findDealIndex(t.Deals, id)\n\tif idx == -1 {\n\t\tpanic(\"Deal is not found\")\n\t}\n\tt.Deals[idx] = t.Deals[len(t.Deals)-1]\n\tt.Deals[len(t.Deals)-1] = nil\n\tt.Deals = t.Deals[:len(t.Deals)-1]\n}", "func (c *Client) UnreactByAddressAndID(addr string, id int64) error {\n\trxn := new(Reaction)\n\n\t_, err := c.Model(rxn).\n\t\tWhere(\"id = ?\", id).\n\t\tWhere(\"creator = ?\", addr).\n\t\tWhere(\"deleted_at IS NULL\").\n\t\tSet(\"deleted_at = ?\", time.Now()).\n\t\tUpdate()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t *Transaction) remove(db meddler.DB) error {\n lender, err := GetUserById(db, t.LenderId)\n if err != nil {\n return err\n }\n debtor, err := GetUserById(db, t.DebtorId)\n if err != nil {\n return err\n }\n\n // reverse the balance updates due to this transaction\n lender.UpdateBalance(db, -(t.Amount))\n debtor.UpdateBalance(db, t.Amount)\n\n // remove the transaction from the db\n _, err = db.Exec(\"DELETE FROM transactions WHERE id = ?\", t.Id)\n if err != nil {\n return err\n }\n t = nil\n\n return nil\n}", "func RemoveByID(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprint(w, \"Welcome!\\n\")\n}", "func (oiuo *OrderInfoUpdateOne) RemoveOrderAddress(o ...*OrderAddress) *OrderInfoUpdateOne {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn oiuo.RemoveOrderAddresIDs(ids...)\n}", "func (t *Tenants) Del(id string) error {\n\treturn t.store.Del(id)\n}", "func ExampleAddDepositAddress() {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
VerifyWithdrawal will verify a withdrawal given the verification token
func (as *AddressService) VerifyWithdrawal(token string) error { if err := as.client.Get(buildString("address/withdraw/verify/", token), nil); err != nil { return err } return nil }
[ "func (_Token *TokenTransactor) Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int, _market common.Address) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"withdraw\", _recipient, _amount, _market)\n}", "func (_Token *TokenSession) ExecuteWithdrawal() (*types.Transaction, error) {\n\treturn _Token.Contract.ExecuteWithdrawal(&_Token.TransactOpts)\n}", "func (_SfcContract *SfcContractSession) Withdraw(toValidatorID *big.Int, wrID *big.Int) (*types.Transaction, error) {\n\treturn _SfcContract.Contract.Withdraw(&_SfcContract.TransactOpts, toValidatorID, wrID)\n}", "func (_SfcContract *SfcContractTransactorSession) Withdraw(toValidatorID *big.Int, wrID *big.Int) (*types.Transaction, error) {\n\treturn _SfcContract.Contract.Withdraw(&_SfcContract.TransactOpts, toValidatorID, wrID)\n}", "func (_V1 *V1Transactor) Withdraw(opts *bind.TransactOpts, _token common.Address) (*types.Transaction, error) {\n\treturn _V1.contract.Transact(opts, \"withdraw\", _token)\n}", "func (_Token *TokenTransactor) ExecuteWithdrawal(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"executeWithdrawal\")\n}", "func (broadcast *Broadcast) ValidatorWithdraw(ctx context.Context, username, amount,\n\tprivKeyHex string, seq int64) (*model.BroadcastResponse, error) {\n\tmsg := model.ValidatorWithdrawMsg{\n\t\tUsername: username,\n\t\tAmount: amount,\n\t}\n\treturn broadcast.broadcastTransaction(ctx, msg, privKeyHex, seq, \"\", false)\n}", "func (_Lmc *LmcTransactor) Withdraw(opts *bind.TransactOpts, _tokenAmount *big.Int) (*types.Transaction, error) {\n\treturn _Lmc.contract.Transact(opts, \"withdraw\", _tokenAmount)\n}", "func VerifyTransaction(contract BasicFluenceContract, tx Transaction, minDeposit int64) {\n // checking that the client actually exists in the contract\n var deposit, ok = contract.ClientDeposits[tx.Seal.PublicKey]\n assertTrue(ok)\n\n // checking that the client has enough funds\n 
assertTrue(deposit >= minDeposit)\n\n // checking that the transaction was signed by this client\n assertTrue(Verify(tx.Seal, Hash(tx.Invoke)))\n}", "func withdraw(ctx iscp.Sandbox) (dict.Dict, error) {\n\tstate := ctx.State()\n\tmustCheckLedger(state, \"accounts.withdraw.begin\")\n\n\tif ctx.Caller().Address().Equals(ctx.ChainID().AsAddress()) {\n\t\t// if the caller is on the same chain, do nothing\n\t\treturn nil, nil\n\t}\n\ttokensToWithdraw, ok := GetAccountBalances(state, ctx.Caller())\n\tif !ok {\n\t\t// empty balance, nothing to withdraw\n\t\treturn nil, nil\n\t}\n\t// will be sending back to default entry point\n\ta := assert.NewAssert(ctx.Log())\n\t// bring balances to the current account (owner's account). It is needed for subsequent Send call\n\ta.Require(MoveBetweenAccounts(state, ctx.Caller(), commonaccount.Get(ctx.ChainID()), tokensToWithdraw),\n\t\t\"accounts.withdraw.inconsistency. failed to move tokens to owner's account\")\n\n\t// add incoming tokens (after fees) to the balances to be withdrawn. Otherwise they would end up in the common account\n\ttokensToWithdraw.AddAll(ctx.IncomingTransfer())\n\t// Send call assumes tokens are in the current account\n\ta.Require(ctx.Send(ctx.Caller().Address(), tokensToWithdraw, &iscp.SendMetadata{\n\t\tTargetContract: ctx.Caller().Hname(),\n\t}), \"accounts.withdraw.inconsistency: failed sending tokens \")\n\n\tctx.Log().Debugf(\"accounts.withdraw.success. 
Sent to address %s\", tokensToWithdraw.String())\n\n\tmustCheckLedger(state, \"accounts.withdraw.exit\")\n\treturn nil, nil\n}", "func TstCheckWithdrawalStatusMatches(t *testing.T, s1, s2 WithdrawalStatus) {\n\tif s1.Fees() != s2.Fees() {\n\t\tt.Fatalf(\"Wrong amount of network fees; want %d, got %d\", s1.Fees(), s2.Fees())\n\t}\n\n\tif !reflect.DeepEqual(s1.Sigs(), s2.Sigs()) {\n\t\tt.Fatalf(\"Wrong tx signatures; got %x, want %x\", s1.Sigs(), s2.Sigs())\n\t}\n\n\tif !reflect.DeepEqual(s1.NextInputAddr(), s2.NextInputAddr()) {\n\t\tt.Fatalf(\"Wrong NextInputAddr; got %v, want %v\", s1.NextInputAddr(), s2.NextInputAddr())\n\t}\n\n\tif !reflect.DeepEqual(s1.NextChangeAddr(), s2.NextChangeAddr()) {\n\t\tt.Fatalf(\"Wrong NextChangeAddr; got %v, want %v\", s1.NextChangeAddr(), s2.NextChangeAddr())\n\t}\n\n\tif !reflect.DeepEqual(s1.Outputs(), s2.Outputs()) {\n\t\tt.Fatalf(\"Wrong WithdrawalOutputs; got %v, want %v\", s1.Outputs(), s2.Outputs())\n\t}\n\n\tif !reflect.DeepEqual(s1.transactions, s2.transactions) {\n\t\tt.Fatalf(\"Wrong transactions; got %v, want %v\", s1.transactions, s2.transactions)\n\t}\n\n\t// The above checks could be replaced by this one, but when they fail the\n\t// failure msg wouldn't give us much clue as to what is not equal, so we do\n\t// the individual checks above and use this one as a catch-all check in case\n\t// we forget to check any of the individual fields.\n\tif !reflect.DeepEqual(s1, s2) {\n\t\tt.Fatalf(\"Wrong WithdrawalStatus; got %v, want %v\", s1, s2)\n\t}\n}", "func TestNewWithdrawalMessage(t *testing.T) {\n\tt.Parallel()\n\t// create a withdrawal message using random parameters\n\taid, _ := modules.NewAccountID()\n\tamount := types.NewCurrency64(fastrand.Uint64n(100))\n\tblockHeight := types.BlockHeight(fastrand.Intn(100))\n\tmsg := newWithdrawalMessage(aid, amount, blockHeight)\n\n\t// validate the withdrawal message\n\tif msg.Account != aid {\n\t\tt.Fatal(\"Unexpected account ID\")\n\t}\n\tif !msg.Amount.Equals(amount) 
{\n\t\tt.Fatal(\"Unexpected amount\")\n\t}\n\tif msg.Expiry != blockHeight+withdrawalValidityPeriod {\n\t\tt.Fatal(\"Unexpected expiry\")\n\t}\n\tif len(msg.Nonce) != modules.WithdrawalNonceSize {\n\t\tt.Fatal(\"Unexpected nonce length\")\n\t}\n\tvar nonce [modules.WithdrawalNonceSize]byte\n\tif bytes.Equal(msg.Nonce[:], nonce[:]) {\n\t\tt.Fatal(\"Uninitialized nonce\")\n\t}\n}", "func (_Token *TokenTransactorSession) Withdraw(_recipient common.Address, _amount *big.Int, _market common.Address) (*types.Transaction, error) {\n\treturn _Token.Contract.Withdraw(&_Token.TransactOpts, _recipient, _amount, _market)\n}", "func (_Lmc *LmcTransactorSession) Withdraw(_tokenAmount *big.Int) (*types.Transaction, error) {\n\treturn _Lmc.Contract.Withdraw(&_Lmc.TransactOpts, _tokenAmount)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowTransactor) WithdrawRevoked(opts *bind.TransactOpts, operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.contract.Transact(opts, \"withdrawRevoked\", operator)\n}", "func (_IWETH *IWETHSession) Withdraw(arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Withdraw(&_IWETH.TransactOpts, arg0)\r\n}", "func (_EtherDelta *EtherDeltaTransactor) WithdrawToken(opts *bind.TransactOpts, token common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _EtherDelta.contract.Transact(opts, \"withdrawToken\", token, amount)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactor) Withdraw(opts *bind.TransactOpts, _member common.Address) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"withdraw\", _member)\n}", "func (_IWETH *IWETHTransactorSession) Withdraw(arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Withdraw(&_IWETH.TransactOpts, arg0)\r\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
VerifyBSB will verify a BSB number and send back the current status of that BSB
func (as *AddressService) VerifyBSB(bsb string) (*BSBStatus, error) { var bsbStatus BSBStatus if err := as.client.Get(buildString("address/withdraw/bsb-verify/", bsb), &bsbStatus); err != nil { return nil, err } return &bsbStatus, nil }
[ "func (_OperCont *OperContCallerSession) VerifyBinary(binName string, measurement *big.Int) (bool, error) {\n\treturn _OperCont.Contract.VerifyBinary(&_OperCont.CallOpts, binName, measurement)\n}", "func (_OperCont *OperContCaller) VerifyBinary(opts *bind.CallOpts, binName string, measurement *big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _OperCont.contract.Call(opts, out, \"verifyBinary\", binName, measurement)\n\treturn *ret0, err\n}", "func (_Testimonium *TestimoniumCaller) VerifyState(opts *bind.CallOpts, blockHash [32]byte, noOfConfirmations uint8, rlpEncodedState []byte, path []byte, rlpEncodedNodes []byte) (uint8, error) {\n\tvar (\n\t\tret0 = new(uint8)\n\t)\n\tout := ret0\n\terr := _Testimonium.contract.Call(opts, out, \"verifyState\", blockHash, noOfConfirmations, rlpEncodedState, path, rlpEncodedNodes)\n\treturn *ret0, err\n}", "func (api *API) fBVerificationExists(token string) (fbid uint64, err error) {\n\ts, err := api.sc.Prepare(\"SELECT fb_id FROM facebook_verification WHERE token = ?\")\n\tif err != nil {\n\t\treturn\n\t}\n\terr = s.QueryRow(token).Scan(&fbid)\n\tif err == sql.ErrNoRows {\n\t\terr = NoSuchVerificationToken\n\t}\n\treturn\n}", "func verify(srvChan chan string, channel, nick, hostname string, args []string) {\n\tmessage := \"NOTICE \" + channel + \" :\"\n\tif len(args) != 2 {\n\t\tmessage = \"NOTICE \" + channel + \" :ERROR: Invalid number of arguments\"\n\t} else {\n\t\tuname := args[0]\n\t\tpin := args[1]\n\t\treply := cmdDb.Cmd(\"get\", uname+\"Pin\")\n\t\tpinDb, err := (reply.Bytes())\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\treturn\n\t\t}\n\t\tif string(pinDb) == pin {\n\t\t\tmessage += \"You are now verified as \" + uname\n\t\t\tcmdDb.Cmd(\"set\", uname+\"Host\", hostname)\n\t\t\tcmdDb.Cmd(\"set\", uname+\"Pin\", fmt.Sprintf(\"%06d\", rand.Intn(1000000)))\n\t\t} else {\n\t\t\tmessage += \"PIN does not match that of \" + 
uname\n\t\t}\n\t}\n\tlog.Println(message)\n\tsrvChan <- message\n}", "func (_Testimonium *TestimoniumSession) VerifyState(blockHash [32]byte, noOfConfirmations uint8, rlpEncodedState []byte, path []byte, rlpEncodedNodes []byte) (uint8, error) {\n\treturn _Testimonium.Contract.VerifyState(&_Testimonium.CallOpts, blockHash, noOfConfirmations, rlpEncodedState, path, rlpEncodedNodes)\n}", "func (mb *TCPPackager) Verify(aduRequest []byte, aduResponse []byte) (err error) {\n\t// Transaction id\n\tresponseVal := binary.BigEndian.Uint16(aduResponse)\n\trequestVal := binary.BigEndian.Uint16(aduRequest)\n\tif responseVal != requestVal {\n\t\terr = fmt.Errorf(\"modbus: response transaction id '%v' does not match request '%v'\", responseVal, requestVal)\n\t\treturn\n\t}\n\t// Protocol id\n\tresponseVal = binary.BigEndian.Uint16(aduResponse[2:])\n\trequestVal = binary.BigEndian.Uint16(aduRequest[2:])\n\tif responseVal != requestVal {\n\t\terr = fmt.Errorf(\"modbus: response protocol id '%v' does not match request '%v'\", responseVal, requestVal)\n\t\treturn\n\t}\n\t// Unit id (1 byte)\n\tif aduResponse[6] != aduRequest[6] {\n\t\terr = fmt.Errorf(\"modbus: response unit id '%v' does not match request '%v'\", aduResponse[6], aduRequest[6])\n\t\treturn\n\t}\n\treturn\n}", "func BebGetSuccess(w http.ResponseWriter, name string) {\n\tw.WriteHeader(http.StatusOK)\n\ts := bebtypes.Subscription{\n\t\tName: name,\n\t\tSubscriptionStatus: bebtypes.SubscriptionStatusActive,\n\t}\n\terr := json.NewEncoder(w).Encode(s)\n\tExpect(err).ShouldNot(HaveOccurred())\n}", "func TestBirdShowStatus(t *testing.T) {\n\tout := \"1000-BIRD 1.6.4\\n\" +\n\t\t\"1011-Router ID is 192.168.1.9\\n\" +\n\t\t\" Current server time is 2018-12-27 12:15:01\\n\" +\n\t\t\" Last reboot on 2018-12-21 12:35:11\\n\" +\n\t\t\" Last reconfiguration on 2018-12-21 12:35:11\\n\" +\n\t\t\"0013 Daemon is up and running\\n\"\n\tcompleted := containsActionCompletedCode([]byte(out))\n\n\tassert.True(\"'show status' successfully 
completed\", completed, t)\n}", "func VerifyTask(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tresult := acceptBll.VerifyTask(vars[\"taskId\"])\n\tmsg := constants.NotFoundData\n\tif result {\n\t\tmsg = constants.SuccessMessage\n\t}\n\thelper.InitialResponseSuccess(w, result, http.StatusOK, msg)\n}", "func (b *backend) Verify(addr wallet.Address, _params *channel.Params, state *channel.State, sig []byte) (bool, error) {\n\tif err := state.Valid(); err != nil {\n\t\treturn false, errors.Wrap(err, \"verifying invalid state\")\n\t}\n\tlog.WithFields(log.Fields{\"channel\": state.ID, \"version\": state.Version}).Tracef(\"Verifying state\")\n\n\tbuff := new(bytes.Buffer)\n\tif err := state.Encode(buff); err != nil {\n\t\treturn false, errors.WithMessage(err, \"pack state\")\n\t}\n\treturn wallet.VerifySignature(buff.Bytes(), sig, addr)\n}", "func wbbVerify(curve *math.Curve, pk *math.G2, sig *math.G1, m *math.Zr) error {\n\tif pk == nil || sig == nil || m == nil {\n\t\treturn errors.Errorf(\"Weak-BB signature invalid: received nil input\")\n\t}\n\t// Set P = pk * g2^m\n\tP := curve.NewG2()\n\tP.Clone(pk)\n\tP.Add(curve.GenG2.Mul(m))\n\tP.Affine()\n\t// check that e(sig, pk * g2^m) = e(g1, g2)\n\tif !curve.FExp(curve.Pairing(P, sig)).Equals(curve.GenGt) {\n\t\treturn errors.Errorf(\"Weak-BB signature is invalid\")\n\t}\n\treturn nil\n}", "func (api *Staytus) VerifySubscriber(email string) (bool, error) {\n\trequest := &request{\n\t\tSubscriberEmail: email,\n\t}\n\tbody, err := json.Marshal(request)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdata, err := api.post(\"api/v1/subscribers/verify\", body)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tvar result bool\n\tif err := json.Unmarshal(data, &result); err != nil {\n\t\treturn false, err\n\t}\n\treturn result, nil\n}", "func Verify(response http.ResponseWriter, request *http.Request) {\n\tif Config.ThrottleVerification != nil {\n\t\tConfig.ThrottleVerification()\n\t}\n\n\t// Find the user 
for this verification ID.\n\tverificationID := request.FormValue(\"id\")\n\tuser, err := Config.LoadUserByVerificationID(verificationID)\n\tif err != nil {\n\t\tRenderProgramError(response, request, \"Could not load user for verification ID\", \"\", err)\n\t\treturn\n\t}\n\tif user == nil {\n\t\tConfig.Log.Printf(\"Verification ID not found: %s\", verificationID)\n\t\tRenderPageError(response, request, \"signup.gohtml\", \"verificationidnotfound\", map[string]string{}, nil)\n\t\treturn\n\t}\n\n\t// Do we have the right user?\n\tuserVerificationID, idCreated := user.GetVerificationID()\n\tif userVerificationID != verificationID {\n\t\tRenderProgramError(\n\t\t\tresponse,\n\t\t\trequest,\n\t\t\tfmt.Sprintf(\"Wrong user loaded: %s (%s) with verification ID %s instead of %s\", user.GetID(), user.GetEmail(), userVerificationID, verificationID),\n\t\t\t\"Wrong user loaded\",\n\t\t\terr,\n\t\t)\n\t\treturn\n\t}\n\n\t// Is the verification ID still valid?\n\tif idCreated.Add(3 * 24 * time.Hour).Before(time.Now()) {\n\t\tConfig.Log.Printf(\"Verification ID for user %s (%s) expired: %s\", user.GetID(), user.GetEmail(), verificationID)\n\t\tRenderPageError(response, request, \"signup.gohtml\", \"verificationidexpired\", map[string]string{}, nil)\n\t\treturn\n\t}\n\n\t// User has been verified. 
Update status.\n\tuser.SetState(StateVerified)\n\tuser.SetVerificationID(\"\", time.Unix(0, 0)) // Invalidate verification ID.\n\tif err = Config.UpdateUser(user); err != nil {\n\t\tRenderProgramError(response, request, fmt.Sprintf(\"Could not verify user %s (%s)\", user.GetID(), user.GetEmail()), \"Could not verify user\", err)\n\t\treturn\n\t}\n\tConfig.Log.Printf(\"User %s (%s) has been verified\", user.GetID(), user.GetEmail())\n\n\t// If anyone is logged in, log them out now.\n\tsession, _ := sessions.Start(response, request, false)\n\tif session != nil && session.User() != nil {\n\t\tsession.LogOut()\n\t}\n\n\t// Show a confirmation.\n\tRenderPageBasic(response, request, \"verified.gohtml\", nil)\n}", "func (_Ethdkg *EthdkgCaller) Verify(opts *bind.CallOpts, message []byte, sig [2]*big.Int, pubK [4]*big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Ethdkg.contract.Call(opts, out, \"Verify\", message, sig, pubK)\n\treturn *ret0, err\n}", "func (c *Client) VerifyRoutingNumber(data string) (map[string]interface{}, error) {\n\tlog.info(\"========== VERIFY ROUTING NUMBER ==========\")\n\turl := buildURL(\"routing-number-verification\")\n\n\treturn c.do(\"POST\", url, data, nil)\n}", "func (pubKey PubKey) VRFVerify(proof crypto.Proof, seed []byte) (crypto.Output, error) {\n\treturn nil, fmt.Errorf(\"VRF verify is not supported by the BLS12\")\n}", "func (rs Resource) HandleVerify(w http.ResponseWriter, r *http.Request) {\n\tverificationCode := r.URL.Query().Get(\"v\")\n\n\t//verification token should not be empty\n\tif len(verificationCode) == 0 {\n\t\tlog(r).WithField(\"verificationcode\", verificationCode)\n\t\trender.Render(w, r, ErrInvalidRequest(ErrInvalidVerificationToken))\n\t\treturn\n\t}\n\n\t//we decode the verification code\n\tcode, err := encoding.Decode(verificationCode)\n\tif err != nil {\n\t\tlog(r).WithField(\"verificationcode\", verificationCode)\n\t\trender.Render(w, r, 
ErrInvalidRequest(err))\n\t\treturn\n\t}\n\n\t//then check if it exists in the database\n\tu, err := rs.Store.Users().GetByVerificationToken(r.Context(), code)\n\tif err != nil {\n\t\tlog(r).Error(err)\n\t\trender.Render(w, r, ErrInvalidRequest(ErrInvalidVerificationToken))\n\t\treturn\n\t}\n\n\t//check if token is expired\n\tif time.Until(u.VerificationExpires) < 1 {\n\t\tlog(r).WithField(\"verificationtoken\", verificationCode)\n\t\trender.Render(w, r, ErrInvalidRequest(ErrInvalidVerificationToken))\n\t\treturn\n\t}\n\n\t//mark the user as verified\n\tif err := rs.Store.Users().SetVerified(r.Context(), u.ID); err != nil {\n\t\tlog(r).Error(err)\n\t\trender.Render(w, r, ErrInternalServerError)\n\t\treturn\n\t}\n\n\t//send a welcome email.\n\twelcomeEmailContent := provider.WelcomeEmailContent{\n\t\tLoginURL: \"https://fupisha.io/login\",\n\t\tSiteName: \"Fupisha\",\n\t\tSiteURL: \"https://fupisha.io\",\n\t}\n\n\tif err := rs.Mailer.SendWelcomeNotification(u.Email, welcomeEmailContent); err != nil {\n\t\tlog(r).Error(err)\n\t\trender.Render(w, r, ErrInternalServerError)\n\t\treturn\n\t}\n\n\t//We should redirect to a frontend page once the frontend is up and running. The page should have a text probably saying account verified successfully and that the user should have recieved a welcome email with\n\t//login instructions.\n\trender.Status(r, http.StatusOK)\n\trender.Respond(w, r, http.NoBody)\n}", "func (client *Client) VerifyBankElement(request *VerifyBankElementRequest) (response *VerifyBankElementResponse, err error) {\n\tresponse = CreateVerifyBankElementResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CheckDeposit check a deposit for an address given the address id
func (as *AddressService) CheckDeposit(addressID int) error { if isEmptyStr(as.assetCode) { return errAssetCode } if err := as.client.Get(buildString("address/check/", as.assetCode, "/", strconv.Itoa(addressID)), nil); err != nil { return err } return nil }
[ "func ExampleAddDepositAddress() {}", "func ExampleRemoveDepositAddress() {}", "func (k *Keeper) GetDeposit(ctx sdk.Context, address sdk.AccAddress) (deposit types.Deposit, found bool) {\n\tstore := k.Store(ctx)\n\n\tkey := types.DepositKey(address)\n\tvalue := store.Get(key)\n\tif value == nil {\n\t\treturn deposit, false\n\t}\n\n\tk.cdc.MustUnmarshalBinaryBare(value, &deposit)\n\treturn deposit, true\n}", "func (p *PrivKey) checkImportedAddress(walletAddress, p2shSegwitAddress, fullPublicKey string) {\n\t// Note,\n\t// GetAccount() calls GetAddressInfo() internally\n\n\tvar (\n\t\ttargetAddr string\n\t\taddrType address.AddrType\n\t)\n\n\tswitch p.btc.CoinTypeCode() {\n\tcase coin.BTC:\n\t\ttargetAddr = p2shSegwitAddress\n\t\taddrType = address.AddrTypeP2shSegwit\n\tcase coin.BCH:\n\t\ttargetAddr = walletAddress\n\t\taddrType = address.AddrTypeBCHCashAddr\n\tdefault:\n\t\tp.logger.Warn(\"this coin type is not implemented in checkImportedAddress()\",\n\t\t\tzap.String(\"coin_type_code\", p.btc.CoinTypeCode().String()))\n\t\treturn\n\t}\n\n\t// 1.call `getaccount` by target_address\n\tacnt, err := p.btc.GetAccount(targetAddr)\n\tif err != nil {\n\t\tp.logger.Warn(\n\t\t\t\"fail to call btc.GetAccount()\",\n\t\t\tzap.String(addrType.String(), targetAddr),\n\t\t\tzap.Error(err))\n\t\treturn\n\t}\n\tp.logger.Debug(\n\t\t\"account is found\",\n\t\tzap.String(\"account\", acnt),\n\t\tzap.String(addrType.String(), targetAddr))\n\n\t// 2.call `getaddressinfo` by target_address\n\taddrInfo, err := p.btc.GetAddressInfo(targetAddr)\n\tif err != nil {\n\t\tp.logger.Warn(\n\t\t\t\"fail to call btc.GetAddressInfo()\",\n\t\t\tzap.String(addrType.String(), targetAddr),\n\t\t\tzap.Error(err))\n\t} else {\n\t\tif addrInfo.Pubkey != fullPublicKey {\n\t\t\tp.logger.Warn(\n\t\t\t\t\"pubkey is not matched\",\n\t\t\t\tzap.String(\"in_bitcoin_core\", addrInfo.Pubkey),\n\t\t\t\tzap.String(\"in_database\", fullPublicKey))\n\t\t}\n\t}\n}", "func (e Exchange) DepositAddress(exch string, 
currencyCode currency.Code) (out string, err error) {\n\tif currencyCode.IsEmpty() {\n\t\terr = errors.New(\"currency code is empty\")\n\t\treturn\n\t}\n\treturn engine.Bot.DepositAddressManager.GetDepositAddressByExchange(exch, currencyCode)\n}", "func (h *HUOBI) QueryDepositAddress(ctx context.Context, cryptocurrency currency.Code) ([]DepositAddress, error) {\n\tresp := struct {\n\t\tDepositAddress []DepositAddress `json:\"data\"`\n\t}{}\n\n\tvals := url.Values{}\n\tvals.Set(\"currency\", cryptocurrency.Lower().String())\n\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, huobiAccountDepositAddress, vals, nil, &resp, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.DepositAddress) == 0 {\n\t\treturn nil, errors.New(\"deposit address data isn't populated\")\n\t}\n\treturn resp.DepositAddress, nil\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) IsDeposit(opts *bind.CallOpts, blockNum *big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"isDeposit\", blockNum)\n\treturn *ret0, err\n}", "func (_DFedUSDD *DFedUSDDTransactor) Deposit(opts *bind.TransactOpts, _to common.Address, _amount *big.Int) (*types.Transaction, error) {\n\treturn _DFedUSDD.contract.Transact(opts, \"deposit\", _to, _amount)\n}", "func WalletAddressCheck(addr string) string {\n\toutput := sdksource.WalletAddressCheck(addr)\n\treturn output\n}", "func (_SingleAuto *SingleAutoFilterer) FilterDeposit(opts *bind.FilterOpts, user []common.Address, pid []*big.Int) (*SingleAutoDepositIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\tvar pidRule []interface{}\n\tfor _, pidItem := range pid {\n\t\tpidRule = append(pidRule, pidItem)\n\t}\n\n\tlogs, sub, err := _SingleAuto.contract.FilterLogs(opts, \"Deposit\", userRule, pidRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
&SingleAutoDepositIterator{contract: _SingleAuto.contract, event: \"Deposit\", logs: logs, sub: sub}, nil\n}", "func (mapper Mapper) AddDeposit(ctx context.Context, proposalID uint64, depositorAddr btypes.Address, depositAmount uint64) (btypes.Error, bool) {\n\tproposal, ok := mapper.GetProposal(proposalID)\n\tif !ok {\n\t\treturn types.ErrUnknownProposal(proposalID), false\n\t}\n\n\taccountMapper := ctx.Mapper(account.AccountMapperName).(*account.AccountMapper)\n\taccount := accountMapper.GetAccount(depositorAddr).(*qtypes.QOSAccount)\n\taccount.MustMinusQOS(btypes.NewInt(int64(depositAmount)))\n\taccountMapper.SetAccount(account)\n\n\t// Update proposal\n\tproposal.TotalDeposit = proposal.TotalDeposit + depositAmount\n\tmapper.SetProposal(proposal)\n\n\t// Check if deposit has provided sufficient total funds to transition the proposal into the voting period\n\tactivatedVotingPeriod := false\n\tif proposal.Status == types.StatusDepositPeriod && proposal.TotalDeposit >= mapper.GetParams(ctx).MinDeposit {\n\t\tmapper.activateVotingPeriod(ctx, proposal)\n\t\tactivatedVotingPeriod = true\n\t}\n\n\t// Add or update deposit object\n\tcurrDeposit, found := mapper.GetDeposit(proposalID, depositorAddr)\n\tif !found {\n\t\tnewDeposit := types.Deposit{depositorAddr, proposalID, depositAmount}\n\t\tmapper.SetDeposit(proposalID, depositorAddr, newDeposit)\n\t} else {\n\t\tcurrDeposit.Amount = currDeposit.Amount + depositAmount\n\t\tmapper.SetDeposit(proposalID, depositorAddr, currDeposit)\n\t}\n\n\treturn nil, activatedVotingPeriod\n}", "func (_Depositmanager *DepositmanagerSession) DepositFor(_destination common.Address, _amount *big.Int, _tokenType *big.Int, _pubkey []byte) (*types.Transaction, error) {\n\treturn _Depositmanager.Contract.DepositFor(&_Depositmanager.TransactOpts, _destination, _amount, _tokenType, _pubkey)\n}", "func (_Depositmanager *DepositmanagerTransactorSession) DepositFor(_destination common.Address, _amount *big.Int, _tokenType *big.Int, _pubkey 
[]byte) (*types.Transaction, error) {\n\treturn _Depositmanager.Contract.DepositFor(&_Depositmanager.TransactOpts, _destination, _amount, _tokenType, _pubkey)\n}", "func SaveDeposit(a *rlib.Deposit, newRcpts []int64) []BizError {\n\tvar e []BizError\n\tvar rlist []rlib.Receipt\n\ttot := float64(0)\n\t//------------------------------------------------------------\n\t// First, validate that all newRcpts are eligible for inclusion\n\t// in this receipt\n\t//------------------------------------------------------------\n\tfor i := 0; i < len(newRcpts); i++ {\n\t\tr := rlib.GetReceipt(newRcpts[i])\n\t\ttot += r.Amount\n\t\tif r.DID != 0 && r.DID != a.DID {\n\t\t\ts := fmt.Sprintf(BizErrors[ReceiptAlreadyDeposited].Message, rlib.IDtoShortString(\"RCPT\", r.RCPTID), rlib.IDtoShortString(\"D\", r.DID))\n\t\t\tb := BizError{Errno: ReceiptAlreadyDeposited, Message: s}\n\t\t\te = append(e, b)\n\t\t\tcontinue\n\t\t}\n\t\tif r.BID != a.BID {\n\t\t\ts := fmt.Sprintf(BizErrors[ReceiptBizMismatch].Message, rlib.IDtoShortString(\"RCPT\", r.RCPTID))\n\t\t\tb := BizError{Errno: ReceiptBizMismatch, Message: s}\n\t\t\te = append(e, b)\n\t\t\tcontinue\n\t\t}\n\t\trlist = append(rlist, r)\n\t}\n\t//------------------------------------------------------------\n\t// next, validate that the total of all newRcpts matches Amount\n\t//------------------------------------------------------------\n\tif tot != a.Amount {\n\t\te = AddBizErrToList(e, DepositTotalMismatch)\n\t\treturn e\n\t}\n\n\t//------------------------------------------------------------\n\t// Save the deposit\n\t//------------------------------------------------------------\n\tif a.DID == 0 {\n\t\t_, err := rlib.InsertDeposit(a)\n\t\tif err != nil {\n\t\t\te = AddErrToBizErrlist(err, e)\n\t\t}\n\t\tfor i := 0; i < len(newRcpts); i++ {\n\t\t\tvar dp = rlib.DepositPart{\n\t\t\t\tDID: a.DID,\n\t\t\t\tBID: a.BID,\n\t\t\t\tRCPTID: newRcpts[i],\n\t\t\t}\n\t\t\terr = rlib.InsertDepositPart(&dp)\n\t\t\tif err != nil {\n\t\t\t\te = 
AddErrToBizErrlist(err, e)\n\t\t\t}\n\t\t\tif rlist[i].DID == 0 {\n\t\t\t\trlist[i].DID = a.DID\n\t\t\t\terr = rlib.UpdateReceipt(&rlist[i])\n\t\t\t\tif err != nil {\n\t\t\t\t\te = AddErrToBizErrlist(err, e)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\terr := rlib.UpdateDeposit(a)\n\t\tif err != nil {\n\t\t\te = AddErrToBizErrlist(err, e)\n\t\t}\n\t\t//---------------------------------------------------------------------------\n\t\t// If any receipts have been removed from the previous version. To do\n\t\t// this we will compare the list of current Deposit's RCPTIDs to the\n\t\t// list of newly proposed RCPTIDs. We will compare the two lists and\n\t\t// produce 2 new lists: addlist and removelist. Then we will add and\n\t\t// link the addlist, and unlink the removelist. The new Receipts are\n\t\t// already provided in newRcpts.\n\t\t//---------------------------------------------------------------------------\n\t\tcurDepParts, err := rlib.GetDepositParts(a.DID)\n\t\tif err != nil {\n\t\t\te = AddErrToBizErrlist(err, e)\n\t\t\treturn e\n\t\t}\n\n\t\tcurrent := map[int64]int{}\n\t\tfor i := 0; i < len(curDepParts); i++ {\n\t\t\tcurrent[curDepParts[i].RCPTID] = 0 // mark each receipt as initialized to 0\n\t\t}\n\n\t\tvar addlist []int64\n\t\tfor i := 0; i < len(newRcpts); i++ {\n\t\t\t_, ok := current[newRcpts[i]]\n\t\t\tif !ok {\n\t\t\t\taddlist = append(addlist, newRcpts[i])\n\t\t\t}\n\t\t}\n\n\t\tvar newlist = map[int64]int{}\n\t\tfor i := 0; i < len(newRcpts); i++ {\n\t\t\tnewlist[newRcpts[i]] = 0\n\t\t}\n\n\t\tvar removelist []int64\n\t\tfor i := 0; i < len(curDepParts); i++ {\n\t\t\t_, ok := newlist[curDepParts[i].RCPTID]\n\t\t\tif !ok {\n\t\t\t\tremovelist = append(removelist, curDepParts[i].RCPTID)\n\t\t\t}\n\t\t}\n\n\t\t//--------------------------------------------------------\n\t\t// Remove the deposit link in the removelist receipts...\n\t\t//--------------------------------------------------------\n\t\tfor i := 0; i < len(removelist); i++ {\n\t\t\tr := 
rlib.GetReceipt(removelist[i])\n\t\t\tif r.RCPTID == 0 {\n\t\t\t\terr := fmt.Errorf(\"could not load receipt %d\", removelist[i])\n\t\t\t\te = AddErrToBizErrlist(err, e)\n\t\t\t}\n\t\t\tr.DID = 0\n\t\t\terr := rlib.UpdateReceipt(&r)\n\t\t\tif err != nil {\n\t\t\t\te = AddErrToBizErrlist(err, e)\n\t\t\t}\n\t\t\t//---------------------------------------\n\t\t\t// Now remove the Deposit Part record...\n\t\t\t//---------------------------------------\n\t\t\tfor j := 0; j < len(curDepParts); j++ {\n\t\t\t\tif curDepParts[j].RCPTID == removelist[i] {\n\t\t\t\t\terr = rlib.DeleteDepositPart(curDepParts[j].DPID)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\te = AddErrToBizErrlist(err, e)\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tcurrent[curDepParts[i].RCPTID]++ // mark that we've actually processed this entry\n\t\t}\n\t\t//--------------------------------------------------------\n\t\t// Add the deposit link in the addlist receipts...\n\t\t//--------------------------------------------------------\n\t\tfor i := 0; i < len(addlist); i++ {\n\t\t\tr := rlib.GetReceipt(addlist[i])\n\t\t\tif r.RCPTID == 0 {\n\t\t\t\terr := fmt.Errorf(\"could not load receipt %d\", addlist[i])\n\t\t\t\te = AddErrToBizErrlist(err, e)\n\t\t\t}\n\t\t\tr.DID = a.DID\n\t\t\terr := rlib.UpdateReceipt(&r)\n\t\t\tif err != nil {\n\t\t\t\te = AddErrToBizErrlist(err, e)\n\t\t\t}\n\t\t\t//-----------------------------------------\n\t\t\t// Add a deposit part for this receipt...\n\t\t\t//-----------------------------------------\n\t\t\tvar dp = rlib.DepositPart{\n\t\t\t\tDID: a.DID,\n\t\t\t\tBID: a.BID,\n\t\t\t\tRCPTID: r.RCPTID,\n\t\t\t}\n\t\t\terr = rlib.InsertDepositPart(&dp)\n\t\t\tif err != nil {\n\t\t\t\te = AddErrToBizErrlist(err, e)\n\t\t\t}\n\t\t\tcurrent[newRcpts[i]]++ // mark that we've actually processed this entry\n\t\t}\n\t}\n\treturn e\n}", "func (dcr *ExchangeWallet) DepositAddress() (string, error) {\n\taddr, err := dcr.wallet.ExternalAddress(dcr.ctx, dcr.depositAccount())\n\tif err 
!= nil {\n\t\treturn \"\", err\n\t}\n\treturn addr.String(), nil\n}", "func (acct *CheckingAccount) Deposit(amount Money) error {\n\tfmt.Printf(\"Depositing %f \\n\", amount)\n\tacct.Balance = acct.Balance + amount\n\treturn nil\n}", "func (w *Wallet) GetBalance(address string, id schema.ID, asset Asset) (xdr.Int64, []string, error) {\n\tif address == \"\" {\n\t\terr := fmt.Errorf(\"trying to get the balance of an empty address. this should never happen\")\n\t\tlog.Warn().Err(err).Send()\n\t\treturn 0, nil, err\n\t}\n\n\tvar total xdr.Int64\n\thorizonClient, err := w.GetHorizonClient()\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\n\tcursor := \"\"\n\n\ttxReq := horizonclient.TransactionRequest{\n\t\tForAccount: address,\n\t\tCursor: cursor,\n\t\tLimit: stellarPageLimit,\n\t}\n\n\tlog.Info().Str(\"address\", address).Msg(\"fetching balance for address\")\n\ttxes, err := horizonClient.Transactions(txReq)\n\tif err != nil {\n\t\treturn 0, nil, errors.Wrap(err, \"could not get transactions\")\n\t}\n\n\tdonors := make(map[string]struct{})\n\tfor len(txes.Embedded.Records) != 0 {\n\t\tfor _, tx := range txes.Embedded.Records {\n\t\t\tif tx.Memo == strconv.FormatInt(int64(id), 10) {\n\t\t\t\teffectsReq := horizonclient.EffectRequest{\n\t\t\t\t\tForTransaction: tx.Hash,\n\t\t\t\t}\n\t\t\t\teffects, err := horizonClient.Effects(effectsReq)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Err(err).Msgf(\"failed to get transaction effects\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// first check if we have been paid\n\t\t\t\tvar isFunding bool\n\t\t\t\tfor _, effect := range effects.Embedded.Records {\n\t\t\t\t\tif effect.GetAccount() != address {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif effect.GetType() == \"account_credited\" {\n\t\t\t\t\t\tcreditedEffect := effect.(horizoneffects.AccountCredited)\n\t\t\t\t\t\tif creditedEffect.Asset.Code != asset.Code() ||\n\t\t\t\t\t\t\tcreditedEffect.Asset.Issuer != asset.Issuer() 
{\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tparsedAmount, err := amount.Parse(creditedEffect.Amount)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tisFunding = true\n\t\t\t\t\t\ttotal += parsedAmount\n\t\t\t\t\t} else if effect.GetType() == \"account_debited\" {\n\t\t\t\t\t\tdebitedEffect := effect.(horizoneffects.AccountDebited)\n\t\t\t\t\t\tif debitedEffect.Asset.Code != asset.Code() ||\n\t\t\t\t\t\t\tdebitedEffect.Asset.Issuer != asset.Issuer() {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tparsedAmount, err := amount.Parse(debitedEffect.Amount)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tisFunding = false\n\t\t\t\t\t\ttotal -= parsedAmount\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif isFunding {\n\t\t\t\t\t// we don't need to verify the asset here anymore, since this\n\t\t\t\t\t// flag is only toggled on after that check passed in the loop\n\t\t\t\t\t// above\n\t\t\t\t\tfor _, effect := range effects.Embedded.Records {\n\t\t\t\t\t\tif effect.GetType() == \"account_debited\" && effect.GetAccount() != address {\n\t\t\t\t\t\t\tdonors[effect.GetAccount()] = struct{}{}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcursor = tx.PagingToken()\n\t\t}\n\n\t\t// if the amount of records fetched is smaller than the page limit\n\t\t// we can assume we are on the last page and we break to prevent another\n\t\t// call to horizon\n\t\tif len(txes.Embedded.Records) < stellarPageLimit {\n\t\t\tbreak\n\t\t}\n\n\t\ttxReq.Cursor = cursor\n\t\tlog.Info().Str(\"address\", address).Msgf(\"fetching balance for address with cursor: %s\", cursor)\n\t\ttxes, err = horizonClient.Transactions(txReq)\n\t\tif err != nil {\n\t\t\treturn 0, nil, errors.Wrap(err, \"could not get transactions\")\n\t\t}\n\t}\n\n\tdonorList := []string{}\n\tfor donor := range donors {\n\t\tdonorList = append(donorList, donor)\n\t}\n\tlog.Info().\n\t\tInt64(\"balance\", int64(total)).\n\t\tStr(\"address\", 
address).\n\t\tInt64(\"id\", int64(id)).Msgf(\"status of balance for reservation\")\n\treturn total, donorList, nil\n}", "func (f *FTX) FetchDepositAddress(ctx context.Context, coin currency.Code, chain string) (*DepositData, error) {\n\tresp := struct {\n\t\tData DepositData `json:\"result\"`\n\t}{}\n\tvals := url.Values{}\n\tif chain != \"\" {\n\t\tvals.Set(\"method\", strings.ToLower(chain))\n\t}\n\tpath := common.EncodeURLValues(getDepositAddress+coin.Upper().String(), vals)\n\treturn &resp.Data, f.SendAuthHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, path, nil, &resp)\n}", "func ServiceGetAddressDistrictByID(user *md.User, id int64) (access utils.AccessResult, districtInfo map[string]interface{}, err error) {\n\n\tif access, err = ServiceCheckUserModelAssess(user, \"AddressDistrict\"); err == nil {\n\t\tif !access.Read {\n\t\t\terr = errors.New(\"has no update permission\")\n\t\t\treturn\n\t\t}\n\t} else {\n\t\treturn\n\t}\n\to := orm.NewOrm()\n\tvar district *md.AddressDistrict\n\n\tif district, err = md.GetAddressDistrictByID(id, o); err == nil {\n\t\tobjInfo := make(map[string]interface{})\n\t\tobjInfo[\"Name\"] = district.Name\n\t\tobjInfo[\"ID\"] = district.ID\n\t\tcityInfo := make(map[string]interface{})\n\t\tcityInfo[\"ID\"] = district.City.ID\n\t\tcityInfo[\"Name\"] = district.City.Name\n\t\tobjInfo[\"City\"] = cityInfo\n\t\tprovinceInfo := make(map[string]interface{})\n\t\tif district.City.Province != nil {\n\t\t\tprovinceInfo[\"ID\"] = district.City.Province.ID\n\t\t\tprovinceInfo[\"Name\"] = district.City.Province.Name\n\t\t\tobjInfo[\"Province\"] = provinceInfo\n\t\t\tcountryInfo := make(map[string]interface{})\n\t\t\tif district.City.Province.Country != nil {\n\t\t\t\tcountryInfo[\"ID\"] = district.City.Province.Country.ID\n\t\t\t\tcountryInfo[\"Name\"] = district.City.Province.Country.Name\n\t\t\t\tobjInfo[\"Country\"] = countryInfo\n\t\t\t}\n\t\t}\n\n\t\tdistrictInfo = objInfo\n\t}\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use Price.ProtoReflect.Descriptor instead.
func (*Price) Descriptor() ([]byte, []int) { return file_price_price_proto_rawDescGZIP(), []int{0} }
[ "func (*Price) Descriptor() ([]byte, []int) {\n\treturn file_proto_supply_proto_rawDescGZIP(), []int{0}\n}", "func (*Price) Descriptor() ([]byte, []int) {\n\treturn file_vega_proto_rawDescGZIP(), []int{0}\n}", "func (*PriceInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_retail_v2_common_proto_rawDescGZIP(), []int{6}\n}", "func (*PricingPodPriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{8}\n}", "func (*PriceLevel) Descriptor() ([]byte, []int) {\n\treturn file_vega_proto_rawDescGZIP(), []int{14}\n}", "func (*PricingNodePriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{6}\n}", "func (*PriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_grpc_examples_wallet_stats_stats_proto_rawDescGZIP(), []int{0}\n}", "func (*PricingPodPriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{9}\n}", "func (*ModifyPriceLevel) Descriptor() ([]byte, []int) {\n\treturn file_openfeed_proto_rawDescGZIP(), []int{19}\n}", "func (*PricingNodePriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{7}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*OrderPricePair) Descriptor() ([]byte, []int) {\n\treturn file_witness_proto_rawDescGZIP(), []int{10}\n}", "func (*ProductMemberPrice) Descriptor() ([]byte, []int) {\n\treturn file_modules_inventory_product_product_proto_rawDescGZIP(), []int{10}\n}", "func (*GetDownLinkPriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_wallet_proto_rawDescGZIP(), []int{25}\n}", "func (*ProductPriceRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_github_com_Ultimate_Super_WebDev_Corp_gateway_services_widget_widget_proto_rawDescGZIP(), []int{1}\n}", "func (*PriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_grpc_examples_wallet_stats_stats_proto_rawDescGZIP(), []int{1}\n}", "func (*Money) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_datatypes_proto_rawDescGZIP(), []int{32}\n}", "func (*PlanChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 1}\n}", "func (*PlanChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 3}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lookup looks up the provided key in map m. The caller must provide the key's digest which is used as a hash.
func (m Map) Lookup(d digest.Digest, key T) T { if _, ok := m.tab[d]; !ok { return nil } entry := *m.tab[d] for entry != nil && Less(entry.Key, key) { entry = entry.Next } if entry == nil || !Equal(entry.Key, key) { return nil } return entry.Value }
[ "func lookup(m map[string]string, k string) (string, error) {\n\tv, ok := m[k]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"key %q missing\", k)\n\t}\n\treturn v, nil\n}", "func (m *Map) Lookup(key, val []byte) int {\n\tif len(key) != m.keySize {\n\t\tpanic(\"key has invalid size\")\n\t}\n\n\tif len(val) != m.valSize {\n\t\tpanic(\"val has invalid size\")\n\t}\n\n\tvar v int\n\n\tfor _, entry := range m.m {\n\t\tvv := subtle.ConstantTimeCompare(entry[:m.keySize], key) &^ v\n\t\tsubtle.ConstantTimeCopy(vv, val, entry[m.keySize:])\n\t\tv |= vv\n\t}\n\n\treturn v\n}", "func (r *HashRing) Lookup(key string) (string, bool) {\n\tstrs := r.LookupN(key, 1)\n\tif len(strs) == 0 {\n\t\treturn \"\", false\n\t}\n\treturn strs[0], true\n}", "func (dc *DigestCache) Lookup(hash []byte) []byte {\n\tif r, ok := dc.Records[string(hash)]; ok {\n\t\treturn r\n\t}\n\treturn nil\n}", "func (r *ring) Lookup(\n\tkey string,\n) (HostInfo, error) {\n\taddr, found := r.ring().Lookup(key)\n\tif !found {\n\t\tselect {\n\t\tcase r.refreshChan <- &ChangedEvent{}:\n\t\tdefault:\n\t\t}\n\t\treturn HostInfo{}, ErrInsufficientHosts\n\t}\n\tr.members.RLock()\n\tdefer r.members.RUnlock()\n\thost, ok := r.members.keys[addr]\n\tif !ok {\n\t\treturn HostInfo{}, fmt.Errorf(\"host not found in member keys, host: %q\", addr)\n\t}\n\treturn host, nil\n}", "func Lookup() {\n\tTEXT(\"Lookup\", NOSPLIT, \"func(keyset []byte, key []byte) int\")\n\tDoc(\"Lookup searches for a key in a set of keys, returning its index if \",\n\t\t\"found. 
If the key cannot be found, the number of keys is returned.\")\n\n\t// Load inputs.\n\tkeyset := Load(Param(\"keyset\").Base(), GP64())\n\tcount := Load(Param(\"keyset\").Len(), GP64())\n\tSHRQ(Imm(4), count)\n\tkeyPtr := Load(Param(\"key\").Base(), GP64())\n\tkeyLen := Load(Param(\"key\").Len(), GP64())\n\tkeyCap := Load(Param(\"key\").Cap(), GP64())\n\n\t// None of the keys are larger than maxLength.\n\tCMPQ(keyLen, Imm(maxLength))\n\tJA(LabelRef(\"not_found\"))\n\n\t// We're going to be unconditionally loading 16 bytes from the input key\n\t// so first check if it's safe to do so (cap >= 16). If not, defer to\n\t// safe_load for additional checks.\n\tCMPQ(keyCap, Imm(maxLength))\n\tJB(LabelRef(\"safe_load\"))\n\n\t// Load the input key and pad with zeroes to 16 bytes.\n\tLabel(\"load\")\n\tkey := XMM()\n\tVMOVUPS(Mem{Base: keyPtr}, key)\n\tLabel(\"prepare\")\n\tzeroes := XMM()\n\tVPXOR(zeroes, zeroes, zeroes)\n\tones := XMM()\n\tVPCMPEQB(ones, ones, ones)\n\tvar blendBytes [maxLength * 2]byte\n\tfor j := 0; j < maxLength; j++ {\n\t\tblendBytes[j] = 0xFF\n\t}\n\tblendMasks := ConstBytes(\"blend_masks\", blendBytes[:])\n\tblendMasksPtr := GP64()\n\tLEAQ(blendMasks.Offset(maxLength), blendMasksPtr)\n\tSUBQ(keyLen, blendMasksPtr)\n\tblend := XMM()\n\tVMOVUPS(Mem{Base: blendMasksPtr}, blend)\n\tVPBLENDVB(blend, key, zeroes, key)\n\n\t// Zero out i so we can use it as the loop increment.\n\ti := GP64()\n\tXORQ(i, i)\n\n\t// Round the key count down to the nearest multiple of unroll to determine\n\t// how many iterations of the big loop we'll need.\n\ttruncatedCount := GP64()\n\tMOVQ(count, truncatedCount)\n\tshift := uint64(math.Log2(float64(unroll)))\n\tSHRQ(Imm(shift), truncatedCount)\n\tSHLQ(Imm(shift), truncatedCount)\n\n\t// Loop over multiple keys in the big loop.\n\tLabel(\"bigloop\")\n\tCMPQ(i, truncatedCount)\n\tJE(LabelRef(\"loop\"))\n\n\tx := []VecPhysical{X8, X9, X10, X11, X12, X13, X14, X15}\n\tfor n := 0; n < unroll; n++ {\n\t\tVPCMPEQB(Mem{Base: keyset, 
Disp: maxLength * n}, key, x[n])\n\t\tVPTEST(ones, x[n])\n\t\tvar target string\n\t\tif n == 0 {\n\t\t\ttarget = \"done\"\n\t\t} else {\n\t\t\ttarget = fmt.Sprintf(\"found%d\", n)\n\t\t}\n\t\tJCS(LabelRef(target))\n\t}\n\n\t// Advance and loop again.\n\tADDQ(Imm(unroll), i)\n\tADDQ(Imm(unroll*maxLength), keyset)\n\tJMP(LabelRef(\"bigloop\"))\n\n\t// Loop over the remaining keys.\n\tLabel(\"loop\")\n\tCMPQ(i, count)\n\tJE(LabelRef(\"done\"))\n\n\t// Try to match against the input key.\n\tmatch := XMM()\n\tVPCMPEQB(Mem{Base: keyset}, key, match)\n\tVPTEST(ones, match)\n\tJCS(LabelRef(\"done\"))\n\n\t// Advance and loop again.\n\tLabel(\"next\")\n\tINCQ(i)\n\tADDQ(Imm(maxLength), keyset)\n\tJMP(LabelRef(\"loop\"))\n\tJMP(LabelRef(\"done\"))\n\n\t// Return the loop increment, or the count if the key wasn't found. If we're\n\t// here from a jump within the big loop, the loop increment needs\n\t// correcting first.\n\tfor j := unroll - 1; j > 0; j-- {\n\t\tLabel(fmt.Sprintf(\"found%d\", j))\n\t\tINCQ(i)\n\t}\n\tLabel(\"done\")\n\tStore(i, ReturnIndex(0))\n\tRET()\n\tLabel(\"not_found\")\n\tStore(count, ReturnIndex(0))\n\tRET()\n\n\t// If the input key is near a page boundary, we must change the way we load\n\t// it to avoid a fault. We instead want to load the 16 bytes up to and\n\t// including the key, then shuffle the key forward in the register. E.g. 
for\n\t// key \"foo\" we would load the 13 bytes prior to the key along with \"foo\"\n\t// and then move the last 3 bytes forward so the first 3 bytes are equal\n\t// to \"foo\".\n\tLabel(\"safe_load\")\n\tpageOffset := GP64()\n\tMOVQ(keyPtr, pageOffset)\n\tANDQ(U32(pageSize-1), pageOffset)\n\tCMPQ(pageOffset, U32(pageSize-maxLength))\n\tJBE(LabelRef(\"load\")) // Not near a page boundary.\n\toffset := GP64()\n\tMOVQ(^U64(0)-maxLength+1, offset)\n\tADDQ(keyLen, offset)\n\tVMOVUPS(Mem{Base: keyPtr, Index: offset, Scale: 1}, key)\n\tvar shuffleBytes [maxLength * 2]byte\n\tfor j := 0; j < maxLength; j++ {\n\t\tshuffleBytes[j] = byte(j)\n\t\tshuffleBytes[j+maxLength] = byte(j)\n\t}\n\tshuffleMasks := ConstBytes(\"shuffle_masks\", shuffleBytes[:])\n\tshuffleMasksPtr := GP64()\n\tLEAQ(shuffleMasks.Offset(maxLength), shuffleMasksPtr)\n\tSUBQ(keyLen, shuffleMasksPtr)\n\tshuffle := XMM()\n\tVMOVUPS(Mem{Base: shuffleMasksPtr}, shuffle)\n\tVPSHUFB(shuffle, key, key)\n\tJMP(LabelRef(\"prepare\"))\n}", "func (c *Cache) Lookup(buildid int64) (string, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tif hash, ok := c.hashes[buildid]; !ok {\n\t\treturn \"\", fmt.Errorf(\"BuildId not found in cache: %d\", buildid)\n\t} else {\n\t\treturn hash, nil\n\t}\n}", "func (kp *KeyPool) Lookup(keyid [signkeys.KeyIDSize]byte) (*signkeys.PublicKey, error) {\n\tkp.mapMutex.RLock()\n\tdefer kp.mapMutex.RUnlock()\n\tkey, err := kp.lookup(keyid)\n\tif err == ErrNotFound && kp.FetchKeyCallBack != nil {\n\t\t// Use fetchkey callback\n\t\tfetchedKeyMarshalled, err := kp.FetchKeyCallBack(keyid[:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfetchedKey, err := new(signkeys.PublicKey).Unmarshal(fetchedKeyMarshalled)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkeyidFetch, err := kp.loadKey(fetchedKey)\n\t\tif err != nil && err != ErrExists {\n\t\t\treturn nil, ErrNotFound\n\t\t}\n\t\tif *keyidFetch == keyid {\n\t\t\treturn fetchedKey, nil\n\t\t}\n\t}\n\treturn key, 
err\n}", "func (h *hashtable) Lookup(key LRUItem) *CacheItem {\n\tkeyHash := key.Hash()\n\tbucketIndex := int(keyHash) % h.bucketcount\n\tfor node := h.buckets[bucketIndex]; node != nil; node = node.chain {\n\t\tif node.hash == keyHash {\n\t\t\tif node.data.Equals(key) {\n\t\t\t\treturn node\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (cgm *syncAtomicMap) Lookup(lookup func(string) (interface{}, error)) error {\n\tcgm.lookup = lookup\n\treturn nil\n}", "func (recv *Tree) Lookup(key uintptr) uintptr {\n\tc_key := (C.gconstpointer)(key)\n\n\tretC := C.g_tree_lookup((*C.GTree)(recv.native), c_key)\n\tretGo := (uintptr)(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func (fi *FsCache) lookup(key *fs.FsFile) (*fs.FsData, bool) {\n\tfdata, ok := fi.dict[key.String()]\n\treturn fdata, ok\n}", "func (ms *MemoizeSigner) lookup(msg []byte) ([]byte, bool) {\n\tms.RLock()\n\tdefer ms.RUnlock()\n\tsig, exists := ms.memory[msgDigest(msg)]\n\treturn sig, exists\n}", "func (c *cache[K, V]) Lookup(key K, f func() V) V {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tif c.data == nil {\n\t\tval := f()\n\t\tc.data = map[K]V{key: val}\n\t\treturn val\n\t}\n\tif v, ok := c.data[key]; ok {\n\t\treturn v\n\t}\n\tval := f()\n\tc.data[key] = val\n\treturn val\n}", "func (h *Int64HashTable) Lookup(v uint64, cmp func(int64) bool) (*entryInt64, bool) {\n\tidx, ok := h.lookup(v, h.capMask, cmp)\n\treturn &h.entries[idx], ok\n}", "func (kp *KeyPool) lookup(keyid [signkeys.KeyIDSize]byte) (*signkeys.PublicKey, error) {\n\tif d, ok := kp.keys[keyid]; ok {\n\t\tif d.Expire > times.Now() {\n\t\t\treturn d, nil\n\t\t}\n\t\treturn nil, ErrExpired\n\t}\n\treturn nil, ErrNotFound\n}", "func (i *FingerprintMetricIndex) Lookup(fp model.Fingerprint) (metric model.Metric, ok bool, err error) {\n\tok, err = i.Get(codable.Fingerprint(fp), (*codable.Metric)(&metric))\n\treturn\n}", "func (manager *KeysManager) Lookup(keyID string) (*jose.JSONWebKey, error) {\n\tvar jwk *jose.JSONWebKey\n\tjwk, exists := 
manager.KeyMap[keyID]\n\t// If no key is found, refresh the stored key set.\n\tif !exists {\n\t\tif err := manager.Refresh(); err != nil {\n\t\t\treturn jwk, err\n\t\t}\n\t\tjwk, exists = manager.KeyMap[keyID]\n\t\t// If still no key is found, return an error.\n\t\tif !exists {\n\t\t\treturn nil, missingKey(keyID)\n\t\t}\n\t}\n\treturn jwk, nil\n}", "func (h *Float64HashTable) Lookup(v uint64, cmp func(float64) bool) (*entryFloat64, bool) {\n\tidx, ok := h.lookup(v, h.capMask, cmp)\n\treturn &h.entries[idx], ok\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert inserts the provided keyvalue pair into the map, overriding any previous definiton of the key. The caller must provide the digest which is used as a hash.
func (m *Map) Insert(d digest.Digest, key, value T) { if m.tab[d] == nil { entry := &mapEntry{Key: key, Value: value} if m.tab == nil { m.tab = make(map[digest.Digest]**mapEntry) } m.n++ m.tab[d] = &entry return } entryp := m.tab[d] for *entryp != nil && Less((*entryp).Key, key) { entryp = &(*entryp).Next } if *entryp == nil || !Equal((*entryp).Key, key) { *entryp = &mapEntry{Key: key, Value: value, Next: *entryp} m.n++ } else { (*entryp).Value = value } }
[ "func (t *chaining) Insert(key string, val interface{}) {\n\tif t.loadFactor() > t.maxLoad {\n\t\tt.tableDouble()\n\t}\n\thash := t.hash(key)\n\tif t.values[hash] == nil {\n\t\tt.values[hash] = list.New()\n\t}\n\tt.values[hash].Insert(&pair{key, val})\n\tt.len++\n}", "func (t *HashTable) Insert(key containers.Hasher, value interface{}) {\n\tif t.tableSize < 3 {\n\t\tt.Clear()\n\t}\n\tindex := key.Hash(t.tableSize)\n\tnode := t.table[index]\n\tfor node != nil {\n\t\tif node.key.Equal(key) {\n\t\t\tnode.value = value\n\t\t\treturn\n\t\t}\n\t\tnode = node.next\n\t}\n\tt.table[index] = newTableNode(key, value, t.table[index])\n\tt.count++\n}", "func (t *Table) Insert(key, value string) {\n\ti := t.hash(key)\n\n\tfor j, kv := range t.table[i] {\n\t\tif key == kv.Key {\n\t\t\t// Overwrite previous value for the same key.\n\t\t\tt.table[i][j].Value = value\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Add a new value to the table.\n\tt.table[i] = append(t.table[i], kv{\n\t\tKey: key,\n\t\tValue: value,\n\t})\n}", "func (am AttributeMap) Insert(k string, v AttributeValue) {\n\tif _, existing := am.Get(k); !existing {\n\t\t*am.orig = append(*am.orig, newAttributeKeyValue(k, v))\n\t}\n}", "func (d *Dtrie) Insert(key, value interface{}) *Dtrie {\n\troot := insert(d.root, &entry{d.hasher(key), key, value})\n\treturn &Dtrie{root, d.hasher}\n}", "func (sm StringMap) Insert(k, v string) {\n\tif _, existing := sm.Get(k); !existing {\n\t\t*sm.orig = append(*sm.orig, newStringKeyValue(k, v))\n\t}\n}", "func (sm StringMap) Insert(k, v string) {\n\tif _, existing := sm.Get(k); !existing {\n\t\t*sm.orig = append(*sm.orig, NewStringKeyValue(k, v).orig)\n\t}\n}", "func (ht *BackgroundHashTable) insert(key int, val string) {\n datum := Datum{key: key, val: val}\n\n ht.data[ht.hash(key)].insert(datum)\n ht.n++\n ht.double()\n}", "func (c *Cache) Insert(key string, value string) {\n\treg := &Registry{\n\t\ttime.Now(),\n\t\tvalue,\n\t}\n\n\tc.Lock.Lock()\n\tc.Map[key] = reg\n\tc.Lock.Unlock()\n}", 
"func (m *hashMap) Add(key string, value interface{}) {\n\tb := m.getBucket(key)\n\tif b.Add(key, value) {\n\t\tm.Size++\n\t}\n}", "func Put(key string, value string){\n \n h := sha256.New()\n h.Write([]byte(value))\n sha := base64.URLEncoding.EncodeToString(h.Sum(nil))\n \n //fmt.Println(sha)\n var n Data \n \n n.val = value //storing key value in keyValue hash map\n n.hash = sha // storing key hash in keyHash hash map \n \n keyValue[key] = n\n}", "func Insert(args *function.Arguments) (values.Value, error) {\n\tdict, err := args.GetRequiredDictionary(\"dict\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkey, err := args.GetRequired(\"key\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue, err := args.GetRequired(\"value\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn dict.Insert(key, value)\n}", "func (h *Int64HashTable) Insert(e *entryInt64, v uint64, val int64, memoIdx int32) error {\n\te.h = h.fixHash(v)\n\te.payload.val = val\n\te.payload.memoIdx = memoIdx\n\th.size++\n\n\tif h.needUpsize() {\n\t\th.upsize(h.cap * uint64(loadFactor) * 2)\n\t}\n\treturn nil\n}", "func (h *Float64HashTable) Insert(e *entryFloat64, v uint64, val float64, memoIdx int32) error {\n\te.h = h.fixHash(v)\n\te.payload.val = val\n\te.payload.memoIdx = memoIdx\n\th.size++\n\n\tif h.needUpsize() {\n\t\th.upsize(h.cap * uint64(loadFactor) * 2)\n\t}\n\treturn nil\n}", "func (m *OrderedMap[K,V]) Insert(k K, v V) {\n\tif m.Has(k) {\n\t\tm.mp[k].Value = Pair[K,V]{Key: k, Value: v}\n\t} else {\n\t\tpair := Pair[K,V]{Key: k, Value: v}\n\t\te := m.list.PushBack(pair)\n\t\tm.mp[k] = e\n\t}\n}", "func (s *flowMap) Put(k, v *flow.Flow) {\n\td := k.Digest()\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif _, ok := s.m[d]; ok {\n\t\treturn\n\t}\n\ts.m[d] = v\n}", "func (fdb *fdbSlice) Insert(k Key, v Value) error {\n\n\tfdb.cmdCh <- kv{k: k, v: v}\n\treturn fdb.fatalDbErr\n\n}", "func (recv *VariantDict) Insert(key string, formatString string, args ...interface{}) {\n\tc_key := 
C.CString(key)\n\tdefer C.free(unsafe.Pointer(c_key))\n\n\tgoFormattedString := fmt.Sprintf(formatString, args...)\n\tc_format_string := C.CString(goFormattedString)\n\tdefer C.free(unsafe.Pointer(c_format_string))\n\n\tC._g_variant_dict_insert((*C.GVariantDict)(recv.native), c_key, c_format_string)\n\n\treturn\n}", "func (i StringHashMap[T, V]) Add(key T, val V) {\n\thash := key.Hash()\n\ti.hashToKey[hash] = key\n\ti.hashToVal[hash] = val\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Len returns the total number of entries in the map.
func (m Map) Len() int { return m.n }
[ "func (m *HashMap) Len() int64 { return m.n }", "func Len(m Map) int {\n\treturn m.count()\n}", "func (em EntryMap) Len() int {\n\treturn len(em)\n}", "func (hm *HashMap) Len() int {\n\treturn hm.np.elemNum()\n}", "func (hm *HashMap) Len() int {\n\thm.rlock()\n\tdefer hm.runlock()\n\n\treturn hm.len()\n}", "func (j *JPNSoftwareMap) Len() int { return len(*j) }", "func (m *Mapping) Len() int { return len(m.Pairs) }", "func (c *Cache) Len() int {\n\treturn len(c.keyMap)\n}", "func length(m *sync.Map) int {\n\tn := 0\n\tm.Range(func(_, _ interface{}) bool {\n\t\tn++\n\t\treturn true\n\t})\n\treturn n\n}", "func (accounts *Accounts) Len() int {\n\tif (accounts == nil) || (accounts.Map == nil) {\n\t\treturn 0\n\t}\n\treturn len(accounts.Map)\n}", "func (rm *ResultMap) Len() int {\n\tl := 0\n\trm.sm.Range(func(_, _ interface{}) bool {\n\t\tl++\n\t\treturn true\n\t})\n\treturn l\n}", "func (m *MultiMap) Size() int {\n\tsize := 0\n\tfor _, value := range m.m {\n\t\tsize += len(value)\n\t}\n\treturn size\n}", "func (m *ModuleMap) Len() int {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\treturn len(m.m)\n}", "func (c *Cache) Len() int {\n\treturn c.secretMap.Len()\n}", "func (rm *FilteredResultMap) Len() int {\n\tl := 0\n\trm.sm.Range(func(_, _ interface{}) bool {\n\t\tl++\n\t\treturn true\n\t})\n\treturn l\n}", "func (e byEntry) Len() int {\n\treturn len(e)\n}", "func (am AttributeMap) Len() int {\n\treturn len(*am.orig)\n}", "func (w *WindowedMap) Len() int {\n\treturn len(w.uidList)\n}", "func (tt *TtTable) Len() uint64 {\n\treturn tt.numberOfEntries\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Each enumerates all keyvalue pairs in map m in deterministic order. TODO(marius): we really ought to use a representation that's more amenable to such (common) operations.
func (m Map) Each(fn func(k, v T)) { digests := make([]digest.Digest, 0, len(m.tab)) for d := range m.tab { digests = append(digests, d) } sort.Slice(digests, func(i, j int) bool { return digests[i].Less(digests[j]) }) for _, d := range digests { for entry := *m.tab[d]; entry != nil; entry = entry.Next { fn(entry.Key, entry.Value) } } }
[ "func AffinityMapMemIter(m AffinityMapMem) func(k, v []byte) {\n\tks := len(AffinityKey{})\n\tvs := len(AffinityValue{})\n\n\treturn func(k, v []byte) {\n\t\tvar key AffinityKey\n\t\tcopy(key[:ks], k[:ks])\n\n\t\tvar val AffinityValue\n\t\tcopy(val[:vs], v[:vs])\n\n\t\tm[key] = val\n\t}\n}", "func EachMap(mp reflect.Value, fn func(key, val reflect.Value)) {\n\tif fn == nil {\n\t\treturn\n\t}\n\tif mp.Kind() != reflect.Map {\n\t\tpanic(\"only allow map value data\")\n\t}\n\n\tfor _, key := range mp.MapKeys() {\n\t\tfn(key, mp.MapIndex(key))\n\t}\n}", "func (l *LockMap) Iter(f func(key string, val string)) {\n\tc := atomic.LoadUint32(&l.c)\n\tif c == 0 { //Fast exit to avoid the cost of RLock/RUnlock for empty maps\n\t\treturn\n\t}\n\tl.RLock()\n\tdefer l.RUnlock()\n\tfor k, v := range l.m {\n\t\tf(k, v)\n\t}\n}", "func (m *OrderedMap) Map() map[string][]string {\n\tmapOut := map[string][]string{}\n\tfor _, keyPair := range m.keypairs {\n\t\tkey := keyPair[0]\n\t\tval := keyPair[1]\n\t\tmapOut[key] = append(mapOut[key], val)\n\t}\n\treturn mapOut\n}", "func (b *SetMap) Iterate(fn func(int)) {\n\telts := make([]int, 0, b.Count())\n\tfor c, _ := range b.hash {\n\t\telts = append(elts, c)\n\t}\n\tsort.Ints(elts)\n\tfor _, c := range elts {\n\t\tfn(c)\n\t}\n}", "func (m Object) Generate() interface{} {\n\tout := NewOrderedMap()\n\tfor _, kv := range m.Values {\n\t\tout.Set(kv.Key, kv.Value.Generate())\n\t}\n\treturn out\n}", "func (m *Type) Iter(f func(k, v string) error) error {\n\tif m.m == nil {\n\t\treturn nil\n\t}\n\tfor ak, av := range m.m {\n\t\tif err := f(ak, av); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (m *OrderedMap) Pairs() []KeyValue {\n\treturn m.kvs\n}", "func (mm Uint64Uint64Map) Foreach(f func(uint64, uint64)) {\n\tfor k, v := range mm {\n\t\tf(k, v)\n\t}\n}", "func MapMemIter(m MapMem) func(k, v []byte) {\n\tks := len(FrontendKey{})\n\tvs := len(FrontendValue{})\n\n\treturn func(k, v []byte) {\n\t\tvar key 
FrontendKey\n\t\tcopy(key[:ks], k[:ks])\n\n\t\tvar val FrontendValue\n\t\tcopy(val[:vs], v[:vs])\n\n\t\tm[key] = val\n\t}\n}", "func (m concurrentMapImpl) Items() map[string]interface{} {\n\ttmp := make(map[string]interface{})\n\tfor item := range m.allMapEntry() {\n\t\ttmp[item.Key] = item.Value\n\t}\n\treturn tmp\n}", "func (m *Map) Iterate(f func(key types.Type, value interface{})) {\n\tif m != nil {\n\t\tfor _, bucket := range m.table {\n\t\t\tfor _, e := range bucket {\n\t\t\t\tif e.key != nil {\n\t\t\t\t\tf(e.key, e.value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func testMapSetN(n int, m map[Key]interface{}) {\n\tfor i := 0; i < n; i++ {\n\t\tm[Key(i)] = i\n\t}\n}", "func sortMapByValue(m map[rune]float64) PairList {\n p := make(PairList, len(m))\n i := 0\n for k, v := range m {\n p[i] = Pair{k, v}\n\t i++\n }\n sort.Sort(p)\n return p\n}", "func BackendMapMemIter(m BackendMapMem) func(k, v []byte) {\n\tks := len(BackendKey{})\n\tvs := len(BackendValue{})\n\n\treturn func(k, v []byte) {\n\t\tvar key BackendKey\n\t\tcopy(key[:ks], k[:ks])\n\n\t\tvar val BackendValue\n\t\tcopy(val[:vs], v[:vs])\n\n\t\tm[key] = val\n\t}\n}", "func (om *OrderedMap[K, V]) Foreach(f func(key K, value V)) {\n\tif om.pairs == nil {\n\t\treturn\n\t}\n\n\tfor pair := om.Oldest(); pair != nil; pair = pair.Next() {\n\t\tf(pair.Key, pair.Value)\n\t}\n}", "func main() {\n\tm := map[string]int{\n\t\t\"James\":32,\n\t\t\"Miss Moneypenny\":27,\n\t}\n\tfmt.Println(m)\n\tfmt.Println(m[\"James\"])\n\n\t// adding an element to a map\n\tm[\"todd\"] = 33\n\n\t// ranging over a map\n\tfor key, value := range m {\n\t\tfmt.Println(key, value)\n\t}\n\n\n}", "func Values(m map[string]string) []string {\n\tvar values []string\n\tfor _, v := range m {\n\t\tvalues = append(values, v)\n\t}\n\treturn values\n}", "func (sm *DefaultIDSetMap) Each(f func(key int, value *IDSet)) {\n\tif sm.key != 0 {\n\t\tf(sm.key, sm.value)\n\t}\n\tif sm.m != nil {\n\t\tfor k, v := range sm.m {\n\t\t\tf(k, v)\n\t\t}\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Len returns the number of entries in the directory.
func (d Dir) Len() int { return len(d.contents) }
[ "func (d DirEntries) Len() int { return len(d) }", "func (d DirInfos) Len() int {\n\treturn len(d)\n}", "func Size() int {\n\treturn len(directory)\n}", "func (d Directory) Size() int { return binary.Size(d) }", "func (e DirectoryEntry) Size() int { return binary.Size(e) }", "func (p *PathManager) Len() int {\n\treturn p.paths.Len()\n}", "func (p path) Len() int {\n\treturn len(p)\n}", "func (f FileInfos) Len() int {\n\treturn len(f)\n}", "func (paths Paths) Len() int {\n\treturn len(paths)\n}", "func (r *MTnode) dpathLen() int {\n\tif !r.IsDir() {\n\t\tpanic(r)\n\t}\n\n\tret := len(r.name)\n\tif r.parent != nil {\n\t\tret += r.parent.dpathLen()\n\t}\n\tif r.name != \"/\" {\n\t\tret++\n\t}\n\n\treturn ret\n}", "func (r *Root) Len() uint64 {\n\treturn r.count\n}", "func (t *Tree) Len() int { return t.Count }", "func (e byEntry) Len() int {\n\treturn len(e)\n}", "func (sp crSortedPaths) Len() int {\n\treturn len(sp)\n}", "func (s *Store) Len(ctx context.Context) (int64, error) {\n\tvar nb int64\n\tif err := s.List(ctx, \"\", func(string) error {\n\t\tnb++\n\t\treturn nil\n\t}); err != nil {\n\t\treturn 0, err\n\t}\n\treturn nb, nil\n}", "func (files FilesByDate) Len() int {\n\treturn len(files)\n}", "func (ent *dirent) size() int64 {\n\t// 8 bytes for inode number\n\t// 8 bytes for name length\n\t// n bytes for name\n\treturn int64(16 + len(ent.name))\n}", "func (t *Tree) Len() int {\n\treturn t.Count\n}", "func (h Handle) Len() int {\n\tl := 8 + 8 + 4 + len(h.Type) + len(h.Name)\n\tif h.MD != nil {\n\t\tswitch h.MD.(type) {\n\t\tcase *AlpcPortInfo:\n\t\t\tl += 16\n\t\tcase *MutantInfo:\n\t\t\tl += 5\n\t\tcase *FileInfo:\n\t\t\tl++\n\t\t}\n\t}\n\treturn l\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set sets the directory's entry for the provided path. Set overwrites any previous file set at path.
func (d *Dir) Set(path string, file reflow.File) { if d.contents == nil { d.contents = make(map[string]reflow.File) } d.contents[path] = file }
[ "func (f *IndexFile) SetPath(path string) { f.path = path }", "func (l *LogFile) Set(path string) {\n\tl.Path = filepath.Join(path, logsFile)\n}", "func (f *LogFile) SetPath(path string) { f.path = path }", "func SetPath(p string) {\n\tpath = filepath.FromSlash(p)\n}", "func (d *Document) Set(value []byte) error {\n\tif d.path == \"\" {\n\t\treturn ErrEmptyDocumentPath\n\t}\n\n\terr := WriteFile(d.path, value, time.Second*5)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func SetPath(p string) {\n\tcurrentPath = \"\"\n\tbeginPath = p\n\tdirsAmount = 0\n}", "func SetPath(p string) error {\n\tif info, err := os.Stat(p); err != nil {\n\t\treturn err\n\t} else if !info.IsDir() {\n\t\treturn fmt.Errorf(\"path for persistence is not directory\")\n\t}\n\tdataPath = p\n\treturn nil\n}", "func (cfg *Config) SetPath(path string) error {\n\tif cfg == nil {\n\t\treturn errors.New(\"The Config object is not initialized\")\n\t}\n\tcfg.Path = path\n\treturn nil\n}", "func (t *TomlTree) Set(key string, value interface{}) {\n\tt.SetPath(strings.Split(key, \".\"), value)\n}", "func (m *Win32LobAppFileSystemDetection) SetPath(value *string)() {\n err := m.GetBackingStore().Set(\"path\", value)\n if err != nil {\n panic(err)\n }\n}", "func (_m *requestHeaderMapUpdatable) SetPath(path string) {\n\t_m.Called(path)\n}", "func (c *Config) SetPath(path string) {\n\tc.path = path\n}", "func (fs *FSCache) Set(key string, content []byte) error {\n\treturn ioutil.WriteFile(\n\t\tpath.Join(fs.Root, key),\n\t\tcontent,\n\t\t0600,\n\t)\n}", "func SetPackagePath(l *lua.LState, dir string) error {\n\treturn SetPackagePathRaw(l, PackagePath(dir))\n}", "func (v Values) SetAtPath(path string, value interface{}) error {\n\tsegs := strings.Split(path, \".\")\n\terr := v.setAtPath(segs, value)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error adding value at path %q: %s\", path, err)\n\t}\n\treturn nil\n}", "func (r *Input) SetPath(path string) error {\n\tquery, err := 
fetch.Parse(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Lock()\n\tr.Path = query\n\tr.Unlock()\n\treturn nil\n}", "func (j Json) Set(path string, value interface{}) (string, error) {\n\treturn sjson.Set(string(j), path, value)\n}", "func (w *LogWriter) SetPath(dir, fileName string) {\n\tw.logFileDir, w.logFileName = dir, fileName\n}", "func (b *Bucket) SetMetadataAtPath(pth string, md Metadata) {\n\tif b.Version == 0 {\n\t\treturn\n\t}\n\n\tx, ok := b.Metadata[pth]\n\tif ok {\n\t\tif md.Key != \"\" {\n\t\t\tx.Key = md.Key\n\t\t}\n\t\tif md.Roles != nil {\n\t\t\tx.Roles = md.Roles\n\t\t}\n\t\tif x.Info == nil {\n\t\t\tx.Info = md.Info\n\t\t} else if md.Info != nil {\n\t\t\tmergemap.Merge(x.Info, md.Info)\n\t\t}\n\t\tx.UpdatedAt = md.UpdatedAt\n\t\tb.Metadata[pth] = x\n\t} else {\n\t\tif md.Roles == nil {\n\t\t\tmd.Roles = make(map[did.DID]Role)\n\t\t}\n\t\tif md.Info == nil {\n\t\t\tmd.Info = make(map[string]interface{})\n\t\t}\n\t\tb.Metadata[pth] = md\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lookup returns the entry associated with the provided path and a boolean indicating whether the entry was found.
func (d Dir) Lookup(path string) (file reflow.File, ok bool) { file, ok = d.contents[path] return file, ok }
[ "func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {\n\tif r == nil {\n\t\treturn\n\t}\n\te, ok = r.m[path]\n\tif ok && e.Type == \"hardlink\" {\n\t\te, ok = r.m[e.LinkName]\n\t}\n\treturn\n}", "func (registry *Registry) Lookup(handle string) *task.Task {\n\t// TODO: Refactor the interface here to explicitly add an `ok`\n\t// return value (in the style of reading a map[...]...)\n\t// to differentiate a present nil value return vs. a\n\t// not-present-at-all value.\n\tregistry.lock.RLock()\n\tdefer registry.lock.RUnlock()\n\n\tif t, exists := registry.db[handle]; exists {\n\t\treturn t\n\t}\n\n\treturn nil\n}", "func (e entry) lookup(name interface{}) (interface{}, bool) {\n\tif res, ok := e[name]; ok {\n\t\treturn res, true\n\t}\n\treturn nil, false\n}", "func Lookup(path string) ([]byte, error) {\n\tf, ok := files[path]\n\tif !ok {\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn f.data, nil\n}", "func (r *Router) Lookup(method, path string) (Handle, Params, bool) {\n\tif root := r.trees[method]; root != nil {\n\t\treturn root.getValue(path)\n\t}\n\treturn nil, nil, false\n}", "func (f *EnvFile) Lookup(key string) (string, bool) {\n\tif f.Err() == nil {\n\t\tif val, ok := f.data[key]; ok {\n\t\t\treturn val, true\n\t\t}\n\t}\n\treturn \"\", false\n}", "func (fi *FsCache) Lookup(key *fs.FsFile) (*fs.FsData, bool) {\n\tfi.l.RLock()\n\tdefer fi.l.RUnlock()\n\tfdata, ok := fi.lookup(key)\n\treturn fdata, ok\n}", "func (r *HashRing) Lookup(key string) (string, bool) {\n\tstrs := r.LookupN(key, 1)\n\tif len(strs) == 0 {\n\t\treturn \"\", false\n\t}\n\treturn strs[0], true\n}", "func (r *Router) Lookup(method, path string) (Handle, Params, bool) {\n\tif root := r.trees[method]; root != nil {\n\t\thandle, ps, tsr := root.getValue(path, r.getParams)\n\t\tif handle == nil {\n\t\t\tr.putParams(ps)\n\t\t\treturn nil, nil, tsr\n\t\t}\n\t\tif ps == nil {\n\t\t\treturn handle, nil, tsr\n\t\t}\n\t\treturn handle, *ps, tsr\n\t}\n\treturn nil, nil, false\n}", "func (mp 
*metaPartition) Lookup(req *LookupReq, p *Packet) (err error) {\n\tdentry := &Dentry{\n\t\tParentId: req.ParentID,\n\t\tName: req.Name,\n\t}\n\tdentry, status := mp.getDentry(dentry)\n\tvar reply []byte\n\tif status == proto.OpOk {\n\t\tresp := &LookupResp{\n\t\t\tInode: dentry.Inode,\n\t\t\tMode: dentry.Type,\n\t\t}\n\t\treply, err = json.Marshal(resp)\n\t\tif err != nil {\n\t\t\tstatus = proto.OpErr\n\t\t\treply = []byte(err.Error())\n\t\t}\n\t}\n\tp.PacketErrorWithBody(status, reply)\n\treturn\n}", "func (fi *FsCache) lookup(key *fs.FsFile) (*fs.FsData, bool) {\n\tfdata, ok := fi.dict[key.String()]\n\treturn fdata, ok\n}", "func (l *List) Lookup(domain string) (e Entry, ok bool) {\n\t// STARTTLS List spec does not specify the behavior in regards of IDNA\n\t// domains. As a sanity check, we refuse to lookup non-IDNA conforming\n\t// domains and convert them to A-labels form as it is consistent with\n\t// MTA-STS.\n\t//\n\t// https://github.com/EFForg/starttls-everywhere/issues/156\n\tdomainACE, err := idna.ToASCII(domain)\n\tif err != nil {\n\t\treturn Entry{}, false\n\t}\n\tdomainACE = strings.ToLower(domainACE)\n\n\te, ok = l.Policies[domainACE]\n\tif !ok {\n\t\treturn Entry{}, false\n\t}\n\n\tif e.PolicyAlias != \"\" {\n\t\te, ok = l.PolicyAliases[e.PolicyAlias]\n\t\tif !ok {\n\t\t\treturn Entry{}, false\n\t\t}\n\t}\n\n\te.Domain = domainACE\n\n\treturn e, true\n}", "func (c *MockConfig) Lookup(key string) (interface{}, bool) {\n\tif key == \"invalid\" {\n\t\treturn nil, false\n\t}\n\tvalue, ok := c.Lookup(key)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\treturn value, true\n}", "func (state *State) LookUp(path string) (value reflect.Value, err error) {\n\tvalue, err = utils.LookUp(state.deviceInfo, path)\n\treturn\n}", "func (d *Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {\n\t// Ensure that entries have been loaded.\n\tif d.entries == nil {\n\t\tif err := d.load(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\ti, ok := 
d.entryIndex[name]\n\tif !ok {\n\t\treturn nil, fuse.ENOENT\n\t}\n\n\treturn d.fs.makeNode(d.entries[i], d.inode)\n}", "func Lookup(name string) (Plugin, bool) {\n\tp, found := directory[name]\n\treturn p, found\n}", "func (inst *hiddenInstance) Lookup(path ...string) Value {\n\treturn inst.value().Lookup(path...)\n}", "func (x *JSONProperties) Lookup(key string) bool {\n\tif x == nil {\n\t\treturn false\n\t}\n\n\tx.RLock()\n\tdefer x.RUnlock()\n\n\t_, ok := x.Properties[key]\n\treturn ok\n}", "func Lookup(path ...string) PathGetter {\n\treturn PathGetter{Path: path}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Path returns the path of the currently scanned entry.
func (s *DirScanner) Path() string { return s.path }
[ "func (e *DevEngine) Path() string { return e.path }", "func (r *readerWithStats) Path() string {\n\treturn r.r.Path()\n}", "func (rc *RC) Path() string {\n\treturn rc.path\n}", "func (i *Instance) Path() (string, error) {\n\tref, _, _, err := i.GetAsAny(WmiPathKey)\n\treturn ref.(string), err\n}", "func (i *Image) Path() string {\n\treturn i.p\n}", "func (i *Index) Path() string { return i.path }", "func (d *Dir) Path() string {\n\treturn d.h.Path\n}", "func (src *Source) Path() string {\n\treturn src.String()\n}", "func (c *Config) Path() string {\n\treturn c.path\n}", "func (f *File) Path() string {\n\treturn \"/\" + f.key\n}", "func (tree *Tree) Path() string {\n\treturn tree.path\n}", "func (tb *Table) Path() string {\n\treturn tb.path\n}", "func (d *Dir) Path() string {\n\treturn d.path\n}", "func (me *Image) Path() string {\n\treturn me.key.path\n}", "func (m *ItemReference) GetPath()(*string) {\n return m.path\n}", "func (f *File) Path() string {\n\treturn utils.EnsureLeadingSlash(f.key)\n}", "func (f *File) Path() string {\n\tfldr, _ := f.ParentFolder()\n\tif fldr == nil {\n\t\treturn \"\"\n\t}\n\treturn fldr.FullName\n}", "func (s *FileStore) Path() string { return s.path }", "func (p *SeriesPartition) Path() string { return p.path }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
File returns the file of the currently scanned entry.
func (s *DirScanner) File() reflow.File { return s.contents[s.path] }
[ "func (g IdxImportedASTFileInfo) File() File { return File{g.c.file} }", "func (cfg *Configuration) File() string {\n\treturn cfg.FilePath\n}", "func (pos *Position) File() string {\n\treturn *pos.file\n}", "func GetFile() string {\n\tfile, _ := GetFileNoByDepth(1)\n\treturn file\n}", "func (c *Config) File() string {\n\treturn c.file\n}", "func (err ErrPermission) File() File {\n\treturn err.file\n}", "func (k Key) File() string {\n\tif k.Type == PrivateKey {\n\t\treturn PrivateKeyFile(k.Usage, k.Version)\n\t}\n\treturn PublicKeyFile(k.Usage, k.IA, k.Version)\n}", "func (p *Post) File() string {\n\treturn fmt.Sprintf(\"%d%s\", p.Name, p.Extenstion)\n}", "func (c *Config) File() string { return c.viper.GetString(configFile) }", "func (c *caller) getFile() string {\n\treturn c.file\n}", "func (r *LocalRegistry) file() string {\n\treturn filepath.Join(core.RegistryPath(r.ArtHome), \"repository.json\")\n}", "func (p Pos) File() *File {\n\tif p.index() == 0 {\n\t\treturn nil\n\t}\n\treturn p.file\n}", "func File() (string, error) {\n\tdirPath, err := DirPath()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%s/config.json\", dirPath), nil\n}", "func (f Frame) File() string {\n\treturn f.tr.getStringDefault(f.file)\n}", "func (s *Store) getFile() string {\n\treturn s.File\n}", "func (f *Function) File() *File {\n\treturn f.file\n}", "func File(pass *analysis.Pass, pos token.Pos) *ast.File {\n\tfor _, f := range pass.Files {\n\t\tif f.Pos() <= pos && pos <= f.End() {\n\t\t\treturn f\n\t\t}\n\t}\n\treturn nil\n}", "func (f *Files) File(name string) string {\n\treturn filepath.Join(f.OutDir, name)\n}", "func (f *FileMem) File() *os.File {\n\treturn f.file\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sprint returns a prettyprinted version of value v with type t.
func Sprint(v T, t *types.T) string { switch arg := v.(type) { case shorter: return arg.Short() case stringer: return arg.String() case digester: return fmt.Sprintf("delayed(%v)", arg.Digest()) } switch t.Kind { case types.ErrorKind, types.BottomKind: panic("illegal type") case types.IntKind: return v.(*big.Int).String() case types.FloatKind: return v.(*big.Float).String() case types.StringKind: return fmt.Sprintf("%q", v.(string)) case types.BoolKind: if v.(bool) { return "true" } return "false" case types.FileKind: file := v.(reflow.File) if file.IsRef() { return fmt.Sprintf("file(source=%s, etag=%s)", file.Source, file.ETag) } return fmt.Sprintf("file(sha256=%s, size=%d)", file.ID, file.Size) case types.DirKind: dir := v.(Dir) entries := make([]string, 0, dir.Len()) for scan := dir.Scan(); scan.Scan(); { entries = append(entries, fmt.Sprintf("%q: %s", scan.Path(), Sprint(scan.File(), types.File))) } return fmt.Sprintf("dir(%s)", strings.Join(entries, ", ")) case types.FilesetKind: // We can't access the FileSet struct here because it would introduce // a circular dependency between reflow/ and reflow/values. We could // move the fileset definition elsewhere, but since this is anyway just a // backwards compatibility issue, we'll keep it opaque for now. 
d := v.(digester) return fmt.Sprintf("fileset(%s)", d.Digest().Short()) case types.UnitKind: return "()" case types.ListKind: list := v.(List) elems := make([]string, len(list)) for i, e := range list { elems[i] = Sprint(e, t.Elem) } return fmt.Sprintf("[%v]", strings.Join(elems, ", ")) case types.MapKind: var keys, values []string for _, entryp := range v.(*Map).tab { for entry := *entryp; entry != nil; entry = entry.Next { keys = append(keys, Sprint(entry.Key, t.Index)) values = append(values, Sprint(entry.Value, t.Elem)) } } elems := make([]string, len(keys)) for i := range keys { elems[i] = fmt.Sprintf("%s: %s", keys[i], values[i]) } return fmt.Sprintf("[%s]", strings.Join(elems, ", ")) case types.TupleKind: tuple := v.(Tuple) elems := make([]string, len(t.Fields)) for i, f := range t.Fields { elems[i] = Sprint(tuple[i], f.T) } return fmt.Sprintf("(%s)", strings.Join(elems, ", ")) case types.StructKind: s := v.(Struct) elems := make([]string, len(t.Fields)) for i, f := range t.Fields { elems[i] = fmt.Sprintf("%s: %s", f.Name, Sprint(s[f.Name], f.T)) } return fmt.Sprintf("{%s}", strings.Join(elems, ", ")) case types.ModuleKind: s := v.(Module) elems := make([]string, len(t.Fields)) for i, f := range t.Fields { elems[i] = fmt.Sprintf("val %s = %s", f.Name, Sprint(s[f.Name], f.T)) } return fmt.Sprintf("module{%s}", strings.Join(elems, "; ")) case types.SumKind: variant := v.(*Variant) variantTyp := t.VariantMap()[variant.Tag] if variantTyp == nil { return fmt.Sprintf("#%s", variant.Tag) } return fmt.Sprintf("#%s(%s)", variant.Tag, Sprint(variant.Elem, variantTyp)) case types.FuncKind: return fmt.Sprintf("func(?)") default: panic("unknown type " + t.String()) } }
[ "func PrettyPrint(v interface{}) {\n\tfmt.Printf(\"%# v\\n\", pretty.Formatter(v))\n}", "func formatValue(v cty.Value, indent int) string {\n\tif !v.IsKnown() {\n\t\t// This should never happen in practice because values should always\n\t\t// be known before we start asserting on them, but we'll deal with this\n\t\t// here anyway for robustness.\n\t\treturn \"(unknown)\"\n\t}\n\tif v.IsNull() {\n\t\tty := v.Type()\n\t\tswitch {\n\t\tcase ty == cty.String:\n\t\t\treturn \"tostring(null)\"\n\t\tcase ty == cty.Number:\n\t\t\treturn \"tonumber(null)\"\n\t\tcase ty == cty.Bool:\n\t\t\treturn \"tobool(null)\"\n\t\tcase ty.IsListType():\n\t\t\treturn fmt.Sprintf(\"tolist(null) /* of %s */\", ty.ElementType().FriendlyName())\n\t\tcase ty.IsSetType():\n\t\t\treturn fmt.Sprintf(\"toset(null) /* of %s */\", ty.ElementType().FriendlyName())\n\t\tcase ty.IsMapType():\n\t\t\treturn fmt.Sprintf(\"tomap(null) /* of %s */\", ty.ElementType().FriendlyName())\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"null /* %s */\", ty.FriendlyName())\n\t\t}\n\t}\n\n\tty := v.Type()\n\tswitch {\n\tcase ty.IsPrimitiveType():\n\t\tswitch ty {\n\t\tcase cty.String:\n\t\t\t// FIXME: If it's a multi-line string, better to render it using\n\t\t\t// HEREDOC-style syntax.\n\t\t\treturn strconv.Quote(v.AsString())\n\t\tcase cty.Number:\n\t\t\tbf := v.AsBigFloat()\n\t\t\treturn bf.Text('g', -1)\n\t\tcase cty.Bool:\n\t\t\tif v.True() {\n\t\t\t\treturn \"true\"\n\t\t\t} else {\n\t\t\t\treturn \"false\"\n\t\t\t}\n\t\t}\n\tcase ty.IsObjectType():\n\t\treturn formatMappingValue(v, indent)\n\tcase ty.IsTupleType():\n\t\treturn formatSequenceValue(v, indent)\n\tcase ty.IsListType():\n\t\treturn fmt.Sprintf(\"tolist(%s)\", formatSequenceValue(v, indent))\n\tcase ty.IsSetType():\n\t\treturn fmt.Sprintf(\"toset(%s)\", formatSequenceValue(v, indent))\n\tcase ty.IsMapType():\n\t\treturn fmt.Sprintf(\"tomap(%s)\", formatMappingValue(v, indent))\n\t}\n\n\t// Should never get here because there are no other 
types\n\treturn fmt.Sprintf(\"%#v\", v)\n}", "func printVal(l string, v interface{}) {\n\tswitch f := v.(type) {\n\tcase uint, uint64:\n\t\tprintFmt(l, \"%d\", f)\n\tcase string:\n\t\tprintFmt(l, \"%s\", f)\n\tdefault:\n\t\tprintFmt(l, \"%v\", f)\n\t}\n}", "func (p *Printer) PrintT(template string, v ...interface{}) FullPrinter {\n state, fc := p.initState()\n defer p.reset(state)\n p.formatter.PrintTemplate(fc, template, v...)\n state.Buffer.WriteNewLine()\n p.fc.Writer.Write(state.Buffer)\n return p\n}", "func Sprint(a ...interface{}) string {\n\treturn p.Sprint(a...)\n}", "func Tstring(v interface{}) string { return fmt.Sprintf(\"%v\", v) }", "func PrettyPrint(v interface{}) {\n\tb, _ := json.MarshalIndent(v, \"\", \" \")\n\tprintln(string(b))\n}", "func PrettyPrint(w io.Writer, v interface{}, prefix, suffix string, hooks PrettyPrintHooks) {\n\tif v == nil {\n\t\treturn\n\t}\n\n\tf := IndentFormatter(w, \"· \")\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tf.Format(\"\\npanic: %v\", e)\n\t\t}\n\t}()\n\n\tprettyPrint(nil, f, prefix, suffix, v, hooks, false, false)\n}", "func PrettyPrint(v interface{}, prefix string, indent string) (string, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to marshal\")\n\t}\n\n\tvar out bytes.Buffer\n\tif err := json.Indent(&out, b, prefix, indent); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to indent\")\n\t}\n\n\tif _, err := out.WriteString(\"\\n\"); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to write string\")\n\t}\n\n\treturn out.String(), nil\n}", "func Sprint(a ...interface{}) string {\n\treturn defPrinter.Sprint(a...)\n}", "func sprint(value interface{}) (string, error) {\n\tif value == nil {\n\t\treturn nilText, nil\n\t}\n\n\tswitch t := value.(type) {\n\tcase fmt.Stringer:\n\t\treturn t.String(), nil\n\tcase error:\n\t\treturn t.Error(), nil\n\tcase []byte: // Reflect doesn't handle []byte (easily)\n\t\treturn string(t), 
nil\n\t}\n\n\tvalueRfl := valueOf(value)\n\tswitch valueRfl.Kind() {\n\tcase reflect.Ptr:\n\t\tif valueRfl.IsNil() {\n\t\t\treturn nilText, nil\n\t\t}\n\t\treturn sprint(valueRfl.Elem().Interface())\n\n\tcase reflect.Struct:\n\t\treturn sprintStruct(value)\n\n\tcase reflect.Slice:\n\t\tif valueRfl.IsNil() {\n\t\t\treturn nilText, nil\n\t\t}\n\t\treturn sprintSlice(value)\n\n\tcase reflect.Map:\n\t\tif valueRfl.IsNil() {\n\t\t\treturn nilText, nil\n\t\t}\n\t\treturn sprintMap(value)\n\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,\n\t\treflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn fmt.Sprintf(\"%d\", value), nil\n\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn fmt.Sprintf(\"%.2f\", valueRfl.Float()), nil\n\n\tcase reflect.Bool:\n\t\treturn fmt.Sprintf(\"%t\", valueRfl.Bool()), nil\n\n\tcase reflect.String:\n\t\tif v, ok := value.(strfmt.UUID); ok {\n\t\t\treturn v.String(), nil\n\t\t}\n\t\treturn value.(string), nil\n\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unknown type: %s\", valueRfl.Type().String())\n\t}\n}", "func toString(v interface{}) string {\n\treturn fmt.Sprint(v)\n}", "func PrettyPrint(i interface{}) string {\n\tswitch t := i.(type) {\n\tcase nil:\n\t\treturn \"None\"\n\tcase string:\n\t\treturn capitalizeFirst(t)\n\tdefault:\n\t\treturn capitalizeFirst(fmt.Sprintf(\"%s\", t))\n\t}\n}", "func pv(v interface{}) {\n\tfmt.Printf(\"%v, %T\\n\", v, v)\n}", "func (p Property) Sprint(a ...interface{}) string {\n\treturn fmt.Sprintf(\"%s%s%s\", p, fmt.Sprint(a...), Reset)\n}", "func (v Vector) oneLineSprint(conf *config.Config, parens, spaces bool) (string, []int) {\n\tvar b bytes.Buffer\n\tif parens {\n\t\tspaces = true\n\t}\n\tcols := make([]int, len(v))\n\tfor i, elem := range v {\n\t\tif spaces && i > 0 {\n\t\t\tfmt.Fprint(&b, \" \")\n\t\t}\n\t\tif parens && !IsScalarType(elem) {\n\t\t\tfmt.Fprintf(&b, \"(%s)\", elem.Sprint(conf))\n\t\t} else {\n\t\t\tfmt.Fprintf(&b, \"%s\", 
elem.Sprint(conf))\n\t\t}\n\t\tcols[i] = b.Len()\n\t}\n\treturn b.String(), cols\n}", "func (p *Printer) formatValue(value interface{}) string {\n\n\t// If this value has a String() method then we should use that:\n\tif stringable, ok := value.(stringable); ok {\n\t\treturn p.spewConfig.Sprintf(\"%s\", stringable.String())\n\t}\n\n\treturn p.spewConfig.Sprintf(\"%v\", value)\n}", "func Sprint(val value.Value) string {\n\treturn val.Sprint(ivyCfg)\n}", "func (s *state) printValue(n parse.Node, v reflect.Value) {\n\ts.at(n)\n\tiface, ok := printableValue(v)\n\tif !ok {\n\t\ts.errorf(\"can't print %s of type %s\", n, v.Type())\n\t}\n\tfmt.Fprint(s.wr, iface)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Digest computes the digest for value v, given type t.
func Digest(v T, t *types.T) digest.Digest { w := Digester.NewWriter() WriteDigest(w, v, t) return w.Digest() }
[ "func WriteDigest(w io.Writer, v T, t *types.T) {\n\tif d, ok := v.(digester); ok {\n\t\tdigest.WriteDigest(w, d.Digest())\n\t\treturn\n\t}\n\n\tw.Write([]byte{t.Kind.ID()})\n\tswitch t.Kind {\n\tcase types.ErrorKind, types.BottomKind, types.RefKind:\n\t\tpanic(\"illegal type\")\n\tcase types.IntKind:\n\t\tvi := v.(*big.Int)\n\t\t// Bytes returns the normalized big-endian (i.e., free of a zero\n\t\t// prefix) representation of the absolute value of the integer.\n\t\tp := vi.Bytes()\n\t\tif len(p) == 0 {\n\t\t\t// This is the representation of \"0\"\n\t\t\treturn\n\t\t}\n\t\tif p[0] == 0 {\n\t\t\tpanic(\"big.Int byte representation is not normalized\")\n\t\t}\n\t\tif vi.Sign() < 0 {\n\t\t\tw.Write([]byte{0})\n\t\t}\n\t\tw.Write(p)\n\tcase types.FloatKind:\n\t\tw.Write([]byte(v.(*big.Float).Text('e', 10)))\n\tcase types.StringKind:\n\t\tio.WriteString(w, v.(string))\n\tcase types.BoolKind:\n\t\tif v.(bool) {\n\t\t\tw.Write(trueByte)\n\t\t} else {\n\t\t\tw.Write(falseByte)\n\t\t}\n\tcase types.FileKind:\n\t\tdigest.WriteDigest(w, v.(reflow.File).Digest())\n\tcase types.DirKind:\n\t\tdir := v.(Dir)\n\t\tfor scan := dir.Scan(); scan.Scan(); {\n\t\t\tio.WriteString(w, scan.Path())\n\t\t\tdigest.WriteDigest(w, scan.File().Digest())\n\t\t}\n\t// Filesets are digesters, so they don't need to be handled here.\n\tcase types.UnitKind:\n\tcase types.ListKind:\n\t\twriteLength(w, len(v.(List)))\n\t\tfor _, e := range v.(List) {\n\t\t\tWriteDigest(w, e, t.Elem)\n\t\t}\n\tcase types.MapKind:\n\t\tm := v.(*Map)\n\t\twriteLength(w, m.Len())\n\t\ttype kd struct {\n\t\t\tk T\n\t\t\td digest.Digest\n\t\t}\n\t\tkeys := make([]kd, 0, m.Len())\n\t\tfor _, entryp := range m.tab {\n\t\t\tfor entry := *entryp; entry != nil; entry = entry.Next {\n\t\t\t\tkeys = append(keys, kd{entry.Key, Digest(entry.Key, t.Index)})\n\t\t\t}\n\t\t}\n\t\t// Sort the map so that it produces a consistent digest. 
We sort\n\t\t// its keys by their digest because the values may not yet be\n\t\t// evaluated.\n\t\tsort.Slice(keys, func(i, j int) bool {\n\t\t\treturn keys[i].d.Less(keys[j].d)\n\t\t})\n\t\tfor _, k := range keys {\n\t\t\tWriteDigest(w, k.k, t.Index)\n\t\t\tWriteDigest(w, m.Lookup(k.d, k.k), t.Elem)\n\t\t}\n\tcase types.TupleKind:\n\t\twriteLength(w, len(t.Fields))\n\t\ttuple := v.(Tuple)\n\t\tfor i, f := range t.Fields {\n\t\t\tWriteDigest(w, tuple[i], f.T)\n\t\t}\n\tcase types.StructKind:\n\t\twriteLength(w, len(t.Fields))\n\t\ts := v.(Struct)\n\t\tkeys := make([]string, len(t.Fields))\n\t\tfor i, f := range t.Fields {\n\t\t\tkeys[i] = f.Name\n\t\t}\n\t\tsort.Strings(keys)\n\t\tfm := t.FieldMap()\n\t\tfor _, k := range keys {\n\t\t\tWriteDigest(w, s[k], fm[k])\n\t\t}\n\tcase types.ModuleKind:\n\t\twriteLength(w, len(t.Fields))\n\t\ts := v.(Module)\n\t\tkeys := make([]string, len(t.Fields))\n\t\tfor i, f := range t.Fields {\n\t\t\tkeys[i] = f.Name\n\t\t}\n\t\tsort.Strings(keys)\n\t\tfm := t.FieldMap()\n\t\tfor _, k := range keys {\n\t\t\tWriteDigest(w, s[k], fm[k])\n\t\t}\n\tcase types.SumKind:\n\t\tvariant := v.(*Variant)\n\t\tio.WriteString(w, variant.Tag)\n\t\tWriteDigest(w, variant.Elem, t.VariantMap()[variant.Tag])\n\tcase types.FuncKind:\n\t\tdigest.WriteDigest(w, v.(Func).Digest())\n\t}\n}", "func (q *Qsign) Digest(v interface{}) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\n\tif q.prefixGenerator != nil {\n\t\tif _, err := buf.WriteString(q.prefixGenerator()); err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t}\n\t}\n\n\tvs := getStructValues(v)\n\n\tpairs := []string{}\n\tfor _, f := range vs {\n\t\tif !q.filter(f.name, f.value) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar buf strings.Builder\n\t\tbuf.WriteString(f.name)\n\t\tbuf.WriteString(q.connector)\n\t\tbuf.WriteString(f.value)\n\n\t\tpairs = append(pairs, buf.String())\n\t}\n\tconnected := strings.Join(pairs, q.delimiter)\n\tbuf.WriteString(connected)\n\n\tif q.suffixGenerator != nil {\n\t\tif _, err := 
buf.WriteString(q.suffixGenerator()); err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t}\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (t TagGenDigest) Digest() (regv1.Hash, error) {\n\treturn regv1.Hash{\n\t\tAlgorithm: t.Algorithm,\n\t\tHex: t.Hex,\n\t}, nil\n}", "func (h Hasher) Digest(b []byte) Hash {\n\thr := h.New()\n\thr.Write(b)\n\treturn hr.Sum([]byte{})\n}", "func (suite *testSuite) roundTripDigestTest(t *testValue) {\n\tvs := NewTestValueStore()\n\tr := vs.WriteValue(t.value)\n\tv2 := vs.ReadValue(r.TargetHash())\n\n\tsuite.True(v2.Equals(t.value), t.description)\n\tsuite.True(t.value.Equals(v2), t.description)\n\tsuite.Equal(t.expectedRef, r.TargetHash().String(), t.description)\n}", "func HashFromDigest(algo Type, digest Digest) Hash {\n\treturn HashFromSum(algo, digest[:])\n}", "func (o VirtualDatabaseStatusOutput) Digest() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v VirtualDatabaseStatus) *string { return v.Digest }).(pulumi.StringPtrOutput)\n}", "func (b SignDetail) Digest() (common.Hash, error) {\n\tvar hash common.Hash\n\tvar signFormatData apitypes.TypedData\n\tif err := json.Unmarshal([]byte(b.SignSchema.Schema), &signFormatData); err != nil {\n\t\treturn hash, err\n\t}\n\tparams, err := b.GetContractParams()\n\tif err != nil {\n\t\treturn hash, err\n\t}\n\tdata, err := buildTypedData(signFormatData, params)\n\n\tif err != nil {\n\t\treturn hash, err\n\t}\n\thash, err = crypto2.Keccak256HashEIP712(data)\n\treturn hash, err\n}", "func digest(object interface{}) string {\n\tj, _ := json.Marshal(object)\n\thash := sha256.New()\n\thash.Write(j)\n\tdigest := hex.EncodeToString(hash.Sum(nil))\n\treturn digest\n}", "func Hash(value int64) uint64 {\n\treturn FNVHash64(uint64(value))\n}", "func (t *Tree) Digest() *crypto.Digest { return t.dig }", "func digest(res *MTnode, dbar *mpb.Bar) {\n\tif res == nil {\n\t\tpanic(\"res is nil\")\n\t}\n\n\tif !res.IsDir() {\n\t\tfor _, csum := range calcChecksumKinds 
{\n\t\t\tres.Checksum(csum)\n\t\t}\n\t}\n\n\tif dbar != nil {\n\t\tdbar.Increment()\n\t}\n}", "func (u Unit) Digest() string {\n\tsha := sha256.New()\n\tpb := u.Proto\n\tif pb == nil {\n\t\tpb = new(apb.CompilationUnit)\n\t}\n\tput := func(tag string, ss ...string) {\n\t\tfmt.Fprintln(sha, tag)\n\t\tfor _, s := range ss {\n\t\t\tfmt.Fprint(sha, s, \"\\x00\")\n\t\t}\n\t}\n\tputv := func(tag string, v *spb.VName) {\n\t\tput(tag, v.GetSignature(), v.GetCorpus(), v.GetRoot(), v.GetPath(), v.GetLanguage())\n\t}\n\tputv(\"CU\", pb.VName)\n\tfor _, ri := range pb.RequiredInput {\n\t\tputv(\"RI\", ri.VName)\n\t\tput(\"IN\", ri.Info.GetPath(), ri.Info.GetDigest())\n\t}\n\tput(\"ARG\", pb.Argument...)\n\tput(\"OUT\", pb.OutputKey)\n\tput(\"SRC\", pb.SourceFile...)\n\tput(\"CWD\", pb.WorkingDirectory)\n\tput(\"CTX\", pb.EntryContext)\n\tfor _, env := range pb.Environment {\n\t\tput(\"ENV\", env.Name, env.Value)\n\t}\n\tfor _, d := range pb.Details {\n\t\tput(\"DET\", d.TypeUrl, string(d.Value))\n\t}\n\treturn hex.EncodeToString(sha.Sum(nil)[:])\n}", "func (tc ScannerTestcase) Digest() claircore.Digest {\n\td, err := claircore.ParseDigest(tc.Hash)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn d\n}", "func (_Swap *SwapCaller) Digest(opts *bind.CallOpts, _ids []*big.Int, _tos []common.Address, _amounts []*big.Int) ([32]byte, error) {\n\tvar out []interface{}\n\terr := _Swap.contract.Call(opts, &out, \"digest\", _ids, _tos, _amounts)\n\n\tif err != nil {\n\t\treturn *new([32]byte), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)\n\n\treturn out0, err\n\n}", "func (d Digest) Hash() crypto.Hash { return d.h }", "func (h Hasher) Hash(t types.Type) uint32 {\n\thash, ok := h.memo[t]\n\tif !ok {\n\t\thash = h.hashFor(t)\n\t\th.memo[t] = hash\n\t}\n\treturn hash\n}", "func (n *Node) Digest() ([]byte, error) {\n\t// HMAC(Nonce,Inputs[*]|(Cryptex||Secret||Marker))\n\thash := hmac.New(sha256.New, n.Nonce)\n\n\tfor _, input := range n.Inputs {\n\t\tif _, err := 
hash.Write(input); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t)\n\n\tswitch {\n\tcase n.cryptex != nil:\n\t\tdata, err = n.cryptex.Marshal()\n\tcase n.secret != nil:\n\t\tdata, err = n.secret.Marshal()\n\tcase n.Marker != nil:\n\t\tdata, err = n.Marker.Marshal()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := hash.Write(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hash.Sum(nil), nil\n}", "func (h Hasher) hashFor(t types.Type) uint32 {\n\t// See Identical for rationale.\n\tswitch t := t.(type) {\n\tcase *types.Basic:\n\t\treturn uint32(t.Kind())\n\n\tcase *types.Array:\n\t\treturn 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())\n\n\tcase *types.Slice:\n\t\treturn 9049 + 2*h.Hash(t.Elem())\n\n\tcase *types.Struct:\n\t\tvar hash uint32 = 9059\n\t\tfor i, n := 0, t.NumFields(); i < n; i++ {\n\t\t\tf := t.Field(i)\n\t\t\tif f.Anonymous() {\n\t\t\t\thash += 8861\n\t\t\t}\n\t\t\thash += hashString(t.Tag(i))\n\t\t\thash += hashString(f.Name()) // (ignore f.Pkg)\n\t\t\thash += h.Hash(f.Type())\n\t\t}\n\t\treturn hash\n\n\tcase *types.Pointer:\n\t\treturn 9067 + 2*h.Hash(t.Elem())\n\n\tcase *types.Signature:\n\t\tvar hash uint32 = 9091\n\t\tif t.Variadic() {\n\t\t\thash *= 8863\n\t\t}\n\n\t\t// Use a separate hasher for types inside of the signature, where type\n\t\t// parameter identity is modified to be (index, constraint). We must use a\n\t\t// new memo for this hasher as type identity may be affected by this\n\t\t// masking. 
For example, in func[T any](*T), the identity of *T depends on\n\t\t// whether we are mapping the argument in isolation, or recursively as part\n\t\t// of hashing the signature.\n\t\t//\n\t\t// We should never encounter a generic signature while hashing another\n\t\t// generic signature, but defensively set sigTParams only if h.mask is\n\t\t// unset.\n\t\ttparams := typeparams.ForSignature(t)\n\t\tif h.sigTParams == nil && tparams.Len() != 0 {\n\t\t\th = Hasher{\n\t\t\t\t// There may be something more efficient than discarding the existing\n\t\t\t\t// memo, but it would require detecting whether types are 'tainted' by\n\t\t\t\t// references to type parameters.\n\t\t\t\tmemo: make(map[types.Type]uint32),\n\t\t\t\t// Re-using ptrMap ensures that pointer identity is preserved in this\n\t\t\t\t// hasher.\n\t\t\t\tptrMap: h.ptrMap,\n\t\t\t\tsigTParams: tparams,\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < tparams.Len(); i++ {\n\t\t\ttparam := tparams.At(i)\n\t\t\thash += 7 * h.Hash(tparam.Constraint())\n\t\t}\n\n\t\treturn hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())\n\n\tcase *typeparams.Union:\n\t\treturn h.hashUnion(t)\n\n\tcase *types.Interface:\n\t\t// Interfaces are identical if they have the same set of methods, with\n\t\t// identical names and types, and they have the same set of type\n\t\t// restrictions. 
See go/types.identical for more details.\n\t\tvar hash uint32 = 9103\n\n\t\t// Hash methods.\n\t\tfor i, n := 0, t.NumMethods(); i < n; i++ {\n\t\t\t// Method order is not significant.\n\t\t\t// Ignore m.Pkg().\n\t\t\tm := t.Method(i)\n\t\t\t// Use shallow hash on method signature to\n\t\t\t// avoid anonymous interface cycles.\n\t\t\thash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())\n\t\t}\n\n\t\t// Hash type restrictions.\n\t\tterms, err := typeparams.InterfaceTermSet(t)\n\t\t// if err != nil t has invalid type restrictions.\n\t\tif err == nil {\n\t\t\thash += h.hashTermSet(terms)\n\t\t}\n\n\t\treturn hash\n\n\tcase *types.Map:\n\t\treturn 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())\n\n\tcase *types.Chan:\n\t\treturn 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())\n\n\tcase *types.Named:\n\t\thash := h.hashPtr(t.Obj())\n\t\ttargs := typeparams.NamedTypeArgs(t)\n\t\tfor i := 0; i < targs.Len(); i++ {\n\t\t\ttarg := targs.At(i)\n\t\t\thash += 2 * h.Hash(targ)\n\t\t}\n\t\treturn hash\n\n\tcase *typeparams.TypeParam:\n\t\treturn h.hashTypeParam(t)\n\n\tcase *types.Tuple:\n\t\treturn h.hashTuple(t)\n\t}\n\n\tpanic(fmt.Sprintf(\"%T: %v\", t, t))\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WriteDigest writes digest material for value v (given type t) into the writer w.
func WriteDigest(w io.Writer, v T, t *types.T) { if d, ok := v.(digester); ok { digest.WriteDigest(w, d.Digest()) return } w.Write([]byte{t.Kind.ID()}) switch t.Kind { case types.ErrorKind, types.BottomKind, types.RefKind: panic("illegal type") case types.IntKind: vi := v.(*big.Int) // Bytes returns the normalized big-endian (i.e., free of a zero // prefix) representation of the absolute value of the integer. p := vi.Bytes() if len(p) == 0 { // This is the representation of "0" return } if p[0] == 0 { panic("big.Int byte representation is not normalized") } if vi.Sign() < 0 { w.Write([]byte{0}) } w.Write(p) case types.FloatKind: w.Write([]byte(v.(*big.Float).Text('e', 10))) case types.StringKind: io.WriteString(w, v.(string)) case types.BoolKind: if v.(bool) { w.Write(trueByte) } else { w.Write(falseByte) } case types.FileKind: digest.WriteDigest(w, v.(reflow.File).Digest()) case types.DirKind: dir := v.(Dir) for scan := dir.Scan(); scan.Scan(); { io.WriteString(w, scan.Path()) digest.WriteDigest(w, scan.File().Digest()) } // Filesets are digesters, so they don't need to be handled here. case types.UnitKind: case types.ListKind: writeLength(w, len(v.(List))) for _, e := range v.(List) { WriteDigest(w, e, t.Elem) } case types.MapKind: m := v.(*Map) writeLength(w, m.Len()) type kd struct { k T d digest.Digest } keys := make([]kd, 0, m.Len()) for _, entryp := range m.tab { for entry := *entryp; entry != nil; entry = entry.Next { keys = append(keys, kd{entry.Key, Digest(entry.Key, t.Index)}) } } // Sort the map so that it produces a consistent digest. We sort // its keys by their digest because the values may not yet be // evaluated. 
sort.Slice(keys, func(i, j int) bool { return keys[i].d.Less(keys[j].d) }) for _, k := range keys { WriteDigest(w, k.k, t.Index) WriteDigest(w, m.Lookup(k.d, k.k), t.Elem) } case types.TupleKind: writeLength(w, len(t.Fields)) tuple := v.(Tuple) for i, f := range t.Fields { WriteDigest(w, tuple[i], f.T) } case types.StructKind: writeLength(w, len(t.Fields)) s := v.(Struct) keys := make([]string, len(t.Fields)) for i, f := range t.Fields { keys[i] = f.Name } sort.Strings(keys) fm := t.FieldMap() for _, k := range keys { WriteDigest(w, s[k], fm[k]) } case types.ModuleKind: writeLength(w, len(t.Fields)) s := v.(Module) keys := make([]string, len(t.Fields)) for i, f := range t.Fields { keys[i] = f.Name } sort.Strings(keys) fm := t.FieldMap() for _, k := range keys { WriteDigest(w, s[k], fm[k]) } case types.SumKind: variant := v.(*Variant) io.WriteString(w, variant.Tag) WriteDigest(w, variant.Elem, t.VariantMap()[variant.Tag]) case types.FuncKind: digest.WriteDigest(w, v.(Func).Digest()) } }
[ "func Digest(v T, t *types.T) digest.Digest {\n\tw := Digester.NewWriter()\n\tWriteDigest(w, v, t)\n\treturn w.Digest()\n}", "func WriteDigest(w io.Writer, d Digest) (n int, err error) {\n\tif d.IsZero() {\n\t\tpanic(\"digest.WriteDigest: attempted to write a zero digest\")\n\t}\n\tdigestHash, ok := cryptoToDigestHashes[d.h]\n\tif !ok {\n\t\treturn n, fmt.Errorf(\"cannot convert %v to a digestHash\", d.h)\n\t}\n\tb := [2]byte{byte(digestHash >> 8), byte(digestHash & 0xff)}\n\tn, err = w.Write(b[:])\n\tif err != nil {\n\t\treturn n, err\n\t}\n\tm, err := w.Write(d.b[:d.h.Size()])\n\treturn n + m, err\n}", "func (q *Qsign) Digest(v interface{}) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\n\tif q.prefixGenerator != nil {\n\t\tif _, err := buf.WriteString(q.prefixGenerator()); err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t}\n\t}\n\n\tvs := getStructValues(v)\n\n\tpairs := []string{}\n\tfor _, f := range vs {\n\t\tif !q.filter(f.name, f.value) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar buf strings.Builder\n\t\tbuf.WriteString(f.name)\n\t\tbuf.WriteString(q.connector)\n\t\tbuf.WriteString(f.value)\n\n\t\tpairs = append(pairs, buf.String())\n\t}\n\tconnected := strings.Join(pairs, q.delimiter)\n\tbuf.WriteString(connected)\n\n\tif q.suffixGenerator != nil {\n\t\tif _, err := buf.WriteString(q.suffixGenerator()); err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t}\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (o VirtualDatabaseStatusOutput) Digest() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v VirtualDatabaseStatus) *string { return v.Digest }).(pulumi.StringPtrOutput)\n}", "func (d Writer) Digest() Digest {\n\tif err := d.w.Flush(); err != nil {\n\t\tpanic(fmt.Sprintf(\"digest.Digest.Flush: %v\", err))\n\t}\n\treturn New(d.h, d.hw.Sum(nil))\n}", "func Digest(d collection.Document) ([]byte, error) {\n\trequest := make(map[string]interface{})\n\t//{ \"index\" : { \"_index\" : \"logs-2017.05.28\", \"_id\" : \"1\" } }\n\tdocDescription := 
make(map[string]interface{})\n\tdocDescription[\"_index\"] = RenderIndexName(d)\n\tdocDescription[\"_id\"] = d.ID\n\trequest[\"index\"] = docDescription\n\tbody, err := json.Marshal(request)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"json.Marshal.%s\", err)\n\t}\n\tbody = append(body, '\\n')\n\tbody = append(body, d.Body...)\n\tbody = append(body, '\\n')\n\treturn body, nil\n}", "func (o VirtualDatabaseStatusPtrOutput) Digest() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VirtualDatabaseStatus) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Digest\n\t}).(pulumi.StringPtrOutput)\n}", "func (oc *OAuthConsumer) digest(key string, m string) string {\n\th := hmac.NewSHA1([]byte(key))\n\th.Write([]byte(m))\n\treturn base64encode(h.Sum())\n\n/*\ts := bytes.TrimSpace(h.Sum())\n\td := make([]byte, base64.StdEncoding.EncodedLen(len(s)))\n\tbase64.StdEncoding.Encode(d, s)\n\tds := strings.TrimSpace(bytes.NewBuffer(d).String())\n*/\n//\treturn ds\n\n}", "func digest(object interface{}) string {\n\tj, _ := json.Marshal(object)\n\thash := sha256.New()\n\thash.Write(j)\n\tdigest := hex.EncodeToString(hash.Sum(nil))\n\treturn digest\n}", "func (t *taprootSigHashOptions) writeDigestExtensions(w io.Writer) error {\n\tswitch t.extFlag {\n\t// The base extension, used for tapscript keypath spends doesn't modify\n\t// the digest at all.\n\tcase baseSigHashExtFlag:\n\t\treturn nil\n\n\t// The tapscript base leaf version extension adds the leaf hash, key\n\t// version, and code separator position to the final digest.\n\tcase tapscriptSighashExtFlag:\n\t\tif _, err := w.Write(t.tapLeafHash); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := w.Write([]byte{t.keyVersion}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr := binary.Write(w, binary.LittleEndian, t.codeSepPos)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (h Hasher) Digest(b []byte) Hash {\n\thr := h.New()\n\thr.Write(b)\n\treturn hr.Sum([]byte{})\n}", "func 
digest(res *MTnode, dbar *mpb.Bar) {\n\tif res == nil {\n\t\tpanic(\"res is nil\")\n\t}\n\n\tif !res.IsDir() {\n\t\tfor _, csum := range calcChecksumKinds {\n\t\t\tres.Checksum(csum)\n\t\t}\n\t}\n\n\tif dbar != nil {\n\t\tdbar.Increment()\n\t}\n}", "func (suite *testSuite) roundTripDigestTest(t *testValue) {\n\tvs := NewTestValueStore()\n\tr := vs.WriteValue(t.value)\n\tv2 := vs.ReadValue(r.TargetHash())\n\n\tsuite.True(v2.Equals(t.value), t.description)\n\tsuite.True(t.value.Equals(v2), t.description)\n\tsuite.Equal(t.expectedRef, r.TargetHash().String(), t.description)\n}", "func writeValue(buf *bytes.Buffer, fv *reflect.Value, key string) error {\n\tswitch fv.Kind() {\n\tcase reflect.Int:\n\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = %v\\n\", key, fv.Int()))\n\t\treturn err\n\tcase reflect.Float64:\n\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = %v\\n\", key, fv.Float()))\n\t\treturn err\n\tcase reflect.Bool:\n\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = %v\\n\", key, fv.Bool()))\n\t\treturn err\n\tcase reflect.String:\n\t\tqval := strings.Replace(fv.String(), \"\\n\", \"\\\\n\", -1)\n\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = %s\\n\", key, qval))\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (n *Node) Digest() ([]byte, error) {\n\t// HMAC(Nonce,Inputs[*]|(Cryptex||Secret||Marker))\n\thash := hmac.New(sha256.New, n.Nonce)\n\n\tfor _, input := range n.Inputs {\n\t\tif _, err := hash.Write(input); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t)\n\n\tswitch {\n\tcase n.cryptex != nil:\n\t\tdata, err = n.cryptex.Marshal()\n\tcase n.secret != nil:\n\t\tdata, err = n.secret.Marshal()\n\tcase n.Marker != nil:\n\t\tdata, err = n.Marker.Marshal()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := hash.Write(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hash.Sum(nil), nil\n}", "func NVWriteValueAuth(rw io.ReadWriter, index, offset uint32, data []byte, auth []byte) error 
{\n\tif auth == nil {\n\t\treturn fmt.Errorf(\"no auth value given but mandatory\")\n\t}\n\tsharedSecret, osapr, err := newOSAPSession(rw, etOwner, khOwner, auth[:])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start new auth session: %v\", err)\n\t}\n\tdefer osapr.Close(rw)\n\tdefer zeroBytes(sharedSecret[:])\n\tauthIn := []interface{}{ordNVWriteValueAuth, index, offset, len(data), data}\n\tca, err := newCommandAuth(osapr.AuthHandle, osapr.NonceEven, nil, sharedSecret[:], authIn)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to construct auth fields: %v\", err)\n\t}\n\tdata, ra, ret, err := nvWriteValue(rw, index, offset, uint32(len(data)), data, ca)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write to NVRAM: %v\", err)\n\t}\n\traIn := []interface{}{ret, ordNVWriteValueAuth, tpmutil.U32Bytes(data)}\n\tif err := ra.verify(ca.NonceOdd, sharedSecret[:], raIn); err != nil {\n\t\treturn fmt.Errorf(\"failed to verify authenticity of response: %v\", err)\n\t}\n\treturn nil\n}", "func (o *ImageSource) SetDigest(v RegistryDigestSource) {\n\to.Digest.Set(&v)\n}", "func (me TDigestValueType) String() string { return xsdt.Base64Binary(me).String() }", "func (h *Hasher) WriteHash(p [32]byte) {\n\tcopy(h.buf[:], p[:])\n\th.h.Write(h.buf[:32])\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
OnUpdateManifest is called when a new manifest is added. It updates metadb according to the type of image pushed(normal images, signatues, etc.). In care of any errors, it makes sure to keep consistency between metadb and the image store.
func OnUpdateManifest(repo, reference, mediaType string, digest godigest.Digest, body []byte, storeController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger, ) error { imgStore := storeController.GetImageStore(repo) // check if image is a signature isSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, body, reference) if err != nil { log.Error().Err(err).Msg("can't check if image is a signature or not") if err := imgStore.DeleteImageManifest(repo, reference, false); err != nil { log.Error().Err(err).Str("manifest", reference).Str("repository", repo).Msg("couldn't remove image manifest in repo") return err } return err } metadataSuccessfullySet := true if isSignature { layersInfo, errGetLayers := GetSignatureLayersInfo(repo, reference, digest.String(), signatureType, body, imgStore, log) if errGetLayers != nil { metadataSuccessfullySet = false err = errGetLayers } else { err = metaDB.AddManifestSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{ SignatureType: signatureType, SignatureDigest: digest.String(), LayersInfo: layersInfo, }) if err != nil { log.Error().Err(err).Msg("metadb: error while putting repo meta") metadataSuccessfullySet = false } else { err = metaDB.UpdateSignaturesValidity(repo, signedManifestDigest) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", reference).Str("digest", signedManifestDigest.String()).Msg("metadb: failed verify signatures validity for signed image") metadataSuccessfullySet = false } } } } else { err = SetImageMetaFromInput(repo, reference, mediaType, digest, body, imgStore, metaDB, log) if err != nil { metadataSuccessfullySet = false } } if !metadataSuccessfullySet { log.Info().Str("tag", reference).Str("repository", repo).Msg("uploading image meta was unsuccessful for tag in repo") if err := imgStore.DeleteImageManifest(repo, reference, false); err != nil { log.Error().Err(err).Str("reference", reference).Str("repository", repo). 
Msg("couldn't remove image manifest in repo") return err } return err } return nil }
[ "func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest, manifestBlob []byte,\n\tstoreController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,\n) error {\n\timgStore := storeController.GetImageStore(repo)\n\n\tisSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, manifestBlob,\n\t\treference)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"can't check if image is a signature or not\")\n\n\t\treturn err\n\t}\n\n\tmanageRepoMetaSuccessfully := true\n\n\tif isSignature {\n\t\terr = metaDB.DeleteSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{\n\t\t\tSignatureDigest: digest.String(),\n\t\t\tSignatureType: signatureType,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"metadb: can't check if image is a signature or not\")\n\t\t\tmanageRepoMetaSuccessfully = false\n\t\t}\n\t} else {\n\t\terr = metaDB.DeleteRepoTag(repo, reference)\n\t\tif err != nil {\n\t\t\tlog.Info().Msg(\"metadb: restoring image store\")\n\n\t\t\t// restore image store\n\t\t\t_, _, err := imgStore.PutImageManifest(repo, reference, mediaType, manifestBlob)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while restoring image store, database is not consistent\")\n\t\t\t}\n\n\t\t\tmanageRepoMetaSuccessfully = false\n\t\t}\n\n\t\tif referredDigest, hasSubject := common.GetReferredSubject(manifestBlob); hasSubject {\n\t\t\terr := metaDB.DeleteReferrer(repo, referredDigest, digest)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while deleting referrer\")\n\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif !manageRepoMetaSuccessfully {\n\t\tlog.Info().Str(\"tag\", reference).Str(\"repository\", repo).\n\t\t\tMsg(\"metadb: deleting image meta was unsuccessful for tag in repo\")\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) 
{\n\tdcontext.GetLogger(imh).Debug(\"PutImageManifest\")\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\tvar jsonBuf bytes.Buffer\n\tif err := copyFullPayload(imh, w, r, &jsonBuf, maxManifestBodySize, \"image manifest PUT\"); err != nil {\n\t\t// copyFullPayload reports the error if necessary\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error()))\n\t\treturn\n\t}\n\n\tmediaType := r.Header.Get(\"Content-Type\")\n\tmanifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))\n\t\treturn\n\t}\n\n\tif imh.Digest != \"\" {\n\t\tif desc.Digest != imh.Digest {\n\t\t\tdcontext.GetLogger(imh).Errorf(\"payload digest does not match: %q != %q\", desc.Digest, imh.Digest)\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\treturn\n\t\t}\n\t} else if imh.Tag != \"\" {\n\t\timh.Digest = desc.Digest\n\t} else {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(\"no tag or digest specified\"))\n\t\treturn\n\t}\n\n\tisAnOCIManifest := mediaType == v1.MediaTypeImageManifest || mediaType == v1.MediaTypeImageIndex\n\n\tif isAnOCIManifest {\n\t\tdcontext.GetLogger(imh).Debug(\"Putting an OCI Manifest!\")\n\t} else {\n\t\tdcontext.GetLogger(imh).Debug(\"Putting a Docker Manifest!\")\n\t}\n\n\tvar options []distribution.ManifestServiceOption\n\tif imh.Tag != \"\" {\n\t\toptions = append(options, distribution.WithTag(imh.Tag))\n\t}\n\n\tif err := imh.applyResourcePolicy(manifest); err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\t_, err = manifests.Put(imh, manifest, options...)\n\tif err != nil {\n\t\t// TODO(stevvooe): These error handling switches really need to be\n\t\t// handled by an app global mapper.\n\t\tif err == distribution.ErrUnsupported {\n\t\t\timh.Errors = 
append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\t\treturn\n\t\t}\n\t\tif err == distribution.ErrAccessDenied {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeDenied)\n\t\t\treturn\n\t\t}\n\t\tswitch err := err.(type) {\n\t\tcase distribution.ErrManifestVerification:\n\t\t\tfor _, verificationError := range err {\n\t\t\t\tswitch verificationError := verificationError.(type) {\n\t\t\t\tcase distribution.ErrManifestBlobUnknown:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest))\n\t\t\t\tcase distribution.ErrManifestNameInvalid:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))\n\t\t\t\tcase distribution.ErrManifestUnverified:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)\n\t\t\t\tdefault:\n\t\t\t\t\tif verificationError == digest.ErrDigestInvalidFormat {\n\t\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\t\t\t} else {\n\t\t\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase errcode.Error:\n\t\t\timh.Errors = append(imh.Errors, err)\n\t\tdefault:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t}\n\t\treturn\n\t}\n\n\t// Tag this manifest\n\tif imh.Tag != \"\" {\n\t\ttags := imh.Repository.Tags(imh)\n\t\terr = tags.Tag(imh, imh.Tag, desc)\n\t\tif err != nil {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// Construct a canonical url for the uploaded manifest.\n\tref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\treturn\n\t}\n\n\tlocation, err := imh.urlBuilder.BuildManifestURL(ref)\n\tif err != nil {\n\t\t// NOTE(stevvooe): Given the behavior above, this absurdly unlikely to\n\t\t// happen. 
We'll log the error here but proceed as if it worked. Worst\n\t\t// case, we set an empty location header.\n\t\tdcontext.GetLogger(imh).Errorf(\"error building manifest url from digest: %v\", err)\n\t}\n\n\tw.Header().Set(\"Location\", location)\n\tw.Header().Set(\"Docker-Content-Digest\", imh.Digest.String())\n\tw.WriteHeader(http.StatusCreated)\n\n\tdcontext.GetLogger(imh).Debug(\"Succeeded in putting manifest!\")\n}", "func (g *Gateway) UpdateManifest() error {\n\tts := time.Now().Unix()\n\tkey := fmt.Sprintf(\"%s/%s\", service.Gateway, g.ID)\n\tmanifest := &consul.ServiceManifest{\n\t\tID: g.ID,\n\t\tType: service.Gateway,\n\t\tAddress: g.Address,\n\t\tPort: g.Port,\n\t\tApps: g.Apps,\n\t\tHosts: g.Hosts,\n\t\tLastActive: ts,\n\t}\n\n\tif err := g.Consul.WriteStructToKey(key, manifest); err != nil {\n\t\treturn fmt.Errorf(\"error updating manifest: %v\", err)\n\t}\n\n\tfmt.Printf(\"Updated manifest %v\\n\", manifest)\n\treturn nil\n}", "func PushManifest(img string, auth dockertypes.AuthConfig) (hash string, length int, err error) {\n\tsrcImages := []types.ManifestEntry{}\n\n\tfor i, platform := range platformsToSearchForIndex {\n\t\tosArchArr := strings.Split(platform, \"/\")\n\t\tif len(osArchArr) != 2 && len(osArchArr) != 3 {\n\t\t\treturn hash, length, fmt.Errorf(\"platform argument %d is not of form 'os/arch': '%s'\", i, platform)\n\t\t}\n\t\tvariant := \"\"\n\t\tos, arch := osArchArr[0], osArchArr[1]\n\t\tif len(osArchArr) == 3 {\n\t\t\tvariant = osArchArr[2]\n\t\t}\n\t\tsrcImages = append(srcImages, types.ManifestEntry{\n\t\t\tImage: fmt.Sprintf(\"%s-%s\", img, arch),\n\t\t\tPlatform: ocispec.Platform{\n\t\t\t\tOS: os,\n\t\t\t\tArchitecture: arch,\n\t\t\t\tVariant: variant,\n\t\t\t},\n\t\t})\n\t}\n\n\tyamlInput := types.YAMLInput{\n\t\tImage: img,\n\t\tManifests: srcImages,\n\t}\n\n\tlog.Debugf(\"pushing manifest list for %s -> %#v\", img, yamlInput)\n\n\t// push the manifest list with the auth as given, ignore missing, do not allow insecure\n\treturn 
registry.PushManifestList(auth.Username, auth.Password, yamlInput, true, false, false, types.OCI, \"\")\n}", "func (d *DockerV2Manifest) PutManifest() {\n\treponame := d.Ctx.Input.Param(\":splat\")\n\ttags := d.Ctx.Input.Param(\":tags\")\n\tlogs.Debug(\"PutManifest of '%s:%s'.\", reponame, tags)\n\n\tdata := d.Ctx.Input.CopyBody(utils.MaxSize)\n\tlogs.Debug(\"The manifest is <%s>\", data)\n\terr := storage.PutManifest(d.Ctx, reponame, tags, \"docker\", \"v2\", data)\n\tif err != nil {\n\t\tCtxErrorWrap(d.Ctx, http.StatusInternalServerError, err, fmt.Sprintf(\"Failed to put manifest of '%s:%s'.\", reponame, tags))\n\t\treturn\n\t}\n\n\t//TODO: rollback the storage.. add error checks\n\t_, err = models.AddImage(reponame, tags, \"docker\", \"v2\")\n\tif err != nil {\n\t\tCtxErrorWrap(d.Ctx, http.StatusInternalServerError, err, fmt.Sprintf(\"Failed to add image '%s:%s' to db.\", reponame, tags))\n\t\treturn\n\t}\n\n\tdigest, _ := utils.DigestManifest(data)\n\theader := make(map[string]string)\n\theader[\"Docker-Content-Digest\"] = digest\n\tCtxSuccessWrap(d.Ctx, http.StatusOK, \"{}\", header)\n}", "func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //nolint: gocyclo\n\tbody []byte,\n) (godigest.Digest, godigest.Digest, error) {\n\tif err := is.InitRepo(repo); err != nil {\n\t\tis.log.Debug().Err(err).Msg(\"init repo\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t\tmonitoring.IncUploadCounter(is.metrics, repo)\n\t\t}\n\t}()\n\n\trefIsDigest := true\n\n\tmDigest, err := common.GetAndValidateRequestDigest(body, reference, is.log)\n\tif err != nil {\n\t\tif errors.Is(err, zerr.ErrBadManifest) {\n\t\t\treturn mDigest, \"\", err\n\t\t}\n\n\t\trefIsDigest = false\n\t}\n\n\tdig, err := common.ValidateManifest(is, repo, reference, mediaType, body, 
is.log)\n\tif err != nil {\n\t\treturn dig, \"\", err\n\t}\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// create a new descriptor\n\tdesc := ispec.Descriptor{\n\t\tMediaType: mediaType, Size: int64(len(body)), Digest: mDigest,\n\t}\n\n\tif !refIsDigest {\n\t\tdesc.Annotations = map[string]string{ispec.AnnotationRefName: reference}\n\t}\n\n\tvar subjectDigest godigest.Digest\n\n\tartifactType := \"\"\n\n\tif mediaType == ispec.MediaTypeImageManifest {\n\t\tvar manifest ispec.Manifest\n\n\t\terr := json.Unmarshal(body, &manifest)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif manifest.Subject != nil {\n\t\t\tsubjectDigest = manifest.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetManifestArtifactType(manifest)\n\t} else if mediaType == ispec.MediaTypeImageIndex {\n\t\tvar index ispec.Index\n\n\t\terr := json.Unmarshal(body, &index)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif index.Subject != nil {\n\t\t\tsubjectDigest = index.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetIndexArtifactType(index)\n\t}\n\n\tupdateIndex, oldDgst, err := common.CheckIfIndexNeedsUpdate(&index, &desc, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif !updateIndex {\n\t\treturn desc.Digest, subjectDigest, nil\n\t}\n\n\t// write manifest to \"blobs\"\n\tdir := path.Join(is.rootDir, repo, \"blobs\", mDigest.Algorithm().String())\n\tmanifestPath := path.Join(dir, mDigest.Encoded())\n\n\tif err = is.store.PutContent(context.Background(), manifestPath, body); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", manifestPath).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, desc, oldDgst, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// now update \"index.json\"\n\tindex.Manifests = append(index.Manifests, desc)\n\tdir = path.Join(is.rootDir, 
repo)\n\tindexPath := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", indexPath).Msg(\"unable to marshal JSON\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\t// update the descriptors artifact type in order to check for signatures when applying the linter\n\tdesc.ArtifactType = artifactType\n\n\t// apply linter only on images, not signatures\n\tpass, err := common.ApplyLinter(is, is.linter, repo, desc)\n\tif !pass {\n\t\tis.log.Error().Err(err).Str(\"repository\", repo).Str(\"reference\", reference).Msg(\"linter didn't pass\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\tif err = is.store.PutContent(context.Background(), indexPath, buf); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", manifestPath).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn desc.Digest, subjectDigest, nil\n}", "func UpdateManifest(m Manifests, root string, paths []string, id flux.ResourceID, f func(manifest []byte) ([]byte, error)) error {\n\tresources, err := m.LoadManifests(root, paths)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresource, ok := resources[id.String()]\n\tif !ok {\n\t\treturn ErrResourceNotFound(id.String())\n\t}\n\n\tpath := filepath.Join(root, resource.Source())\n\tdef, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDef, err := f(def)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(path, newDef, fi.Mode())\n}", "func (s *Stargate) UpdateManifest() error {\n\tts := time.Now().Unix()\n\tkey := fmt.Sprintf(\"%s/%s\", service.Stargate, s.ID)\n\tmanifest := &consul.ServiceManifest{\n\t\tID: s.ID,\n\t\tType: service.Stargate,\n\t\tLastActive: ts,\n\t}\n\n\tif err := s.Consul.WriteStructToKey(key, manifest); err != nil {\n\t\treturn fmt.Errorf(\"error updating manifest: %v\", err)\n\t}\n\n\tfmt.Printf(\"Updated manifest %v\\n\", manifest)\n\treturn nil\n}", 
"func PackageManifestChanged(fino os.FileInfo, remoteURL string) bool {\n\tres, err := http.Head(remoteURL)\n\tif err != nil {\n\t\tif err, ok := err.(net.Error); ok {\n\t\t\tfmt.Printf(constants.WarningColor, \"missing internet?, using local manifest.\\n\")\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn false\n\t}\n\n\treturn fino.Size() != res.ContentLength\n}", "func UpdateManifest(m Manifests, root string, serviceID flux.ResourceID, f func(manifest []byte) ([]byte, error)) error {\n\tservices, err := m.FindDefinedServices(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpaths := services[serviceID]\n\tif len(paths) == 0 {\n\t\treturn ErrNoResourceFilesFoundForService\n\t}\n\tif len(paths) > 1 {\n\t\treturn ErrMultipleResourceFilesFoundForService\n\t}\n\n\tdef, err := ioutil.ReadFile(paths[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDef, err := f(def)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(paths[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(paths[0], newDef, fi.Mode())\n}", "func PackageManifestChanged(fino os.FileInfo, remoteURL string) bool {\n\tres, err := http.Head(remoteURL)\n\tif err != nil {\n\t\tvar netError *net.Error\n\t\tif errors.Is(err, *netError) {\n\t\t\tfmt.Printf(constants.WarningColor, \"missing internet?, using local manifest.\\n\")\n\t\t} else {\n\t\t\tfmt.Printf(\"probably bad URL: %s, got error %s\", remoteURL, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\treturn false\n\t}\n\n\treturn fino.Size() != res.ContentLength\n}", "func Update(oldManifest []byte) (string, error) {\n\tjsonManifest, err := loadJSONManifest(oldManifest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnewManifest, err := updateApplication(oldManifest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif manifestApplications, ok := jsonManifest[\"applications\"]; ok {\n\t\tnewApplications, manifestErr := updateApplications(manifestApplications, jsonManifest)\n\t\tif manifestErr != nil {\n\t\t\treturn \"\", 
manifestErr\n\t\t}\n\t\tnewManifest[\"applications\"], _ = json.Marshal(newApplications)\n\t\tdelete(newManifest, \"routes\")\n\t\treturn marshal(newManifest)\n\t}\n\treturn marshal(newManifest)\n}", "func (api *Client) UpdateManifest(manifest *Manifest, token string, appId string) (*UpdateManifestResponse, error) {\n\treturn api.UpdateManifestContext(context.Background(), manifest, token, appId)\n}", "func (d *InboundCacheDriver) StoreManifest(location models.ImageReference, contents []byte, mediaType string, now time.Time) error {\n\td.Entries[location] = inboundCacheEntry{contents, mediaType, now}\n\treturn nil\n}", "func (b *Backend) ManifestAnnotate(ctx context.Context, req *pb.ManifestAnnotateRequest) (*gogotypes.Empty, error) {\n\tvar emptyResp = &gogotypes.Empty{}\n\n\tif !b.daemon.opts.Experimental {\n\t\treturn emptyResp, errors.New(\"please enable experimental to use manifest feature\")\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"ManifestList\": req.GetManifestList(),\n\t\t\"Manifest\": req.GetManifest(),\n\t}).Info(\"ManifestAnnotateRequest received\")\n\n\tmanifestName := req.GetManifestList()\n\tmanifestImage := req.GetManifest()\n\timageOS := req.GetOs()\n\timageArch := req.GetArch()\n\timageOSFeature := req.GetOsFeatures()\n\timageVariant := req.GetVariant()\n\n\t// get list image\n\t_, listImage, err := image.FindImage(b.daemon.localStore, manifestName)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// load list from list image\n\t_, list, err := loadListFromImage(b.daemon.localStore, listImage.ID)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// add image to list, if image already exists, it will be substituted\n\tinstanceDigest, err := list.addImage(ctx, b.daemon.localStore, manifestImage)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// modify image platform if user specifies\n\tfor i := range list.docker.Manifests {\n\t\tif list.docker.Manifests[i].Digest == instanceDigest {\n\t\t\tif imageOS != \"\" 
{\n\t\t\t\tlist.docker.Manifests[i].Platform.OS = imageOS\n\t\t\t}\n\t\t\tif imageArch != \"\" {\n\t\t\t\tlist.docker.Manifests[i].Platform.Architecture = imageArch\n\t\t\t}\n\t\t\tif len(imageOSFeature) > 0 {\n\t\t\t\tlist.docker.Manifests[i].Platform.OSFeatures = append([]string{}, imageOSFeature...)\n\t\t\t}\n\t\t\tif imageVariant != \"\" {\n\t\t\t\tlist.docker.Manifests[i].Platform.Variant = imageVariant\n\t\t\t}\n\t\t}\n\t}\n\n\t// save list to image\n\t_, err = list.saveListToImage(b.daemon.localStore, listImage.ID, \"\", manifest.DockerV2ListMediaType)\n\n\treturn emptyResp, err\n}", "func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) {\n\tif options.ManifestMIMEType == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tconverter, ok := converters[options.ManifestMIMEType]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Unsupported conversion type: %v\", options.ManifestMIMEType)\n\t}\n\n\toptionsCopy := options\n\tconvertedManifest, err := converter(ctx, &optionsCopy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconvertedImage := memoryImageFromManifest(convertedManifest)\n\n\toptionsCopy.ManifestMIMEType = \"\"\n\treturn convertedImage.UpdatedImage(ctx, optionsCopy)\n}", "func appendImageManifest(tarFile string, manifest []byte) error {\n\thash := sha256.Sum256(manifest)\n\treturn appendToTarFile(tarFile, fmt.Sprintf(\"%s-%x.json\", \"imagemanifest\", hash), manifest)\n}", "func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination string, opts entities.ImagePushOptions) (string, error) {\n\tif opts.Signers != nil {\n\t\treturn \"\", fmt.Errorf(\"forwarding Signers is not supported for remote clients\")\n\t}\n\n\toptions := 
new(images.PushOptions)\n\toptions.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithRemoveSignatures(opts.RemoveSignatures).WithAll(opts.All).WithFormat(opts.Format).WithCompressionFormat(opts.CompressionFormat).WithQuiet(opts.Quiet).WithProgressWriter(opts.Writer).WithAddCompression(opts.AddCompression).WithForceCompressionFormat(opts.ForceCompressionFormat)\n\n\tif s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined {\n\t\tif s == types.OptionalBoolTrue {\n\t\t\toptions.WithSkipTLSVerify(true)\n\t\t} else {\n\t\t\toptions.WithSkipTLSVerify(false)\n\t\t}\n\t}\n\tdigest, err := manifests.Push(ir.ClientCtx, name, destination, options)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"pushing manifest list %s: %w\", name, err)\n\t}\n\n\tif opts.Rm {\n\t\tif _, rmErrors := ir.Remove(ctx, []string{name}, entities.ImageRemoveOptions{LookupManifest: true}); len(rmErrors) > 0 {\n\t\t\treturn \"\", fmt.Errorf(\"removing manifest after push: %w\", rmErrors[0])\n\t\t}\n\t}\n\n\treturn digest, err\n}", "func valideManifest(mani *ImageManifest) error {\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
OnDeleteManifest is called when a manifest is deleted. It updates metadb according to the type of image pushed(normal images, signatues, etc.). In care of any errors, it makes sure to keep consistency between metadb and the image store.
func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest, manifestBlob []byte, storeController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger, ) error { imgStore := storeController.GetImageStore(repo) isSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, manifestBlob, reference) if err != nil { log.Error().Err(err).Msg("can't check if image is a signature or not") return err } manageRepoMetaSuccessfully := true if isSignature { err = metaDB.DeleteSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{ SignatureDigest: digest.String(), SignatureType: signatureType, }) if err != nil { log.Error().Err(err).Msg("metadb: can't check if image is a signature or not") manageRepoMetaSuccessfully = false } } else { err = metaDB.DeleteRepoTag(repo, reference) if err != nil { log.Info().Msg("metadb: restoring image store") // restore image store _, _, err := imgStore.PutImageManifest(repo, reference, mediaType, manifestBlob) if err != nil { log.Error().Err(err).Msg("metadb: error while restoring image store, database is not consistent") } manageRepoMetaSuccessfully = false } if referredDigest, hasSubject := common.GetReferredSubject(manifestBlob); hasSubject { err := metaDB.DeleteReferrer(repo, referredDigest, digest) if err != nil { log.Error().Err(err).Msg("metadb: error while deleting referrer") return err } } } if !manageRepoMetaSuccessfully { log.Info().Str("tag", reference).Str("repository", repo). Msg("metadb: deleting image meta was unsuccessful for tag in repo") return err } return nil }
[ "func (p *Processor) DeleteManifest(account keppel.Account, repo keppel.Repository, manifestDigest digest.Digest, actx keppel.AuditContext) error {\n\tvar (\n\t\ttagResults []keppel.Tag\n\t\ttags []string\n\t)\n\n\t_, err := p.db.Select(&tagResults,\n\t\t`SELECT * FROM tags WHERE repo_id = $1 AND digest = $2`,\n\t\trepo.ID, manifestDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, tagResult := range tagResults {\n\t\ttags = append(tags, tagResult.Name)\n\t}\n\n\tresult, err := p.db.Exec(\n\t\t//this also deletes tags referencing this manifest because of \"ON DELETE CASCADE\"\n\t\t`DELETE FROM manifests WHERE repo_id = $1 AND digest = $2`,\n\t\trepo.ID, manifestDigest)\n\tif err != nil {\n\t\totherDigest, err2 := p.db.SelectStr(\n\t\t\t`SELECT parent_digest FROM manifest_manifest_refs WHERE repo_id = $1 AND child_digest = $2`,\n\t\t\trepo.ID, manifestDigest)\n\t\t// more than one manifest is referenced by another manifest\n\t\tif otherDigest != \"\" && err2 == nil {\n\t\t\treturn fmt.Errorf(\"cannot delete a manifest which is referenced by the manifest %s\", otherDigest)\n\t\t}\n\t\t// if the SELECT failed return the previous error to not shadow it\n\t\treturn err\n\t}\n\trowsDeleted, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rowsDeleted == 0 {\n\t\treturn sql.ErrNoRows\n\t}\n\n\t//We delete in the storage *after* the deletion is durable in the DB to be\n\t//extra sure that we did not break any constraints (esp. manifest-manifest\n\t//refs and manifest-blob refs) that the DB enforces. 
Doing things in this\n\t//order might mean that, if DeleteManifest fails, we're left with a manifest\n\t//in the backing storage that is not referenced in the DB anymore, but this\n\t//is not a huge problem since the janitor can clean those up after the fact.\n\t//What's most important is that we don't lose any data in the backing storage\n\t//while it is still referenced in the DB.\n\t//\n\t//Also, the DELETE statement could fail if some concurrent process created a\n\t//manifest reference in the meantime. If that happens, and we have already\n\t//deleted the manifest in the backing storage, we've caused an inconsistency\n\t//that we cannot recover from.\n\terr = p.sd.DeleteManifest(account, repo.Name, manifestDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif userInfo := actx.UserIdentity.UserInfo(); userInfo != nil {\n\t\tp.auditor.Record(audittools.EventParameters{\n\t\t\tTime: p.timeNow(),\n\t\t\tRequest: actx.Request,\n\t\t\tUser: userInfo,\n\t\t\tReasonCode: http.StatusOK,\n\t\t\tAction: cadf.DeleteAction,\n\t\t\tTarget: auditManifest{\n\t\t\t\tAccount: account,\n\t\t\t\tRepository: repo,\n\t\t\t\tDigest: manifestDigest,\n\t\t\t\tTags: tags,\n\t\t\t},\n\t\t})\n\t}\n\n\treturn nil\n}", "func OnUpdateManifest(repo, reference, mediaType string, digest godigest.Digest, body []byte,\n\tstoreController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,\n) error {\n\timgStore := storeController.GetImageStore(repo)\n\n\t// check if image is a signature\n\tisSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, body, reference)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"can't check if image is a signature or not\")\n\n\t\tif err := imgStore.DeleteImageManifest(repo, reference, false); err != nil {\n\t\t\tlog.Error().Err(err).Str(\"manifest\", reference).Str(\"repository\", repo).Msg(\"couldn't remove image manifest in repo\")\n\n\t\t\treturn err\n\t\t}\n\n\t\treturn err\n\t}\n\n\tmetadataSuccessfullySet := 
true\n\n\tif isSignature {\n\t\tlayersInfo, errGetLayers := GetSignatureLayersInfo(repo, reference, digest.String(), signatureType, body,\n\t\t\timgStore, log)\n\t\tif errGetLayers != nil {\n\t\t\tmetadataSuccessfullySet = false\n\t\t\terr = errGetLayers\n\t\t} else {\n\t\t\terr = metaDB.AddManifestSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{\n\t\t\t\tSignatureType: signatureType,\n\t\t\t\tSignatureDigest: digest.String(),\n\t\t\t\tLayersInfo: layersInfo,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while putting repo meta\")\n\t\t\t\tmetadataSuccessfullySet = false\n\t\t\t} else {\n\t\t\t\terr = metaDB.UpdateSignaturesValidity(repo, signedManifestDigest)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Err(err).Str(\"repository\", repo).Str(\"reference\", reference).Str(\"digest\",\n\t\t\t\t\t\tsignedManifestDigest.String()).Msg(\"metadb: failed verify signatures validity for signed image\")\n\t\t\t\t\tmetadataSuccessfullySet = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\terr = SetImageMetaFromInput(repo, reference, mediaType, digest, body,\n\t\t\timgStore, metaDB, log)\n\t\tif err != nil {\n\t\t\tmetadataSuccessfullySet = false\n\t\t}\n\t}\n\n\tif !metadataSuccessfullySet {\n\t\tlog.Info().Str(\"tag\", reference).Str(\"repository\", repo).Msg(\"uploading image meta was unsuccessful for tag in repo\")\n\n\t\tif err := imgStore.DeleteImageManifest(repo, reference, false); err != nil {\n\t\t\tlog.Error().Err(err).Str(\"reference\", reference).Str(\"repository\", repo).\n\t\t\t\tMsg(\"couldn't remove image manifest in repo\")\n\n\t\t\treturn err\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (is *ObjectStorage) DeleteImageManifest(repo, reference string, detectCollisions bool) error {\n\tdir := path.Join(is.rootDir, repo)\n\tif fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {\n\t\treturn zerr.ErrRepoNotFound\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err 
error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t}\n\t}()\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanifestDesc, err := common.RemoveManifestDescByReference(&index, reference, detectCollisions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now update \"index.json\"\n\tdir = path.Join(is.rootDir, repo)\n\tfile := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := writeFile(is.store, file, buf); err != nil {\n\t\tis.log.Debug().Str(\"deleting reference\", reference).Msg(\"\")\n\n\t\treturn err\n\t}\n\n\t// Delete blob only when blob digest not present in manifest entry.\n\t// e.g. 1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.\n\ttoDelete := true\n\n\tfor _, manifest := range index.Manifests {\n\t\tif manifestDesc.Digest.String() == manifest.Digest.String() {\n\t\t\ttoDelete = false\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif toDelete {\n\t\tp := path.Join(dir, \"blobs\", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())\n\n\t\terr = is.store.Delete(context.Background(), p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (mr *ManifestResource) Delete(_ http.ResponseWriter, req *http.Request, _ httprouter.Params) restful.Exchanger {\n\treturn &DELETEManifestHandler{\n\t\tState: mr.context.liveState(),\n\t\tQueryValues: mr.ParseQuery(req),\n\t\tStateWriter: sous.StateWriter(mr.context.StateManager),\n\t}\n}", "func (registry *Registry) DeleteManifest(repository string, digest digest.Digest) error {\n\turl := registry.url(\"/v2/%s/manifests/%s\", repository, 
digest)\n\tregistry.Logf(\"registry.manifest.delete url=%s repository=%s reference=%s\", url, repository, digest)\n\n\treq, err := http.NewRequest(\"DELETE\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := registry.Client.Do(req)\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func DeleteManifest(registry, repository string, manifest manifest.Data) error {\n\t// Will perform an actual delete\n\tdeleteCmd := newDeleteManifestsCommand(registry, repository, manifest.Digest)\n\n\tvar outb bytes.Buffer\n\tdeleteCmd.Stdout = &outb\n\n\treturn deleteCmd.Run()\n}", "func (d *swiftDriver) DeleteManifest(account keppel.Account, repoName string, manifestDigest digest.Digest) error {\n\tc, _, err := d.getBackendConnection(account)\n\tif err != nil {\n\t\treturn err\n\t}\n\to := manifestObject(c, repoName, manifestDigest)\n\treturn o.Delete(nil, nil)\n}", "func (d *StorageDriver) DeleteManifest(account keppel.Account, repoName string, manifestDigest digest.Digest) error {\n\tpath := d.getManifestPath(account, repoName, manifestDigest)\n\treturn os.Remove(path)\n}", "func (s *manifestStore) Delete(ctx context.Context, m *models.Manifest) (bool, error) {\n\tdefer metrics.InstrumentQuery(\"manifest_delete\")()\n\tq := \"DELETE FROM manifests WHERE top_level_namespace_id = $1 AND repository_id = $2 AND id = $3\"\n\n\tres, err := s.db.ExecContext(ctx, q, m.NamespaceID, m.RepositoryID, m.ID)\n\tif err != nil {\n\t\tvar pgErr *pgconn.PgError\n\t\tif errors.As(err, &pgErr) && pgErr.Code == pgerrcode.ForeignKeyViolation && pgErr.TableName == \"manifest_references\" {\n\t\t\treturn false, fmt.Errorf(\"deleting manifest: %w\", ErrManifestReferencedInList)\n\t\t}\n\t\treturn false, fmt.Errorf(\"deleting manifest: %w\", err)\n\t}\n\n\tcount, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"deleting manifest: %w\", err)\n\t}\n\n\treturn count == 1, nil\n}", "func (l *Local) 
DeleteManifestByImage(name, ref string) (err error) {\n\tif _, err := digest.Parse(ref); err != nil {\n\t\t// remove tag too\n\t\ttag := registry.PathJoinWithBase(name, baseTagDir, ref)\n\t\tdgst, err := ioutil.ReadFile(tag)\n\t\tif err != nil {\n\t\t\tlog.Println(\"-----------\", err, tag)\n\t\t\treturn errors.Wrap(err)\n\t\t}\n\t\tos.Remove(tag)\n\t\tref = string(dgst)\n\t}\n\n\tmanifestDir := registry.PathJoinWithBase(name, ref)\n\tmanifest := filepath.Join(manifestDir, \"manifest.json\")\n\tif _, err := os.Stat(manifest); os.IsNotExist(err) {\n\t\treturn errors.Wrap(err,\n\t\t\terrors.WithStatusCode(http.StatusAccepted),\n\t\t)\n\t}\n\treturn os.RemoveAll(manifestDir)\n}", "func (reg *GoogleContainerRegistry) DeleteImageManifest(name, reference string) error {\n\ttarget := fmt.Sprintf(\"%s/%s/%s/manifests/%s\", reg.getRegistryURLWithAPIVersionV2(), reg.ProjectName, name, reference)\n\n\treq, err := http.NewRequest(http.MethodDelete, target, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Add(\"Accept\", \"application/vnd.docker.distribution.manifest.v2+json\")\n\n\tres, err := reg.Client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"Non-OK status code received in DeleteImageManifest: %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}", "func (m *manifestService) Delete(ctx context.Context, dgst digest.Digest) error {\n\tcontext.GetLogger(ctx).Debugf(\"(*manifestService).Delete\")\n\treturn m.manifests.Delete(withRepository(ctx, m.repo), dgst)\n}", "func (api *Client) DeleteManifest(token string, appId string) (*SlackResponse, error) {\n\treturn api.DeleteManifestContext(context.Background(), token, appId)\n}", "func (bm *BlobsManifest) Delete() error {\n\n\tfor _, chunk := range bm.Chunks {\n\t\t// for Huge Blob mode, no need remove blobs\n\t\t_, _, length := utils.ParseBlobDigest(chunk)\n\t\tif length != 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tb := blobs.GetBlobPartial(\"\", chunk)\n\t\tif b 
!= nil {\n\t\t\tb.Delete()\n\t\t}\n\t}\n\n\t// to remove Huge Blob Image\n\timageDir := configuration.RootDirectory() + manifest.ManifestDir + \"/\" + bm.BlobSum\n\tutils.RemoveDir(imageDir)\n\n\tutils.Remove(blobsManifestPath(bm.BlobSum))\n\n\treturn nil\n}", "func (r *repository) Delete(dgst digest.Digest) error {\n\tms, err := r.Repository.Manifests(r.ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ms.Delete(dgst)\n}", "func (mh *MetadataHandler) HandleDeleteMetadata(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tvars := mux.Vars(r)\n\tvar (\n\t\tappID string\n\t\tok bool\n\t)\n\tif appID, ok = vars[\"appID\"]; !ok {\n\t\tw.WriteHeader(http.StatusBadRequest) // 400\n\t\treturn\n\t}\n\n\terr := mh.Repository.Delete(appID)\n\tif err != nil {\n\t\tif err == repository.ErrIDNotFound {\n\t\t\tw.WriteHeader(http.StatusConflict) // 409\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError) // 500\n\t\t}\n\t\tyaml.NewEncoder(w).Encode(err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent) // 204\n}", "func deleteManifests(ctx context.Context, acrClient api.AcrCLIClientInterface, loginURL string, repoName string, args []string) error {\n\tfor i := 0; i < len(args); i++ {\n\t\t_, err := acrClient.DeleteManifest(ctx, repoName, args[i])\n\t\tif err != nil {\n\t\t\t// If there is an error (this includes not found and not allowed operations) the deletion of the images is stopped and an error is returned.\n\t\t\treturn errors.Wrap(err, \"failed to delete manifests\")\n\t\t}\n\t\tfmt.Printf(\"%s/%s@%s\\n\", loginURL, repoName, args[i])\n\t}\n\treturn nil\n}", "func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {\n\t// 1. Check all files in manifest exist.\n\tfor id := range mf.Tables {\n\t\tif _, ok := idMap[id]; !ok {\n\t\t\treturn fmt.Errorf(\"file does not exist for table %d\", id)\n\t\t}\n\t}\n\n\t// 2. 
Delete files that shouldn't exist.\n\tfor id := range idMap {\n\t\tif _, ok := mf.Tables[id]; !ok {\n\t\t\tkv.elog.Printf(\"Table file %d not referenced in MANIFEST\\n\", id)\n\t\t\tfilename := table.NewFilename(id, kv.opt.Dir)\n\t\t\tif err := os.Remove(filename); err != nil {\n\t\t\t\treturn y.Wrapf(err, \"While removing table %d\", id)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) {\n\tdcontext.GetLogger(imh).Debug(\"PutImageManifest\")\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\tvar jsonBuf bytes.Buffer\n\tif err := copyFullPayload(imh, w, r, &jsonBuf, maxManifestBodySize, \"image manifest PUT\"); err != nil {\n\t\t// copyFullPayload reports the error if necessary\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error()))\n\t\treturn\n\t}\n\n\tmediaType := r.Header.Get(\"Content-Type\")\n\tmanifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))\n\t\treturn\n\t}\n\n\tif imh.Digest != \"\" {\n\t\tif desc.Digest != imh.Digest {\n\t\t\tdcontext.GetLogger(imh).Errorf(\"payload digest does not match: %q != %q\", desc.Digest, imh.Digest)\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\treturn\n\t\t}\n\t} else if imh.Tag != \"\" {\n\t\timh.Digest = desc.Digest\n\t} else {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(\"no tag or digest specified\"))\n\t\treturn\n\t}\n\n\tisAnOCIManifest := mediaType == v1.MediaTypeImageManifest || mediaType == v1.MediaTypeImageIndex\n\n\tif isAnOCIManifest {\n\t\tdcontext.GetLogger(imh).Debug(\"Putting an OCI Manifest!\")\n\t} else {\n\t\tdcontext.GetLogger(imh).Debug(\"Putting a Docker Manifest!\")\n\t}\n\n\tvar options []distribution.ManifestServiceOption\n\tif 
imh.Tag != \"\" {\n\t\toptions = append(options, distribution.WithTag(imh.Tag))\n\t}\n\n\tif err := imh.applyResourcePolicy(manifest); err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\t_, err = manifests.Put(imh, manifest, options...)\n\tif err != nil {\n\t\t// TODO(stevvooe): These error handling switches really need to be\n\t\t// handled by an app global mapper.\n\t\tif err == distribution.ErrUnsupported {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\t\treturn\n\t\t}\n\t\tif err == distribution.ErrAccessDenied {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeDenied)\n\t\t\treturn\n\t\t}\n\t\tswitch err := err.(type) {\n\t\tcase distribution.ErrManifestVerification:\n\t\t\tfor _, verificationError := range err {\n\t\t\t\tswitch verificationError := verificationError.(type) {\n\t\t\t\tcase distribution.ErrManifestBlobUnknown:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest))\n\t\t\t\tcase distribution.ErrManifestNameInvalid:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))\n\t\t\t\tcase distribution.ErrManifestUnverified:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)\n\t\t\t\tdefault:\n\t\t\t\t\tif verificationError == digest.ErrDigestInvalidFormat {\n\t\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\t\t\t} else {\n\t\t\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase errcode.Error:\n\t\t\timh.Errors = append(imh.Errors, err)\n\t\tdefault:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t}\n\t\treturn\n\t}\n\n\t// Tag this manifest\n\tif imh.Tag != \"\" {\n\t\ttags := imh.Repository.Tags(imh)\n\t\terr = tags.Tag(imh, imh.Tag, desc)\n\t\tif err != nil {\n\t\t\timh.Errors = append(imh.Errors, 
errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// Construct a canonical url for the uploaded manifest.\n\tref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\treturn\n\t}\n\n\tlocation, err := imh.urlBuilder.BuildManifestURL(ref)\n\tif err != nil {\n\t\t// NOTE(stevvooe): Given the behavior above, this absurdly unlikely to\n\t\t// happen. We'll log the error here but proceed as if it worked. Worst\n\t\t// case, we set an empty location header.\n\t\tdcontext.GetLogger(imh).Errorf(\"error building manifest url from digest: %v\", err)\n\t}\n\n\tw.Header().Set(\"Location\", location)\n\tw.Header().Set(\"Docker-Content-Digest\", imh.Digest.String())\n\tw.WriteHeader(http.StatusCreated)\n\n\tdcontext.GetLogger(imh).Debug(\"Succeeded in putting manifest!\")\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
committee_api GetCommitteeRequest api request get_committee_request
func (api *API) GetCommitteeRequest(id uint32, count int32) (*CommitteeObject, error) { var resp CommitteeObject err := api.call("committee_api", "get_committee_request", []interface{}{id, count}, &resp) return &resp, err }
[ "func CommitteeGet(id string) (*Committee, error) {\n\tvar response committeeResponse\n\tp := params{\"id\": id}\n\terr := committeeAPIS.get.get(&response, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response.committee(), nil\n}", "func (_IOrakuruCore *IOrakuruCoreCallerSession) GetRequest(_requestId [32]byte) (struct {\n\tId [32]byte\n\tDataSource string\n\tSelector string\n\tCallbackAddr common.Address\n\tExecutionTimestamp *big.Int\n\tIsFulfilled bool\n\tAggrType uint8\n\tPrecision uint8\n}, error) {\n\treturn _IOrakuruCore.Contract.GetRequest(&_IOrakuruCore.CallOpts, _requestId)\n}", "func (_IOrakuruCore *IOrakuruCoreSession) GetRequest(_requestId [32]byte) (struct {\n\tId [32]byte\n\tDataSource string\n\tSelector string\n\tCallbackAddr common.Address\n\tExecutionTimestamp *big.Int\n\tIsFulfilled bool\n\tAggrType uint8\n\tPrecision uint8\n}, error) {\n\treturn _IOrakuruCore.Contract.GetRequest(&_IOrakuruCore.CallOpts, _requestId)\n}", "func (api *API) GetCommitteeRequestsList(status uint16) ([]*uint16, error) {\n\tvar resp []*uint16\n\terr := api.call(\"committee_api\", \"get_committee_requests_list\", []interface{}{status}, &resp)\n\treturn resp, err\n}", "func getCmdQueryCommittee() *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"committee [committee-id]\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tShort: \"Query details of a single committee\",\n\t\tExample: fmt.Sprintf(\"%s query %s committee 1\", version.AppName, types.ModuleName),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tclientCtx, err := client.GetClientQueryContext(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// validate that the committee id is a uint\n\t\t\tcommitteeID, err := strconv.ParseUint(args[0], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"committee-id %s not a valid uint, please input a valid committee-id\", args[0])\n\t\t\t}\n\n\t\t\tqueryClient := types.NewQueryClient(clientCtx)\n\t\t\tres, err := 
queryClient.Committee(context.Background(), &types.QueryCommitteeRequest{CommitteeId: committeeID})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn clientCtx.PrintProto(res)\n\t\t},\n\t}\n}", "func (s *PublicBlockChainAPI) GetCommittee(ctx context.Context, epoch int64) (map[string]interface{}, error) {\n\tcommittee, err := s.b.GetCommittee(big.NewInt(epoch))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalidators := make([]map[string]interface{}, 0)\n\tfor _, validator := range committee.NodeList {\n\t\tvalidatorBalance := new(hexutil.Big)\n\t\tvalidatorBalance, err = s.b.GetBalance(validator.EcdsaAddress)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toneAddress, err := internal_common.AddressToBech32(validator.EcdsaAddress)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvalidatorsFields := map[string]interface{}{\n\t\t\t\"address\": oneAddress,\n\t\t\t\"balance\": validatorBalance,\n\t\t}\n\t\tvalidators = append(validators, validatorsFields)\n\t}\n\tresult := map[string]interface{}{\n\t\t\"shardID\": committee.ShardID,\n\t\t\"validators\": validators,\n\t}\n\treturn result, nil\n}", "func (m manager) GetRequest(ctx *context.T) vtrace.Request {\n\t// nologcall\n\tif span := getSpan(ctx); span != nil {\n\t\treturn vtrace.Request{\n\t\t\tSpanId: span.id,\n\t\t\tTraceId: span.trace,\n\t\t\tFlags: span.flags(),\n\t\t\tLogLevel: int32(GetVTraceLevel(ctx)),\n\t\t}\n\t}\n\treturn vtrace.Request{}\n}", "func (api *API) GetCommitteeRequestVotes(id uint32) ([]*CommitteeVoteState, error) {\n\tvar resp []*CommitteeVoteState\n\terr := api.call(\"committee_api\", \"get_committee_request_votes\", []interface{}{id}, &resp)\n\treturn resp, err\n}", "func GetCmdQueryCommittee(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"committee [committee-id]\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tShort: \"Query details of a single committee\",\n\t\tExample: fmt.Sprintf(\"%s query %s committee 1\", 
version.ClientName, types.ModuleName),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\t// Prepare params for querier\n\t\t\tcommitteeID, err := strconv.ParseUint(args[0], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"committee-id %s not a valid uint\", args[0])\n\t\t\t}\n\t\t\tbz, err := cdc.MarshalJSON(types.NewQueryCommitteeParams(committeeID))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Query\n\t\t\tres, _, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, types.QueryCommittee), bz)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Decode and print result\n\t\t\tcommittee := types.Committee{}\n\t\t\tif err = cdc.UnmarshalJSON(res, &committee); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn cliCtx.PrintOutput(committee)\n\t\t},\n\t}\n\treturn cmd\n}", "func (br *Batcher) GetRequest(id string, seq uint32) (req client.Request) {\n\tclog, logexists := br.CLog[id]\n\n\tif logexists {\n\t\tval, vexists := clog.Values[seq]\n\t\tif vexists {\n\t\t\treq = val\n\t\t}\n\t}\n\n\treturn\n}", "func (_CommitteeManager *CommitteeManagerSession) Committee() (common.Address, error) {\n\treturn _CommitteeManager.Contract.Committee(&_CommitteeManager.CallOpts)\n}", "func (_CommitteeManager *CommitteeManagerCaller) Committee(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _CommitteeManager.contract.Call(opts, out, \"_committee\")\n\treturn *ret0, err\n}", "func GetRequest(ctx context.Context, r *http.Request) (*skillserver.EchoRequest, error) {\n\tvar echoReq *skillserver.EchoRequest\n\terr := json.NewDecoder(r.Body).Decode(&echoReq)\n\treturn echoReq, err\n}", "func (c *Client) NewGetDraftRequest(ctx context.Context, path string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: 
scheme, Path: path}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func GetActionCommitment(ctx iris.Context) {\n\ty1, err := ctx.URLParamInt64(\"FirstYear\")\n\tif err != nil {\n\t\ty1 = int64(time.Now().Year()) + 1\n\t}\n\tvar resp models.ActionCommitments\n\tdb := ctx.Values().Get(\"db\").(*sql.DB)\n\tif err = resp.GetAll(y1, db); err != nil {\n\t\tctx.StatusCode(http.StatusInternalServerError)\n\t\tctx.JSON(jsonError{\"Prévisions AP par actions budgétaires, requête : \" + err.Error()})\n\t}\n\tctx.StatusCode(http.StatusOK)\n\tctx.JSON(resp)\n}", "func GetTracerRequest(tracerID uint) ([]byte, error) {\n\tvar (\n\t\tret []byte\n\t\terr error\n\t\trequest types.Request\n\t)\n\n\tif err = store.DB.First(&request).Error; err != nil {\n\t\tlog.Warning.Print(err)\n\t\treturn ret, err\n\t}\n\n\tif ret, err = json.Marshal(request); err != nil {\n\t\tlog.Warning.Print(err)\n\t}\n\n\treturn ret, err\n}", "func (client *CertificateOrdersClient) retrieveCertificateEmailHistoryCreateRequest(ctx context.Context, resourceGroupName string, name string, options *CertificateOrdersClientRetrieveCertificateEmailHistoryOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (a *RepoAPI) getCommit(params interface{}) (resp *rpc.Response) {\n\tm := objx.New(cast.ToStringMap(params))\n\treturn rpc.Success(util.Map{\n\t\t\"commit\": a.mods.Repo.GetCommit(m.Get(\"name\").Str(), m.Get(\"hash\").Str()),\n\t})\n}", "func decodeGetRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\treq := endpoint.GetRequest{}\n\treturn req, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetCommitteeRequestVotes api request get_committee_request_votes
func (api *API) GetCommitteeRequestVotes(id uint32) ([]*CommitteeVoteState, error) { var resp []*CommitteeVoteState err := api.call("committee_api", "get_committee_request_votes", []interface{}{id}, &resp) return resp, err }
[ "func (n *Node) requestVotes(currTerm uint64) (fallback, electionResult bool) {\n\t// TODO: Students should implement this method\n\treturn\n}", "func (api *API) GetCommitteeRequest(id uint32, count int32) (*CommitteeObject, error) {\n\tvar resp CommitteeObject\n\terr := api.call(\"committee_api\", \"get_committee_request\", []interface{}{id, count}, &resp)\n\treturn &resp, err\n}", "func (l *TPCLeader) voteRequest(ctx context.Context, key, value string) tpc_pb.Action {\n\tvoteMessage := tpc_pb.LeaderMsg{\n\t\tType: tpc_pb.MessageType_VOTE,\n\t\tAction: tpc_pb.Action_PREPARE,\n\t\tKey: key,\n\t\tValue: value,\n\t}\n\tvar responseChannel chan *tpc_pb.Response = l.manager.SendMessage(ctx, voteMessage, false)\n\n\tnumResponses := 0\n\tvar vote tpc_pb.Action = tpc_pb.Action_COMMIT\n\t// iterate through the follower responses from the channel\n\tfor responsePtr := range responseChannel {\n\t\t// if any of them is an abort, change the global vote to abort\n\t\tif responsePtr.Action == tpc_pb.Action_ABORT {\n\t\t\tvote = responsePtr.Action\n\t\t}\n\t\t// end the loop when responses from all followers were received\n\t\tnumResponses++\n\t\tif numResponses == l.numFollowers {\n\t\t\tbreak\n\t\t}\n\t}\n\t// if the thread exits the for loop, then all followers voted commit\n\treturn vote\n}", "func (r *Node) requestVotes(electionResults chan bool, fallbackChan chan bool, currTerm uint64) {\n\t// Votes received\n\tremaining := 0\n\tresultChan := make(chan RequestVoteResult)\n\tfor _, peer := range r.Peers {\n\t\tif r.Self.GetId() == peer.GetId() {\n\t\t\tcontinue\n\t\t}\n\t\tmsg := rpc.RequestVoteRequest{\n\t\t\tTerm: currTerm,\n\t\t\tCandidate: r.Self,\n\t\t\tLastLogIndex: r.LastLogIndex(),\n\t\t\tLastLogTerm: r.GetLog(r.LastLogIndex()).GetTermId(),\n\t\t}\n\t\tremaining++\n\t\tgo r.requestPeerVote(peer, &msg, resultChan)\n\t}\n\n\tvote := 1\n\treject := 0\n\tmajority := r.config.ClusterSize/2 + 1\n\tif vote >= majority {\n\t\telectionResults <- true\n\t\treturn\n\t}\n\tfor 
remaining > 0 {\n\t\trequestVoteResult := <-resultChan\n\t\tremaining--\n\t\tif requestVoteResult == RequestVoteFallback {\n\t\t\tfallbackChan <- true\n\t\t\treturn\n\t\t}\n\t\tif requestVoteResult == RequestVoteSuccess {\n\t\t\tvote++\n\t\t\tif vote >= majority {\n\t\t\t\telectionResults <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treject++\n\t\t\tif reject >= majority {\n\t\t\t\telectionResults <- false\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *Raft) serviceRequestVote(request RequestVote, state int) {\n\t//fmt.Println(\"In service RV method of \", r.Myconfig.Id)\n\tresponse := RequestVoteResponse{}\n\tcandidateId := request.CandidateId\n\tresponse.Id = r.Myconfig.Id\n\tif r.isDeservingCandidate(request) {\n\t\tresponse.VoteGranted = true\n\t\tr.myCV.VotedFor = candidateId\n\t\tr.myCV.CurrentTerm = request.Term\n\t} else {\n\t\tif request.Term > r.myCV.CurrentTerm {\n\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\tr.myCV.VotedFor = -1\n\t\t}\n\t\tresponse.VoteGranted = false\n\t}\n\tif request.Term > r.myCV.CurrentTerm {\n\t\tr.WriteCVToDisk()\n\t}\n\tresponse.Term = r.myCV.CurrentTerm\n\tr.send(candidateId, response) //send to sender using send(sender,response)\n}", "func (node *Node) ReceiveRequestVote(ctx context.Context, buffer []byte) (candidateAddress string, request RequestVote, err error) {\n candidateAddress, err = node.receive(ctx, buffer, &request)\n return\n}", "func (m *Member) RequestVote(ctx context.Context, leader string, term uint64, logSize uint64) (*raftapi.RequestVoteResponse, error) {\n\tlog.WithFields(log.Fields{\"member_name\": m.Name}).Debugln(\"Requesting vote from\")\n\tvar conn *grpc.ClientConn\n\tconn, err := grpc.Dial(m.Address(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\tapi := raftapi.NewRaftServiceClient(conn)\n\tresponse, err := api.RequestVote(ctx, &raftapi.RequestVoteMessage{\n\t\tTerm: term,\n\t\tCandidate: leader,\n\t\tLogSize: logSize,\n\t\tLastLogTerm: 
0,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func (r *Raft) serviceRequestVote(request RequestVote) {\n\t//fmt.Println(\"In service RV method of \", r.Myconfig.Id)\n\tresponse := RequestVoteResponse{} //prep response object,for responding back to requester\n\tcandidateId := request.candidateId\n\tresponse.id = r.Myconfig.Id\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"log as complete?\", r.logAsGoodAsMine(request))\n\tif r.isDeservingCandidate(request) {\n\t\tresponse.voteGranted = true\n\t\tr.votedFor = candidateId\n\t\tr.currentTerm = request.term\n\n\t\t//Writing current term and voteFor to disk\n\t\tr.WriteCVToDisk()\n\n\t} else {\n\t\tresponse.voteGranted = false\n\t}\n\tresponse.term = r.currentTerm //to return self's term too\n\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"voting\", response.voteGranted) //\"because votefor is\", r.votedFor, \"my and request terms are:\", r.currentTerm, request.term)\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"Current term,request.term is\", r.currentTerm, request.term, \"Self lastLogIndex is\", r.myMetaData.lastLogIndex, \"VotedFor,request.lastLogTerm\", r.votedFor, request.lastLogTerm)\n\t//fmt.Println(\"VotedFor,request.lastLogTerm\", r.votedFor, request.lastLogTerm)\n\n\t//fmt.Printf(\"In serviceRV of %v, obj prep is %v \\n\", r.Myconfig.Id, response)\n\tsend(candidateId, response) //send to sender using send(sender,response)\n}", "func (api *API) GetCommitteeRequestsList(status uint16) ([]*uint16, error) {\n\tvar resp []*uint16\n\terr := api.call(\"committee_api\", \"get_committee_requests_list\", []interface{}{status}, &resp)\n\treturn resp, err\n}", "func (alg *Algorand) committeeVote(round uint64, step uint64, expectedNum int, hash common.Hash) error {\n\tif alg.maliciousType == EvilVoteNothing {\n\t\t// vote nothing\n\t\treturn nil\n\t}\n\n\t// check if user is in committee using Sortition(Alg 1) for j > 0\n\tvrf, proof, j := alg.sortition(alg.sortitionSeed(round), 
role(committee, round, step), expectedNum, alg.tokenOwn())\n\n\t//log.Trace(\"[algorand:committeeVote] committeeVote\", \"ID\", alg.id, \"sub-user\", j)\n\t// only committee members originate a message\n\tif j > 0 {\n\t\t// Gossip vote message\n\t\tvoteMsg := &VoteMessage{\n\t\t\tBlockNumber: round,\n\t\t\tStep: step,\n\t\t\tSub: uint64(j),\n\t\t\tVRF: vrf,\n\t\t\tProof: proof,\n\t\t\tParentHash: alg.chain.last.Hash(),\n\t\t\tBlockHash: hash,\n\t\t}\n\t\t_, err := voteMsg.Sign(alg.privkey)\n\t\tif err != nil {\n\t\t\tlog.Info(\"[algorand:committeeVote] sign err\", \"ID\", alg.id, \"sub-user\", j, \"err\", err)\n\t\t\treturn err\n\t\t}\n\t\t//log.Info(\"[algorand:committeeVote] signed \", \"ID\", alg.id, \"sub-user\", j, \"votemsg\", voteMsg)\n\t\t//gossip vote\n\t\talg.chain.protocolManager.vote_Ch <- VotePreEvent{Vote: voteMsg}\n\t\t//go alg.chain.protocolManager.BroadcastVote(voteMsg.Hash(), voteMsg)\n\t}\n\treturn nil\n}", "func parseVoteRequest(r *http.Request) (electionID string, ballotID string, err error) {\n\t// Parse URL and route\n\turlparts := strings.Split(r.RequestURI, \"/\")\n\n\t// Check for the correct number of request parts\n\tif len(urlparts) < 3 || len(urlparts) > 4 {\n\t\terr = parseError{\"Invalid number of url parts. 404 Not Found.\", http.StatusNotFound}\n\t\treturn\n\t}\n\n\t// Get the electionID\n\telectionID = urlparts[2]\n\tif len(electionID) > MaxElectionIDSize || !ValidElectionID.MatchString(electionID) {\n\t\terr = parseError{\"Invalid Election ID. 404 Not Found.\", http.StatusNotFound}\n\t\treturn\n\t}\n\n\t// If we are only length 3, that's it, we are asking for a full report / ballot roll for an election\n\tif len(urlparts) == 3 || urlparts[3] == \"\" {\n\t\treturn\n\t}\n\n\t// Get the ballotID (hex encoded SHA512 of base64 encoded public-key)\n\tballotID = urlparts[3]\n\tif len(ballotID) > MaxBallotIDSize || !ValidBallotID.MatchString(ballotID) {\n\t\terr = parseError{\"Invalid Ballot ID. 
404 Not Found.\", http.StatusNotFound}\n\t}\n\n\t// If the user has provided a signature of the request in the headers, verify it\n\tif r.Header.Get(\"X-Voteflow-Signature\") != \"\" {\n\t\t// Verify the signature headers, do a cryptographic check to make sure the header and Method / URL request is signed\n\t\tif suberr := verifySignatureHeaders(r); suberr != nil {\n\t\t\terr = parseError{suberr.Error(), http.StatusBadRequest}\n\t\t\treturn\n\t\t}\n\t}\n\n\t// All checks pass\n\treturn\n}", "func (rf *Raft) sendRequestVotes(electionWon chan bool) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\targs := &RequestVoteArgs{\n\t\tTerm: rf.currentTerm,\n\t\tCandidateId: rf.me,\n\t\tLastLogIndex: rf.lastLogEntryIndex(),\n\t\tLastLogTerm: rf.lastLogEntryTerm(),\n\t}\n\n\tvotes := 1\n\tvotes_mu := sync.Mutex{}\n\t// broadcast, get votes from everyone!\n\tfor i := 0; i < len(rf.peers); i++ {\n\t\tif i != rf.me {\n\t\t\tgo func(peer int) {\n\t\t\t\treply := &RequestVoteReply{}\n\t\t\t\trf.sendRequestVote(peer, args, reply)\n\n\t\t\t\trf.mu.Lock()\n\t\t\t\tdefer rf.mu.Unlock()\n\t\t\t\tif rf.state == Candidate && rf.currentTerm == args.Term && reply.VoteGranted {\n\t\t\t\t\tvotes_mu.Lock()\n\t\t\t\t\tdefer votes_mu.Unlock()\n\t\t\t\t\tvotes++\n\t\t\t\t\tif votes > len(rf.peers)/2 {\n\t\t\t\t\t\t// Volatile state on leaders:\n\t\t\t\t\t\trf.nextIndex = []int{}\n\t\t\t\t\t\trf.matchIndex = []int{}\n\t\t\t\t\t\tnextIndex := 0\n\t\t\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\t\t\tnextIndex = rf.lastLogEntry().Index + 1\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor i := 0; i < len(rf.peers); i++ {\n\t\t\t\t\t\t\trf.nextIndex = append(rf.nextIndex, nextIndex)\n\t\t\t\t\t\t\trf.matchIndex = append(rf.matchIndex, 0)\n\t\t\t\t\t\t}\n\t\t\t\t\t\trf.state = Leader\n\n\t\t\t\t\t\telectionWon <- true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\t}\n}", "func (node *RaftNode) RequestVote(ctx context.Context, in *protos.RequestVoteMessage) (*protos.RequestVoteResponse, error) 
{\n\n\tnode.GetLock(\"RequestVote\")\n\n\tlatestLogIndex := int32(-1)\n\tlatestLogTerm := int32(-1)\n\n\tif logLen := int32(len(node.log)); logLen > 0 {\n\t\tlatestLogIndex = logLen - 1\n\t\tlatestLogTerm = node.log[latestLogIndex].Term\n\t}\n\n\tlog.Printf(\"\\nReceived term: %v, My term: %v, My votedFor: %v\\n\", in.Term, node.currentTerm, node.votedFor)\n\tlog.Printf(\"\\nReceived latestLogIndex: %v, My latestLogIndex: %v, Received latestLogTerm: %v, My latestLogTerm: %v\\n\", in.LastLogIndex, latestLogIndex, in.LastLogTerm, latestLogTerm)\n\n\t// If the received message's term is greater than the replica's current term, transition to\n\t// follower (if not already a follower) and update term.\n\tif in.Term > node.currentTerm {\n\t\tnode.ToFollower(node.Meta.Master_ctx, in.Term)\n\t}\n\n\t// If ToFollower was called above, in.Term and node.currentTerm will be equal. If in.Term < node.currentTerm, reject vote.\n\t// If the candidate's log is not atleast as up-to-date as the replica's, reject vote.\n\tif (node.votedFor == in.CandidateId) ||\n\t\t((in.Term == node.currentTerm) && (node.votedFor == -1) &&\n\t\t\t(in.LastLogTerm > latestLogTerm || ((in.LastLogTerm == latestLogTerm) && (in.LastLogIndex >= latestLogIndex)))) {\n\n\t\tnode.votedFor = in.CandidateId\n\n\t\tlog.Printf(\"\\nGranting vote to %v\\n\", in.CandidateId)\n\t\tnode.PersistToStorage()\n\t\tnode.ReleaseLock(\"RequestVote1\")\n\t\treturn &protos.RequestVoteResponse{Term: in.Term, VoteGranted: true}, nil\n\n\t} else {\n\n\t\tlog.Printf(\"\\nRejecting vote to %v\\n\", in.CandidateId)\n\t\tnode.ReleaseLock(\"RequestVote2\")\n\t\treturn &protos.RequestVoteResponse{Term: in.Term, VoteGranted: false}, nil\n\n\t}\n\n}", "func (*ObserveProposalVotesRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{43}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 
2B).\n\n\trf.mu.Lock()\n\trf.debug(\"***************Inside the RPC handler for sendRequestVote *********************\")\n\tdefer rf.mu.Unlock()\n\tvar lastIndex int\n\t//var lastTerm int\n\tif len(rf.log) > 0 {\n\t\tlastLogEntry := rf.log[len(rf.log)-1]\n\t\tlastIndex = lastLogEntry.LastLogIndex\n\t\t//lastTerm = lastLogEntry.lastLogTerm\n\t}else{\n\t\tlastIndex = 0\n\t\t//lastTerm = 0\n\t}\n\treply.Term = rf.currentTerm\n\t//rf.debug()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\trf.debug(\"My term is higher than candidate's term, myTerm = %d, candidate's term = %d\", rf.currentTerm,args.Term )\n\t} else if (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && args.LastLogIndex >= lastIndex {\n\t\trf.votedFor = args.CandidateId\n\t\treply.VoteGranted = true\n\t\trf.currentTerm = args.Term\n\t\trf.resetElectionTimer()\n\t\t//rf.debug(\"I am setting my currentTerm to -->\",args.Term,\"I am \",rf.me)\n\t}\n}", "func (*GetVotesByPartyRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{25}\n}", "func (s *VotingChaincode) vote(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) != 3 {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_01\", []string{\"vote\", \"3\"}))\n\t}\n\n\ttodayDate := string(time.Now().UTC().Format(\"2006/01/02\"))\n\n\tvoterSSN := args[0]\n\telectionType := args[1]\n\tcandidatePubKey := args[2]\n\n\telection, err := u.FindCompositeKey(stub, c.ELECTION, []string{electionType})\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tif election == \"\" {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_15\", []string{electionType}))\n\t}\n\n\t_, keyParts, err := stub.SplitCompositeKey(election)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_07\", []string{election}))\n\t}\n\n\tisElectionPeriod := u.IsWithinRange(todayDate, keyParts[1], keyParts[2], \"2006/01/02\")\n\tif isElectionPeriod != true {\n\t\treturn 
shim.Error(msg.GetErrMsg(\"VOT_ERR_13\", []string{todayDate, electionType, fmt.Sprint(keyParts[1] + \"-\" + keyParts[2])}))\n\t}\n\n\tfound, voterPubKey := u.FindUserBySSN(stub, voterSSN)\n\tif !found {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_14\", []string{voterSSN}))\n\t}\n\n\tvoterAsBytes, err := stub.GetState(voterPubKey)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_10\", []string{voterSSN, err.Error()}))\n\t}\n\n\tvoter := User{}\n\terr = json.Unmarshal(voterAsBytes, &voter)\n\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to unmarshal voter\")\n\t}\n\n\thasVoted := strings.Contains(voter.Election, c.VOTED)\n\tif hasVoted == true {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_14\", []string{voterSSN}))\n\t}\n\n\tisRegistered := strings.Contains(voter.Election, c.REGISTERED)\n\tif isRegistered != true {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_11\", []string{fmt.Sprint(\"Voter\" + voterSSN + \" Not Registered\")}))\n\t}\n\n\tisEligibleToVote := strings.Split(voter.Election, c.SEPARATOR)\n\tif isEligibleToVote[6] != \"true\" {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_11\", []string{fmt.Sprint(isEligibleToVote[5] + \" Voter Min Age \" + strconv.Itoa(c.VOTER_MIN_AGE))}))\n\t}\n\n\tvoterAge := isEligibleToVote[5]\n\n\tcandidateAsBytes, err := stub.GetState(candidatePubKey)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_10\", []string{voterSSN, err.Error()}))\n\t}\n\n\tcandidate := User{}\n\tjson.Unmarshal(candidateAsBytes, &candidate)\n\n\tisCandidate := strings.Split(candidate.Election, c.SEPARATOR)\n\tif isCandidate[4] != \"true\" {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_12\", []string{candidatePubKey, \"Not Registered\"}))\n\t}\n\n\tif candidate.SSN == voter.SSN {\n\t\treturn shim.Error(msg.GetErrMsg(\"VOT_ERR_12\", []string{candidatePubKey, fmt.Sprint(\"Same Voter \" + voterSSN + \" and Candidate \" + candidate.SSN)}))\n\t}\n\n\t_, err = s.callOtherCC(stub, c.CCNAME, c.CHANNELID, 
[]string{\"giveVote\", voter.SSN, candidatePubKey, electionType, todayDate})\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_17\", []string{c.CCNAME, err.Error()}))\n\t}\n\n\tvoter.Election = strings.Replace(voter.Election, c.REGISTERED, c.VOTED, -1)\n\n\tvoterAsBytes, _ = json.Marshal(voter)\n\n\terr = stub.PutState(voterPubKey, voterAsBytes)\n\tif err != nil {\n\t\treturn shim.Error(msg.GetErrMsg(\"COM_ERR_09\", []string{voterPubKey, err.Error()}))\n\t}\n\n\tvote := Vote{\n\t\tvoterSSN,\n\t\tvoter.FirstName,\n\t\tvoter.LastName,\n\t\tvoterAge,\n\t\tcandidatePubKey,\n\t\ttodayDate,\n\t\telectionType,\n\t\tstub.GetTxID()}\n\n\tvoteJSON, _ := json.Marshal(vote)\n\n\treturn shim.Success(voteJSON)\n}", "func (q queryServer) Vote(ctx context.Context, req *v1.QueryVoteRequest) (*v1.QueryVoteResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.ProposalId == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"proposal id can not be 0\")\n\t}\n\n\tif req.Voter == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"empty voter address\")\n\t}\n\n\tvoter, err := q.k.authKeeper.AddressCodec().StringToBytes(req.Voter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvote, err := q.k.Votes.Get(ctx, collections.Join(req.ProposalId, sdk.AccAddress(voter)))\n\tif err != nil {\n\t\tif errors.IsOf(err, collections.ErrNotFound) {\n\t\t\treturn nil, status.Errorf(codes.InvalidArgument,\n\t\t\t\t\"voter: %v not found for proposal: %v\", req.Voter, req.ProposalId)\n\t\t}\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &v1.QueryVoteResponse{Vote: &vote}, nil\n}", "func (_Bep20 *Bep20Caller) GetPriorVotes(opts *bind.CallOpts, account common.Address, blockNumber *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Bep20.contract.Call(opts, &out, \"getPriorVotes\", account, blockNumber)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), 
err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetCommitteeRequestsList api request get_committee_requests_list
func (api *API) GetCommitteeRequestsList(status uint16) ([]*uint16, error) { var resp []*uint16 err := api.call("committee_api", "get_committee_requests_list", []interface{}{status}, &resp) return resp, err }
[ "func (api *API) GetCommitteeRequest(id uint32, count int32) (*CommitteeObject, error) {\n\tvar resp CommitteeObject\n\terr := api.call(\"committee_api\", \"get_committee_request\", []interface{}{id, count}, &resp)\n\treturn &resp, err\n}", "func (api *API) GetCommitteeRequestVotes(id uint32) ([]*CommitteeVoteState, error) {\n\tvar resp []*CommitteeVoteState\n\terr := api.call(\"committee_api\", \"get_committee_request_votes\", []interface{}{id}, &resp)\n\treturn resp, err\n}", "func CommitteeGetList(chamber string) ([]*Committee, error) {\n\tvar response committeesResponse\n\tp := params{\"chamber\": chamber}\n\terr := committeeAPIS.getList.get(&response, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response.committees(), nil\n}", "func (c *Client) ListPullRequestCommits(owner, repo string, index int64, opt ListPullRequestCommitsOptions) ([]*Commit, *Response, error) {\n\tif err := escapeValidatePathSegments(&owner, &repo); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tlink, _ := url.Parse(fmt.Sprintf(\"/repos/%s/%s/pulls/%d/commits\", owner, repo, index))\n\topt.setDefaults()\n\tcommits := make([]*Commit, 0, opt.PageSize)\n\tlink.RawQuery = opt.getURLQuery().Encode()\n\tresp, err := c.getParsedResponse(\"GET\", link.String(), nil, nil, &commits)\n\treturn commits, resp, err\n}", "func (c *Config) ListRequests() (*RequestsList, error) {\n\n\tresp, err := c.makeRequest(\"GET\", fmt.Sprintf(\"%s/api/1.1/payment-requests/\", c.endpoint), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\tr := &RequestsList{}\n\t\terr := json.NewDecoder(resp.Body).Decode(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn r, nil\n\n\tcase 400:\n\t\treturn nil, badrequest(resp)\n\tcase 401:\n\t\treturn nil, unauthorized(resp)\n\t}\n\n\treturn nil, defaultResponse(resp)\n}", "func (client BastionClient) listWorkRequests(ctx context.Context, request common.OCIRequest) 
(common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/workRequests\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListWorkRequestsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (api MWSAPI) GetReportRequestList(params map[string]string) (string, error) {\n\t// params := make(map[string]string)\n\tparams[\"MarketplaceId\"] = string(api.MarketplaceID)\n\treturn api.genSignAndFetch(\"GetReportRequestList\", reportAPI, params)\n}", "func (r *ScheduleOpenShiftChangeRequestsCollectionRequest) Get(ctx context.Context) ([]OpenShiftChangeRequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (repo repository) ListCclaApprovalListRequests(companyID string, projectID, status, userID *string) (*models.CclaWhitelistRequestList, error) {\n\tf := logrus.Fields{\n\t\t\"functionName\": \"v1.approval_list.repository.ListCclaApprovalListRequests\",\n\t\t\"companyID\": companyID,\n\t\t\"projectID\": projectID,\n\t\t\"status\": status,\n\t\t\"userID\": utils.StringValue(userID),\n\t}\n\n\tif projectID == nil {\n\t\treturn nil, errors.New(\"project ID can not be nil for ListCclaApprovalListRequests\")\n\t}\n\n\tlog.WithFields(f).Debugf(\"ListCclaApprovalListRequests with Company ID: %s, Project ID: %+v, Status: %+v, User ID: %+v\",\n\t\tcompanyID, projectID, status, userID)\n\n\t// hashkey is company_id, range key is project_id\n\tindexName := \"company-id-project-id-index\"\n\n\tcondition := expression.Key(\"company_id\").Equal(expression.Value(companyID))\n\tprojectExpression := expression.Key(\"project_id\").Equal(expression.Value(projectID))\n\tcondition = condition.And(projectExpression)\n\n\tbuilder := 
expression.NewBuilder().WithKeyCondition(condition).WithProjection(buildProjection())\n\n\tvar filter expression.ConditionBuilder\n\tvar filterAdded bool\n\n\t// Add the status filter if provided\n\tif status != nil {\n\t\tlog.WithFields(f).Debugf(\"ListCclaApprovalListRequests - Adding status: %s\", *status)\n\t\tstatusFilterExpression := expression.Name(\"request_status\").Equal(expression.Value(*status))\n\t\tfilter = addConditionToFilter(filter, statusFilterExpression, &filterAdded)\n\t}\n\n\t// Add the user ID filter if provided\n\tif userID != nil {\n\t\tuserFilterExpression := expression.Name(\"user_id\").Equal(expression.Value(userID))\n\t\tfilter = addConditionToFilter(filter, userFilterExpression, &filterAdded)\n\t}\n\tif filterAdded {\n\t\tbuilder = builder.WithFilter(filter)\n\t}\n\n\t// Use the nice builder to create the expression\n\texpr, err := builder.Build()\n\tif err != nil {\n\t\tlog.WithFields(f).WithError(err).Warn(\"error building query\")\n\t\treturn nil, err\n\t}\n\n\t// Assemble the query input parameters\n\tinput := &dynamodb.QueryInput{\n\t\tExpressionAttributeNames: expr.Names(),\n\t\tExpressionAttributeValues: expr.Values(),\n\t\tKeyConditionExpression: expr.KeyCondition(),\n\t\tProjectionExpression: expr.Projection(),\n\t\tFilterExpression: expr.Filter(),\n\t\tTableName: aws.String(repo.tableName),\n\t\tIndexName: aws.String(indexName),\n\t}\n\n\tqueryOutput, queryErr := repo.dynamoDBClient.Query(input)\n\tif queryErr != nil {\n\t\tlog.WithFields(f).WithError(queryErr).Warnf(\"list requests error while querying, error: %+v\", queryErr)\n\t\treturn nil, queryErr\n\t}\n\n\tlist, err := buildCclaWhitelistRequestsModels(queryOutput)\n\tif err != nil {\n\t\tlog.WithFields(f).WithError(err).Warnf(\"unmarshall requests error while decoding the response, error: %+v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &models.CclaWhitelistRequestList{List: list}, nil\n}", "func (c *client) GetRequests() []Request {\n\treturn c.requests\n}", "func (c 
APIClient) ListCommit(repoName string, to string, from string, number uint64) ([]*pfs.CommitInfo, error) {\n\tvar result []*pfs.CommitInfo\n\tif err := c.ListCommitF(repoName, to, from, number, func(ci *pfs.CommitInfo) error {\n\t\tresult = append(result, ci)\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func (client IdentityClient) listTaggingWorkRequests(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/taggingWorkRequests\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListTaggingWorkRequestsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client ContainerEngineClient) ListWorkRequests(ctx context.Context, request ListWorkRequestsRequest) (response ListWorkRequestsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.DefaultRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listWorkRequests, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListWorkRequestsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListWorkRequestsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := 
ociResponse.(ListWorkRequestsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListWorkRequestsResponse\")\n\t}\n\treturn\n}", "func (client BastionClient) listWorkRequestLogs(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/workRequests/{workRequestId}/logs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListWorkRequestLogsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client BastionClient) ListWorkRequests(ctx context.Context, request ListWorkRequestsRequest) (response ListWorkRequestsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listWorkRequests, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListWorkRequestsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListWorkRequestsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListWorkRequestsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListWorkRequestsResponse\")\n\t}\n\treturn\n}", "func (c *Client) getPullRequestCommits(ctx context.Context, number int) (commits 
[]string, err error) {\n\tvar commitSHAs []string\n\topts := go_github.ListOptions{\n\t\tPage: 0,\n\t\tPerPage: perPage,\n\t}\n\tfor {\n\t\tcurrCommits, resp, err := c.Client.PullRequests.ListCommits(ctx,\n\t\t\tc.c.Organization,\n\t\t\tc.c.Repository,\n\t\t\tnumber, &go_github.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tfor _, commit := range currCommits {\n\t\t\tcommitSHAs = append(commitSHAs, commit.GetSHA())\n\t\t}\n\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\topts.Page = resp.NextPage\n\t}\n\treturn commitSHAs, nil\n}", "func GetReviewerRequests(ctx *gin.Context) {\n\tvar requestDto dto.QuesRequestDto\n\terr := ctx.BindJSON(&requestDto)\n\tif err != nil {\n\t\tres := utility.GetErrorResponse(common.MSG_BAD_INPUT)\n\t\tctx.AbortWithStatusJSON(http.StatusBadRequest, res)\n\t\treturn\n\t}\n\tres := service.FetchReviewerRequests(&requestDto)\n\tctx.JSON(http.StatusOK, res)\n}", "func (g *Gatherer) listCommits(branch, start, end string) ([]*gogithub.RepositoryCommit, error) {\n\tstartCommit, _, err := g.client.GetCommit(g.context, g.options.GithubOrg, g.options.GithubRepo, start)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"retrieve start commit: %w\", err)\n\t}\n\n\tendCommit, _, err := g.client.GetCommit(g.context, g.options.GithubOrg, g.options.GithubRepo, end)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"retrieve end commit: %w\", err)\n\t}\n\n\tallCommits := &commitList{}\n\n\tworker := func(clo *gogithub.CommitsListOptions) (\n\t\tcommits []*gogithub.RepositoryCommit, resp *gogithub.Response, err error,\n\t) {\n\t\tfor {\n\t\t\tcommits, resp, err = g.client.ListCommits(g.context, g.options.GithubOrg, g.options.GithubRepo, clo)\n\t\t\tif err != nil {\n\t\t\t\tif !canWaitAndRetry(resp, err) {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn commits, resp, err\n\t}\n\n\tclo := gogithub.CommitsListOptions{\n\t\tSHA: branch,\n\t\tSince: 
startCommit.GetCommitter().GetDate().Time,\n\t\tUntil: endCommit.GetCommitter().GetDate().Time,\n\t\tListOptions: gogithub.ListOptions{\n\t\t\tPage: 1,\n\t\t\tPerPage: 100,\n\t\t},\n\t}\n\n\tcommits, resp, err := worker(&clo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tallCommits.Add(commits)\n\n\tremainingPages := resp.LastPage - 1\n\tif remainingPages < 1 {\n\t\treturn allCommits.List(), nil\n\t}\n\n\tt := throttler.New(maxParallelRequests, remainingPages)\n\tfor page := 2; page <= resp.LastPage; page++ {\n\t\tclo := clo\n\t\tclo.ListOptions.Page = page\n\n\t\tgo func() {\n\t\t\tcommits, _, err := worker(&clo)\n\t\t\tif err == nil {\n\t\t\t\tallCommits.Add(commits)\n\t\t\t}\n\t\t\tt.Done(err)\n\t\t}()\n\n\t\t// abort all, if we got one error\n\t\tif t.Throttle() > 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err := t.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn allCommits.List(), nil\n}", "func (r Reports) GetReportRequestList(optional ...mws.Parameters) (*mws.Response, error) {\n\top := mws.OptionalParams([]string{\n\t\t\"ReportRequestIdList\", \"ReportTypeList\", \"ReportProcessingStatusList\",\n\t\t\"MaxCount\", \"RequestedFromDate\", \"RequestedToDate\",\n\t}, optional)\n\tparams := mws.Parameters{\"Action\": \"GetReportRequestList\"}.Merge(op)\n\tstructuredParams := params.StructureKeys(\"ReportRequestIdList\", \"Id\").\n\t\tStructureKeys(\"ReportTypeList\", \"Type\").\n\t\tStructureKeys(\"ReportProcessingStatusList\", \"Status\")\n\n\treturn r.SendRequest(structuredParams)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
TODO GetNewAddress does map to `getnewaddress` rpc call now rpcclient doesn't have such golang wrapper func.
func (client *BtcClient) GetNewAddress(account string) (string, error) { if len(account) == 0 { account = DEFAULT_ACCOUNT } address, err := client.rpcClient.GetNewAddress(account) if err != nil { return "", err } return address.String(), nil }
[ "func (client *Client) GetNewAddress(account string) (string, error) {\n\tif len(account) == 0 {\n\t\taccount = DEFAULT_ACCOUNT\n\t}\n\n\taddress, err := client.rpcClient.GetNewAddress(account)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tclient.l.Infof(\"[GetNewAddress] for account %s, address is %s\", account, address.String())\n\treturn address.String(), nil\n}", "func (d *DCRWallet) GetNewAddress() (string, error) {\n\treturn dcr.GetNewAddress(d.Testnet, d.RPCInfo)\n}", "func (l *LTCWallet) GetNewAddress() (string, error) {\n\treturn ltc.GetNewAddress(l.Testnet, l.RPCInfo)\n}", "func (b *Bitcoind) GetNewAddress(account ...string) (addr string, err error) {\n\t// 0 or 1 account\n\tif len(account) > 1 {\n\t\terr = errors.New(\"Bad parameters for GetNewAddress: you can set 0 or 1 account\")\n\t\treturn\n\t}\n\n\tr, err := b.client.call(\"getnewaddress\", account)\n\tif err = handleError(err, &r); err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(r.Result, &addr)\n\treturn\n}", "func (i *Instance) GetNewAddress(_ trinary.Trytes, _ api.GetNewAddressOptions) (trinary.Hashes, error) {\n\treturn i.addressResultValue, i.addressResultError\n}", "func (p *Poloniex) GenerateNewAddress(ctx context.Context, curr string) (string, error) {\n\ttype Response struct {\n\t\tSuccess int\n\t\tError string\n\t\tResponse string\n\t}\n\tresp := Response{}\n\tvalues := url.Values{}\n\tvalues.Set(\"currency\", curr)\n\n\terr := p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexGenerateNewAddress, values, &resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Error != \"\" {\n\t\treturn \"\", errors.New(resp.Error)\n\t}\n\n\treturn resp.Response, nil\n}", "func (b Bl3p) GetNewDepositAddress() (callModels.DepositAddress, error) {\n\n\tdepositAddress, err := b.requester(\"GENMKT/money/new_deposit_address\", nil)\n\n\tresult := callModels.DepositAddress{}\n\n\tif err == nil {\n\t\terr = json.Unmarshal(depositAddress.Data, 
&result)\n\t}\n\n\treturn result, err\n}", "func (h *HitBTC) GenerateNewAddress(ctx context.Context, currency string) (DepositCryptoAddresses, error) {\n\tresp := DepositCryptoAddresses{}\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost,\n\t\tapiV2CryptoAddress+\"/\"+currency,\n\t\turl.Values{},\n\t\totherRequests,\n\t\t&resp)\n\n\treturn resp, err\n}", "func (c *Constructor) newAddress(ctx context.Context) (string, error) {\n\tkp, err := keys.GenerateKeypair(c.curveType)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w unable to generate keypair\", err)\n\t}\n\n\taddress, _, err := c.helper.Derive(\n\t\tctx,\n\t\tc.network,\n\t\tkp.PublicKey,\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to derive address\", err)\n\t}\n\n\terr = c.helper.StoreKey(ctx, address, kp)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to store address\", err)\n\t}\n\n\tif err := c.handler.AddressCreated(ctx, address); err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: could not handle address creation\", err)\n\t}\n\n\treturn address, nil\n}", "func NewAddresser() Addresser { return &addresser{execute: rtnlExecute} }", "func walletNewAddresses(gateway *daemon.Gateway) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != http.MethodPost {\n\t\t\twh.Error405(w)\n\t\t\treturn\n\t\t}\n\n\t\twltID := r.FormValue(\"id\")\n\t\tif wltID == \"\" {\n\t\t\twh.Error400(w, \"missing wallet id\")\n\t\t\treturn\n\t\t}\n\n\t\t// the number of address that need to create, default is 1\n\t\tvar n uint64 = 1\n\t\tvar err error\n\t\tnum := r.FormValue(\"num\")\n\t\tif num != \"\" {\n\t\t\tn, err = strconv.ParseUint(num, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\twh.Error400(w, \"invalid num value\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\taddrs, err := gateway.NewAddresses(wltID, n)\n\t\tif err != nil {\n\t\t\twh.Error400(w, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tvar rlt = struct 
{\n\t\t\tAddress []string `json:\"addresses\"`\n\t\t}{}\n\n\t\tfor _, a := range addrs {\n\t\t\trlt.Address = append(rlt.Address, a.String())\n\t\t}\n\n\t\twh.SendOr404(w, rlt)\n\t\treturn\n\t}\n}", "func (dcr *ExchangeWallet) NewAddress() (string, error) {\n\treturn dcr.DepositAddress()\n}", "func (walletAPI *WalletAPI) WalletNewAddress(protocol address.Protocol) (address.Address, error) {\n\treturn walletAPI.adapter.NewAddress(protocol)\n}", "func (m *Module) NewAddress(ctx context.Context, typ string) (string, error) {\n\tvar ty crypto.SigType\n\tif typ == \"bls\" {\n\t\tty = crypto.SigTypeBLS\n\t} else if typ == \"secp256k1\" {\n\t\tty = crypto.SigTypeSecp256k1\n\t} else {\n\t\treturn \"\", fmt.Errorf(\"unknown address type %s\", typ)\n\t}\n\n\taddr, err := m.api.WalletNew(ctx, ty)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif m.masterAddr != address.Undef {\n\t\tmsg := &types.Message{\n\t\t\tFrom: m.masterAddr,\n\t\t\tTo: addr,\n\t\t\tValue: types.BigInt{Int: m.iAmount},\n\t\t\tGasLimit: 1000,\n\t\t\tGasPrice: types.NewInt(0),\n\t\t}\n\n\t\t_, err = m.api.MpoolPushMessage(ctx, msg)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"transferring funds to new address: %s\", err)\n\t\t}\n\t}\n\n\treturn addr.String(), nil\n}", "func newAddress(data []byte) sdk.AccAddress {\n\tif data == nil {\n\t\treturn nil\n\t}\n\t// h := blake2b.Sum256(data)\n\th := sha256.Sum256(data)\n\treturn h[:sdk.AddrLen]\n}", "func (f *FFS) NewAddr(ctx context.Context, name string, options ...NewAddressOption) (string, error) {\n\tr := &rpc.NewAddrRequest{Name: name}\n\tfor _, opt := range options {\n\t\topt(r)\n\t}\n\tresp, err := f.client.NewAddr(ctx, r)\n\treturn resp.Addr, err\n}", "func (m Minogrpc) GetAddressFactory() mino.AddressFactory {\n\treturn nil\n}", "func (walletAPI *WalletAPI) WalletNewAddress(protocol address.Protocol) (address.Address, error) {\n\treturn wallet.NewAddress(walletAPI.wallet.Wallet, protocol)\n}", "func newPeerAddr(ip net.IP, curTime time.Time) 
*peerAddr {\n\n\t// Create the new peer address.\n\tp := &peerAddr{\n\t\tip: ip,\n\t\tlastPing: ring.New(6),\n\t}\n\n\t// Record the current ping.\n\tp.lastPing.Value = curTime\n\n\treturn p\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
btc send raw transaction
func (client *BtcClient)BtcSendRawTrans(serial string , tx []byte)(string,error ){ var redeemTx wire.MsgTx err := json.Unmarshal(tx,&redeemTx) if err != nil { return "",err } sendResult,err := client.rpcClient.SendRawTransaction(&redeemTx,false) //sendResult,err := btcClient.SendRawTransactionAsync(&redeemTx,false).Receive() if err != nil { return "",err } return sendResult.String(),nil }
[ "func sendRawTx(eth *thereum.Thereum, msg *rpcMessage) (*rpcMessage, error) {\n\t// unmarshal into temp data structs (passed via json as a slice of a single hex string)\n\tvar hexTx []string\n\terr := json.Unmarshal(msg.Params, &hexTx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// ensure that some data was passed throught the rpc msg\n\tif len(hexTx) == 0 {\n\t\treturn nil, errors.New(\"no parameters provided for raw transaction\")\n\t}\n\t// unmarshal the hex bytes into a transaction\n\tvar tx types.Transaction\n\ttxBytes, err := hex.DecodeString(strings.Replace(hexTx[0], \"0x\", \"\", 1))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = rlp.DecodeBytes(txBytes, &tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// add the transaction to thereum\n\terr = eth.AddTx(&tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := &rpcMessage{\n\t\tVersion: \"2.0\",\n\t\tID: 1,\n\t\tResult: tx.Hash().Hex(),\n\t}\n\n\treturn out, nil\n}", "func SendRawTransaction(c context.Context, txHexString string) (string, error) {\n\tif isInit == false {\n\t\tInit()\n\t}\n\n\t// Copy same context values to local variables which are often accessed\n\tenv := c.Value(consts.EnvKey).(string)\n\n\tif env == \"dev\" {\n\t\treturn \"success\", nil\n\t}\n\n\t// Convert the hex string to a byte array\n\ttxBytes, err := hex.DecodeString(txHexString)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\t// Deserialise the transaction\n\ttx, err := btcutil.NewTxFromBytes(txBytes)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\tmsgTx := tx.MsgTx()\n\n\t// Notice the notification parameter is nil since notifications are\n\t// not supported in HTTP POST mode.\n\tclient, err := btcrpcclient.New(&config, nil)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\tdefer client.Shutdown()\n\n\t// Send the tx\n\tresult, err := client.SendRawTransaction(msgTx, 
true)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%s\", result.String()), nil\n}", "func sendRawTransaction(_privateKey string, recipientAddress string, methodName string, value int64, argAmount string) {\n\t//connect to ropsten through infura\n\tec, err := ethclient.Dial(\"https://ropsten.infura.io/\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tchainID := big.NewInt(3) //Ropsten\n\n\t//private key of sender\n\t//TODO: hide key when actual system is implemented\n\tprivateKey, err := crypto.HexToECDSA(_privateKey)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//get Public Key of sender\n\tpublicKey := privateKey.Public()\n\tpublicKey_ECDSA, valid := publicKey.(*ecdsa.PublicKey)\n\tif !valid {\n\t\tlog.Fatal(\"error casting public key to ECDSA\")\n\t}\n\n\t//get address of sender\n\tfromAddress := crypto.PubkeyToAddress(*publicKey_ECDSA)\n\n\t//get nonce of address\n\tnonce, err := ec.PendingNonceAt(context.Background(), fromAddress)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//get recipient address\n\trecipient := common.HexToAddress(recipientAddress)\n\n\tamount := big.NewInt(value) // 0 ether\n\tgasLimit := uint64(2000000)\n\tgasPrice, err := ec.SuggestGasPrice(context.Background())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttransferFnSignature := []byte(methodName)\n\thash := sha3.NewLegacyKeccak256()\n\thash.Write(transferFnSignature)\n\tmethodID := hash.Sum(nil)[:4]\n\t//fmt.Println(hexutil.Encode(methodID)) // 0xa9059cbb\n\n\targumentAmount := new(big.Int)\n\targumentAmount.SetString(argAmount, 10) //\n\tpaddedAmount := common.LeftPadBytes(argumentAmount.Bytes(), 32)\n\t//fmt.Println(hexutil.Encode(paddedAmount)) // 0x00000000000000000000000000000000000000000000003635c9adc5dea00000\n\n\tvar data []byte\n\tdata = append(data, methodID...)\n\tdata = append(data, paddedAmount...)\n\t//data := []byte(\"0x5c22b6b60000000000000000000000000000000000000000000000000000000000000007\")\n\t// 
fmt.Printf(\"nonce: %i\\n\", nonce)\n\t// fmt.Printf(\"amount: %i\\n\", amount)\n\t// fmt.Printf(\"gasLimit: %s\\n\", gasLimit)\n\t// fmt.Printf(\"gasPrice: %s\\n\", gasPrice)\n\tfmt.Printf(\"data: %x\\n\", data)\n\n\t//create raw transaction\n\ttransaction := types.NewTransaction(nonce, recipient, amount, gasLimit, gasPrice, data)\n\n\t//sign transaction for ropsten network\n\tsigner := types.NewEIP155Signer(chainID)\n\tsignedTx, err := types.SignTx(transaction, signer, privateKey)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// var buff bytes.Buffer\n\t// signedTx.EncodeRLP(&buff)\n\t// fmt.Printf(\"0x%x\\n\", buff.Bytes())\n\n\t//fmt.Println(signedTx)\n\t//broadcast transaction\n\terr = ec.SendTransaction(context.Background(), signedTx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Printf(\"tx sent: %s\\n\", signedTx.Hash().Hex())\n\n\t// jsonData := fmt.Sprintf(` {\"jsonrpc\":\"2.0\", \"method\":\"eth_sendRawTransaction\", \"params\": [\"0x%x\"], \"id\":4}`, buff.Bytes())\n\t// //params := buff.String()\n\t// fmt.Printf(\"%s\\n\", jsonData)\n\t// response, err := http.Post(\"https://rinkeby.infura.io/gnNuNKvHFmjf9xkJ0StE\", \"application/json\", strings.NewReader(jsonData))\n\t// if err != nil {\n\n\t// \tfmt.Printf(\"Request to INFURA failed with an error: %s\\n\", err)\n\t// \tfmt.Println()\n\n\t// } else {\n\t// \tdata, _ := ioutil.ReadAll(response.Body)\n\n\t// \tfmt.Println(\"INFURA response:\")\n\t// \tfmt.Println(string(data))\n\t// }\n}", "func (a API) SendRawTransaction(cmd *btcjson.SendRawTransactionCmd) (e error) {\n\tRPCHandlers[\"sendrawtransaction\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (c BitcoinCoreChain) RawTx(cxt context.Context, from, to, amount, memo, asset string) (string, error) {\n if configure.ChainAssets[asset] != Bitcoin {\n return \"\", fmt.Errorf(\"Unsupport %s in bitcoincore\", asset)\n }\n amountF, err := strconv.ParseFloat(amount, 64)\n if err != nil {\n return \"\", err\n }\n txAmountSatoshi, err := 
btcutil.NewAmount(amountF)\n if err != nil {\n return \"\", err\n }\n\n fromPkScript, err := BitcoincoreAddressP2AS(from, c.Mode)\n if err != nil {\n return \"\", err\n }\n toPkScript, err := BitcoincoreAddressP2AS(to, c.Mode)\n if err != nil {\n return \"\", err\n }\n\n // query bitcoin chain info\n chaininfo, err := c.Client.GetBlockChainInfo()\n if err != nil {\n return \"\", err\n }\n // feeKB, err := c.Client.EstimateFee(int64(6))\n feeKB, err := c.Client.EstimateSmartFee(int64(6))\n if err != nil {\n return \"\", err\n }\n feeRate := mempool.SatoshiPerByte(feeKB.FeeRate)\n\n if feeKB.FeeRate <= 0 {\n feeRate = mempool.SatoshiPerByte(100)\n }\n\n var (\n selectedutxos, unselectedutxos []db.UTXO\n selectedCoins coinset.Coins\n )\n\n // Coin Select\n if strings.ToLower(configure.ChainsInfo[Bitcoin].Coin) == strings.ToLower(asset) {\n // select coins for BTC transfer\n if selectedutxos, unselectedutxos, selectedCoins, err = CoinSelect(int64(chaininfo.Headers), txAmountSatoshi, c.Wallet.Address.UTXOs); err != nil {\n return \"\", fmt.Errorf(\"Select UTXO for tx %s\", err)\n }\n }else {\n // select coins for Token transfer\n // 300: https://bitcoin.stackexchange.com/questions/1195/how-to-calculate-transaction-size-before-sending-legacy-non-segwit-p2pkh-p2sh\n inputAmount := feeRate.Fee(uint32(300))\n if selectedutxos, unselectedutxos, selectedCoins, err = CoinSelect(int64(chaininfo.Headers), inputAmount, c.Wallet.Address.UTXOs); err != nil {\n return \"\", fmt.Errorf(\"Select UTXO for tx %s\", err)\n }\n }\n\n var vinAmount int64\n for _, coin := range selectedCoins.Coins() {\n vinAmount += int64(coin.Value())\n }\n msgTx := coinset.NewMsgTxWithInputCoins(wire.TxVersion, selectedCoins)\n\n token := configure.ChainsInfo[Bitcoin].Tokens[strings.ToLower(asset)]\n if token != \"\" && strings.ToLower(asset) != strings.ToLower(configure.ChainsInfo[Bitcoin].Coin) {\n // OmniToken transfer\n b := txscript.NewScriptBuilder()\n b.AddOp(txscript.OP_RETURN)\n\n omniVersion := 
util.Int2byte(uint64(0), 2)\t// omnicore version\n txType := util.Int2byte(uint64(0), 2)\t// omnicore tx type: simple send\n propertyID := configure.ChainsInfo[Bitcoin].Tokens[asset]\n tokenPropertyid, err := strconv.Atoi(propertyID)\n if err != nil {\n return \"\", fmt.Errorf(\"tokenPropertyid to int %s\", err)\n }\n // tokenPropertyid := configure.Config.OmniToken[\"omni_first_token\"].(int)\n tokenIdentifier := util.Int2byte(uint64(tokenPropertyid), 4)\t// omni token identifier\n tokenAmount := util.Int2byte(uint64(txAmountSatoshi), 8)\t// omni token transfer amount\n\n b.AddData([]byte(\"omni\"))\t// transaction maker\n b.AddData(omniVersion)\n b.AddData(txType)\n b.AddData(tokenIdentifier)\n b.AddData(tokenAmount)\n pkScript, err := b.Script()\n if err != nil {\n return \"\", fmt.Errorf(\"Bitcoin Token pkScript %s\", err)\n }\n msgTx.AddTxOut(wire.NewTxOut(0, pkScript))\n txOutReference := wire.NewTxOut(0, toPkScript)\n msgTx.AddTxOut(txOutReference)\n }else {\n // BTC transfer\n txOutTo := wire.NewTxOut(int64(txAmountSatoshi), toPkScript)\n msgTx.AddTxOut(txOutTo)\n\n // recharge\n // 181, 34: https://bitcoin.stackexchange.com/questions/1195/how-to-calculate-transaction-size-before-sending-legacy-non-segwit-p2pkh-p2sh\n fee := feeRate.Fee(uint32(msgTx.SerializeSize() + 181 + 34))\n if (vinAmount - int64(txAmountSatoshi) - int64(fee)) > 0 {\n txOutReCharge := wire.NewTxOut((vinAmount-int64(txAmountSatoshi) - int64(fee)), fromPkScript)\n msgTx.AddTxOut(txOutReCharge)\n }else {\n selectedutxoForFee, _, selectedCoinsForFee, err := CoinSelect(int64(chaininfo.Headers), fee, unselectedutxos)\n if err != nil {\n return \"\", fmt.Errorf(\"Select UTXO for fee %s\", err)\n }\n for _, coin := range selectedCoinsForFee.Coins() {\n vinAmount += int64(coin.Value())\n }\n txOutReCharge := wire.NewTxOut((vinAmount - int64(txAmountSatoshi) - int64(fee)), fromPkScript)\n msgTx.AddTxOut(txOutReCharge)\n selectedutxos = append(selectedutxos, selectedutxoForFee...)\n }\n }\n\n buf 
:= bytes.NewBuffer(make([]byte, 0, msgTx.SerializeSize()))\n msgTx.Serialize(buf)\n rawTxHex := hex.EncodeToString(buf.Bytes())\n c.Wallet.SelectedUTXO = selectedutxos\n return rawTxHex, nil\n}", "func sendTransaction(scid string, entry string, to string, amount int64, id string) {\n\t\n\twalletURL:= \"http://127.0.0.1:30309/json_rpc\"\n\tvar amountString string\t\n\tif amount == 0 {\n\t\tamountString = \"\"\n\t} else {\t\n\t\tamountString = strconv.FormatInt(amount, 10)\n\t}\n\tdata:= PayloadGeneral{\n\t\tJsonrpc: \"2.0\", \n\t\tID: \"0\",\n\t\tMethod: \"transfer_split\",\n\t\tParams: Params2{\n\t\t\tMixin: 5,\n\t\t\tGetTxKey: true,\n\t\t\tScTx: ScTx2{\n\t\t\t\tEntrypoint: entry,\n\t\t\t\tScid: scid,\n\t\t\t\tValue: 0,\n\t\t\t\tParams: Params3{\n\t\t\t\t\t\tTo: to,\n\t\t\t\t\t\tAmount: amountString,\n\t\t\t\t\t\tID: id,\n\t\t\t\t},\n\t\t\t}, \n\t\t},\n\t}\n\n\t\n\tpayloadBytes, err := json.Marshal(data)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tbody := bytes.NewReader(payloadBytes)\n\t\n\t_, err=rpcPost(body, walletURL)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\t\n\t//println(result)\t\n\tfmt.Println(\"Transaction sent to wallet!\")\n\t\n}", "func (w *rpcWallet) SendRawTransaction(ctx context.Context, tx *wire.MsgTx, allowHighFees bool) (*chainhash.Hash, error) {\n\thash, err := w.client().SendRawTransaction(ctx, tx, allowHighFees)\n\treturn hash, translateRPCCancelErr(err)\n}", "func doSendTransaction(ctx *Context, threadNo int, acc *accounts.Account, nodeAddress keys.Address, randomRev bool, currencies balance.Currencies) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Println(\"panic in doSendTransaction: thread\", threadNo, r)\n\t\t}\n\t}()\n\n\t// generate a random amount\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tnum := r.Float64() * 10 // amount is a random float between [0, 10)\n\t// populate send arguments\n\tsendArgsLocal := SendArguments{}\n\tsendArgsLocal.Party = 
[]byte(nodeAddress)\n\tif randomRev {\n\t\trecv := ed25519.GenPrivKey().PubKey().Address()\n\t\tsendArgsLocal.CounterParty = []byte(recv)\n\t} else {\n\t\tsendArgsLocal.CounterParty = []byte(acc.Address()) // receiver is the temp account\n\t}\n\n\t// set amount and fee\n\tsendArgsLocal.Amount = strconv.FormatFloat(num, 'f', 6, 64)\n\tsendArgsLocal.Fee = strconv.FormatFloat(0.0000003, 'f', 9, 64)\n\tsendArgsLocal.Currency = \"OLT\"\n\tsendArgsLocal.Gas = 200030\n\n\t// Create message\n\tfullnode := ctx.clCtx.FullNodeClient()\n\n\treq, err := sendArgsLocal.ClientRequest(currencies.GetCurrencySet())\n\tif err != nil {\n\t\tctx.logger.Error(\"failed to get request\", err)\n\t}\n\n\treply, err := fullnode.SendTx(req)\n\tif err != nil {\n\t\tctx.logger.Error(acc.Name, \"error executing SendTx\", err)\n\t\treturn\n\t}\n\n\t// check packet\n\tpacket := reply.RawTx\n\tif packet == nil {\n\t\tctx.logger.Error(acc.Name, \"error in creating new SendTx but server responded with no error\")\n\t\treturn\n\t}\n\n\t// broadcast packet over tendermint\n\tresult, err := ctx.clCtx.BroadcastTxAsync(packet)\n\tif err != nil {\n\t\tctx.logger.Error(acc.Name, \"error in BroadcastTxAsync:\", err)\n\t\treturn\n\t}\n\n\tctx.logger.Info(acc.Name, \"Result Data\", \"log\", string(result.Log))\n}", "func (h *TransactionHandler) sendTransaction(userbank_id int, transaction_id int, totalAmount int) error {\r\n\tjsonW := &SendTransaction{\r\n\t\tUserBankID: userbank_id,\r\n\t\tTransactionID: transaction_id,\r\n\t\tAmount: totalAmount,\r\n\t}\r\n\r\n\tjsonR, _ := json.Marshal(jsonW)\r\n\tjsonStr := []byte(string(jsonR))\r\n\r\n\treq, _ := http.NewRequest(\"POST\", h.Config.BankAPIUrl+\"/transaction\", bytes.NewBuffer(jsonStr))\r\n\treq.Header.Set(\"Content-Type\", \"application/json\")\r\n\r\n\tclient := &http.Client{}\r\n\t_, err2 := client.Do(req)\r\n\r\n\tif err2 != nil {\r\n\t\treturn errors.New(\"Gagal menghubungkan ke server 2\")\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (api 
*PublicEthereumAPI) SendRawTransaction(data hexutil.Bytes) (common.Hash, error) {\n\tapi.logger.Debug(\"eth_sendRawTransaction\", \"data\", data)\n\ttx := new(evmtypes.MsgEthereumTx)\n\n\t// RLP decode raw transaction bytes\n\tif err := rlp.DecodeBytes(data, tx); err != nil {\n\t\t// Return nil is for when gasLimit overflows uint64\n\t\treturn common.Hash{}, nil\n\t}\n\n\t// Encode transaction by default Tx encoder\n\ttxEncoder := authclient.GetTxEncoder(api.clientCtx.Codec)\n\ttxBytes, err := txEncoder(tx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\n\t// TODO: Possibly log the contract creation address (if recipient address is nil) or tx data\n\t// If error is encountered on the node, the broadcast will not return an error\n\tres, err := api.clientCtx.BroadcastTx(txBytes)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\n\tif res.Code != abci.CodeTypeOK {\n\t\treturn common.Hash{}, fmt.Errorf(res.RawLog)\n\t}\n\t// Return transaction hash\n\treturn common.HexToHash(res.TxHash), nil\n}", "func SendRawTX(rawTx []byte, endpoint string) (string, error) {\n\treqData := fmt.Sprintf(\n\t\t`{\"jsonrpc\":\"2.0\",\"method\":\"eth_sendRawTransaction\",\"params\":[\"0x%x\"],\"id\":1}`, rawTx)\n\tresp, err := doPost(reqData, endpoint)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to send raw transaction, as: %v\", err)\n\t}\n\ttxHash := new(string)\n\tjson.Unmarshal(resp.Result, txHash)\n\treturn fmt.Sprintf(\"%s\", *txHash), nil\n}", "func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) {\n\ttx := new(types.Transaction)\n\tif err := rlp.DecodeBytes(encodedTx, tx); err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\tc := s.b.ChainConfig().ChainID\n\tif tx.ChainID().Cmp(c) != 0 {\n\t\te := errors.Wrapf(errInvalidChainID, \"current chain id:%s\", c.String())\n\t\treturn common.Hash{}, e\n\t}\n\treturn SubmitTransaction(ctx, s.b, tx)\n}", "func broadcastTransaction(tx transaction) 
error {\n\tjsonTx, _ := json.Marshal(&tx)\n\tresp, err := http.Post(fmt.Sprintf(\"%s/transaction/send\", proxyHost), \"\",\n\t\tstrings.NewReader(string(jsonTx)))\n\tif err != nil {\n\t\tlog.Println(errSendingTx)\n\t\treturn err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\tif err != nil {\n\t\tlog.Println(errSendingTx)\n\t\treturn err\n\t}\n\tres := string(body)\n\tfmt.Printf(\"Result: %s\\n\\r\", res)\n\treturn nil\n}", "func handleWalletSendRawTransaction(s *rpcServer, cmd btcjson.Cmd, wallet walletChan) error {\n\tresult, err := handleSendRawTransaction(s, cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// The result is already guaranteed to be a valid hash string if no\n\t// error was returned above, so it's safe to ignore the error here.\n\ttxSha, _ := btcwire.NewShaHashFromStr(result.(string))\n\n\t// Request to be notified when the transaction is mined.\n\ts.ws.AddMinedTxRequest(wallet, txSha)\n\treturn nil\n}", "func handleSendRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {\n\tc := cmd.(*rpcmodel.SendRawTransactionCmd)\n\t// Deserialize and send off to tx relay\n\thexStr := c.HexTx\n\tif len(hexStr)%2 != 0 {\n\t\thexStr = \"0\" + hexStr\n\t}\n\tserializedTx, err := hex.DecodeString(hexStr)\n\tif err != nil {\n\t\treturn nil, rpcDecodeHexError(hexStr)\n\t}\n\tvar msgTx wire.MsgTx\n\terr = msgTx.Deserialize(bytes.NewReader(serializedTx))\n\tif err != nil {\n\t\treturn nil, &rpcmodel.RPCError{\n\t\t\tCode: rpcmodel.ErrRPCDeserialization,\n\t\t\tMessage: \"TX decode failed: \" + err.Error(),\n\t\t}\n\t}\n\n\t// Use 0 for the tag to represent local node.\n\ttx := util.NewTx(&msgTx)\n\tacceptedTxs, err := s.cfg.TxMemPool.ProcessTransaction(tx, false, 0)\n\tif err != nil {\n\t\t// When the error is a rule error, it means the transaction was\n\t\t// simply rejected as opposed to something actually going wrong,\n\t\t// so log it as such. 
Otherwise, something really did go wrong,\n\t\t// so log it as an actual error. In both cases, a JSON-RPC\n\t\t// error is returned to the client with the deserialization\n\t\t// error code\n\t\tif errors.As(err, &mempool.RuleError{}) {\n\t\t\tlog.Debugf(\"Rejected transaction %s: %s\", tx.ID(),\n\t\t\t\terr)\n\t\t} else {\n\t\t\tlog.Errorf(\"Failed to process transaction %s: %s\",\n\t\t\t\ttx.ID(), err)\n\t\t}\n\t\treturn nil, &rpcmodel.RPCError{\n\t\t\tCode: rpcmodel.ErrRPCVerify,\n\t\t\tMessage: \"TX rejected: \" + err.Error(),\n\t\t}\n\t}\n\n\t// When the transaction was accepted it should be the first item in the\n\t// returned array of accepted transactions. The only way this will not\n\t// be true is if the API for ProcessTransaction changes and this code is\n\t// not properly updated, but ensure the condition holds as a safeguard.\n\t//\n\t// Also, since an error is being returned to the caller, ensure the\n\t// transaction is removed from the memory pool.\n\tif len(acceptedTxs) == 0 || !acceptedTxs[0].Tx.ID().IsEqual(tx.ID()) {\n\t\terr := s.cfg.TxMemPool.RemoveTransaction(tx, true, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terrStr := fmt.Sprintf(\"transaction %s is not in accepted list\",\n\t\t\ttx.ID())\n\t\treturn nil, internalRPCError(errStr, \"\")\n\t}\n\n\t// Generate and relay inventory vectors for all newly accepted\n\t// transactions into the memory pool due to the original being\n\t// accepted.\n\ts.cfg.ConnMgr.RelayTransactions(acceptedTxs)\n\n\t// Notify both websocket and getBlockTemplate long poll clients of all\n\t// newly accepted transactions.\n\ts.NotifyNewTransactions(acceptedTxs)\n\n\t// Keep track of all the sendRawTransaction request txns so that they\n\t// can be rebroadcast if they don't make their way into a block.\n\ttxD := acceptedTxs[0]\n\tiv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(txD.Tx.ID()))\n\ts.cfg.ConnMgr.AddRebroadcastInventory(iv, txD)\n\n\treturn tx.ID().String(), nil\n}", "func (c 
*EthCli) SendTrx(fromAddress, toAddress, token, amount string, data []byte, key string, priceIn uint64, dryRun bool) (priceOut, gasLimit uint64, hash []byte, err error) {\n\tvar nonce uint64\n\tvar ok bool\n\tvar to common.Address\n\tvar fromKey *ecdsa.PrivateKey\n\tvar amt *big.Int = new(big.Int)\n\tvar raw, tmp, tmpAmt []byte\n\n\t// check arguments\n\tif fromAddress[:2] != \"0x\" || len(fromAddress) != 42 {\n\t\terr = ErrBadFrom\n\t}\n\tif toAddress[:2] != \"0x\" || len(toAddress) != 42 {\n\t\terr = ErrBadTo\n\t}\n\tif token != \"\" && (token[:2] != \"0x\" || len(token) != 42) {\n\t\terr = ErrBadToken\n\t}\n\tif amount[:2] != \"0x\" {\n\t\terr = ErrWrongAmt\n\t}\n\tif len(key) != 64 {\n\t\terr = ErrBadKey\n\t}\n\t// from\n\tif fromKey, err = crypto.HexToECDSA(key); err != nil {\n\t\treturn\n\t}\n\t// check amount and get amt for signTrx\n\tamt, ok = amt.SetString(amount, 0)\n\tif !ok || len(amt.Bytes()) > 32 {\n\t\treturn priceOut, gasLimit, hash, ErrWrongAmt\n\t}\n\t// get nonce\n\tif nonce, err = c.GetTransactionCount(fromAddress, \"latest\"); err != nil {\n\t\treturn\n\t}\n\t// get to, data\n\tif token == \"\" {\n\t\tto = common.HexToAddress(toAddress)\n\t} else {\n\t\t// make sure data comes empty!\n\t\tif data != nil {\n\t\t\treturn priceOut, gasLimit, hash, ErrSendTokenData\n\t\t}\n\t\tto = common.HexToAddress(token)\n\t\tvar toAddr []byte\n\t\ttoAddr, err = hex.DecodeString(toAddress[2:])\n\t\ttmpAmt = make([]byte, 32)\n\t\tcopy(tmpAmt[32-len(amt.Bytes()):32], amt.Bytes()[:])\n\t\tamt.Sub(amt, amt) // amt = 0 as we do not send ether!!\n\t\t// build data for token transaction: methodId (4), to address (32), amount (32)\n\t\ttmp = make([]byte, 0, 4+32+32)\n\t\ttmp = append(tmp, 0xa9, 0x05, 0x9c, 0xbb) // transfer = 0xa9059cbb\n\t\ttmp = append(tmp, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) // pad 12 zeroes\n\t\ttmp = append(tmp, toAddr[:]...) // to\n\t\ttmp = append(tmp, tmpAmt[:]...) 
// amount\n\t\tdata = tmp\n\t}\n\t// get gasPrice\n\tif priceIn == 0 {\n\t\tpriceOut, err = c.GasPrice()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tpriceOut = priceIn\n\t}\n\t// gasPrice and gasLimit\n\tgasPrice := new(big.Int).SetUint64(priceOut)\n\tif token == \"\" {\n\t\tgasLimit, err = c.EstimateGas(toAddress, amount, \"0x\"+hex.EncodeToString(data))\n\t} else {\n\t\tgasLimit, err = c.EstimateGas(token, \"0x00\", \"0x\"+hex.EncodeToString(tmp))\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\t// generate transaction, get hash and raw signed transaction\n\tif hash, raw, err = signTrx(nonce, to, amt, gasLimit, gasPrice, data, fromKey); err != nil {\n\t\treturn\n\t}\n\t// send transaction to blockchain\n\tif !dryRun {\n\t\t_, err = c.sendRawTransaction(raw)\n\t}\n\treturn\n}", "func (cli *CommandLine) send(from, to string, amount int) {\n\tchain := blockchain.ContinueBlockChain(from)\n\tdefer chain.Database.Close()\n\n\ttx := blockchain.NewTransaction(from, to, amount, chain)\n\tchain.AddBlock([]*blockchain.Transaction{tx})\n\tfmt.Println(\"Success send token\")\n}", "func (self *Wallet)SendMoney(amount int,receiver utils.Addr)error{\n total,err := strconv.Atoi(self.GetTotal())\n if err!=nil{return err}\n if total<amount{\n return errors.New(\"You does not have enough money\")\n }\n // sort the available Entries by value\n sort.SliceStable(self.Entries, func(i, j int) bool {\n return self.Entries[i].Value < self.Entries[j].Value\n })\n var inputs []TrInput\n var outputs []TrOutput\n var spending int = 0\n // collects the entries to use up to the amount\n for i:=0;spending<amount;i++{\n inputs = append(inputs,self.Entries[i].Spendable)\n spending += self.Entries[i].Value\n }\n out := new(TrOutput)\n out.Address = receiver\n out.Value = amount\n outputs = append(outputs,*out)\n // send back the remainder\n if spending-amount>0{\n remainder := new(TrOutput)\n remainder.Address = self.Id\n remainder.Value = spending-amount\n outputs = 
append(outputs,*remainder)\n }\n // create the transaction\n newTransact := MakeStdTransaction(\n self.Id,\n self.Key,\n inputs,\n outputs)\n if !utils.CheckSignature(newTransact.Signature,newTransact.Hash,newTransact.Creator){\n return errors.New(\"Invalid Signature\")\n }\n if newTransact==nil{\n return errors.New(\"Error in StdTransaction creation\")\n }\n // send the transaction to the miner\n conn, err := net.Dial(\"tcp\",self.MinerIp)\n if err!=nil{ return err }\n fmt.Fprintf(conn,\"transaction\\n\")\n fmt.Fprintf(conn,TrStd+\"\\n\")\n fmt.Fprintf(conn,string(newTransact.Serialize())+\"\\n\")\n fmt.Println(\"Sent transaction request to miner\")\n return nil\n}", "func SendTransaction(from, to common.Address,\n\tprivKey *ecdsa.PrivateKey,\n\tethclient ComponentConfig,\n\tdata []byte) ([]byte, error) {\n\n\t// Set the parameters of the transaction\n\tvalue := big.NewInt(0)\n\tgasLimit := uint64(400000)\n\tgasPrice := big.NewInt(0)\n\n\tnonce, err := ethclient.EthereumClient.PendingNonceAt(context.Background(), from)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Create the transaction\n\ttx := types.NewTransaction(nonce, to, value, gasLimit, gasPrice, data)\n\n\t// Sign the transaction with the private key of the sender\n\tchainID, err := ethclient.EthereumClient.NetworkID(context.Background())\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\tsignedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), privKey)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\t// Send the transaction\n\terr = ethclient.EthereumClient.SendTransaction(context.Background(), signedTx)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn signedTx.Hash().Bytes(), nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
decodeAddress from string to decodedAddress
func decodeAddress(address string, cfg *chaincfg.Params) (btcutil.Address, error) { decodedAddress, err := btcutil.DecodeAddress(address, cfg) if err != nil { return nil, err } return decodedAddress, nil }
[ "func DecodeString(addr string) (Address, error) {\n\t// Remove any leading slashes.\n\tif strings.HasPrefix(addr, \"/\") {\n\t\taddr = addr[1:]\n\t}\n\n\taddrParts := strings.Split(addr, \"/\")\n\tif len(addrParts) != 4 {\n\t\treturn Address{}, fmt.Errorf(\"invalid format %v\", addr)\n\t}\n\tvar protocol Protocol\n\tswitch addrParts[0] {\n\tcase \"tcp\":\n\t\tprotocol = TCP\n\tcase \"udp\":\n\t\tprotocol = UDP\n\tcase \"ws\":\n\t\tprotocol = WebSocket\n\tdefault:\n\t\treturn Address{}, fmt.Errorf(\"invalid protocol %v\", addrParts[0])\n\t}\n\tvalue := addrParts[1]\n\tnonce, err := strconv.ParseUint(addrParts[2], 10, 64)\n\tif err != nil {\n\t\treturn Address{}, err\n\t}\n\tvar sig id.Signature\n\tsigBytes, err := base64.RawURLEncoding.DecodeString(addrParts[3])\n\tif err != nil {\n\t\treturn Address{}, err\n\t}\n\tif len(sigBytes) != 65 {\n\t\treturn Address{}, fmt.Errorf(\"invalid signature %v\", addrParts[3])\n\t}\n\tcopy(sig[:], sigBytes)\n\treturn Address{\n\t\tProtocol: protocol,\n\t\tValue: value,\n\t\tNonce: nonce,\n\t\tSignature: sig,\n\t}, nil\n}", "func decodeAddress(address string, cfg chaincfg.Params) (btcutil.Address, error) {\n\tdecodedAddress, err := btcutil.DecodeAddress(address, &cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn decodedAddress, nil\n}", "func DecodeAddress(r stdio.Reader) (Address, error) {\n\treturn wallet.DecodeAddress(r)\n}", "func (semi *implsemi) DecodeAddress(octets []byte) (str string) {\n\tfor _, oct := range octets {\n\t\thalf := oct >> 4\n\t\tif half == 0xF {\n\t\t\tstr += fmt.Sprintf(\"%d\", oct&0x0F)\n\t\t\treturn\n\t\t}\n\t\tstr += fmt.Sprintf(\"%d%d\", oct&0x0F, half)\n\t}\n\treturn\n}", "func (c *Client) DecodeAddress(resp *http.Response) (*Address, error) {\n\tvar decoded Address\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}", "func DecodePeerAddress(x string) string {\n\treturn nettools.BinaryToDottedPort(x)\n}", "func DecodeAddress(b 
[]byte) (net.IP, []byte, error) {\n\tif len(b) < 6 {\n\t\treturn nil, nil, errors.New(\"too short\")\n\t}\n\n\t// IPv4\n\tif b[0] == 4 && b[1] == 4 {\n\t\treturn net.IP(b[2:6]), b[6:], nil\n\t}\n\n\t// IPv6\n\tif len(b) < 18 {\n\t\treturn nil, nil, errors.New(\"too short\")\n\t}\n\tif b[0] == 6 && b[1] == 16 {\n\t\treturn net.IP(b[2:18]), b[18:], nil\n\t}\n\n\treturn nil, nil, errors.New(\"unrecognized format\")\n}", "func DecodeAddr(address []byte) string {\n\tvar stringAddr string\n\tvar ip []byte\n\tvar port []byte\n\n\tip = address[:4]\n\tport = address[4:]\n\n\t// Decode IP\n\tfor index, octet := range ip {\n\t\tstringAddr = stringAddr + strconv.Itoa(int(octet))\n\t\tif index != 3 {\n\t\t\tstringAddr += \".\"\n\t\t}\n\t}\n\tstringAddr += \":\"\n\n\t// Decode Port\n\tb := make([]byte, 8)\n\tfor i := 0; i < 6; i++ {\n\t\tb[i] = byte(0)\n\t}\n\tb[6] = port[0]\n\tb[7] = port[1]\n\tp := binary.BigEndian.Uint64(b)\n\tstringAddr += strconv.FormatUint(p, 10)\n\t//fmt.Println(\"Complete IP:\", stringAddr)\n\treturn stringAddr\n}", "func decodePeerAddress(chunk string) string {\n\tip := net.IPv4(chunk[0], chunk[1], chunk[2], chunk[3])\n\tremotePort := 256*int(chunk[4]) + int(chunk[5]) // Port is given in network encoding.\n\treturn fmt.Sprintf(\"%s:%d\", ip.String(), remotePort)\n}", "func decodeAddress(address string) (uint32, error) {\n\tsplit := strings.Split(address, \".\")\n\tif len(split) != 4 {\n\t\treturn 0, errors.New(\"Error decoding IPv4 address: wrong amount of octets\")\n\t}\n\tvar IPaddress uint32\n\tfor i, octetstr := range split {\n\t\tsegment, err := strconv.Atoi(octetstr)\n\t\tif err != nil {\n\t\t\treturn 0, errors.Wrap(err, \"Error decoding IPv4 address\")\n\t\t}\n\t\tif segment > math.MaxUint8 {\n\t\t\treturn 0, errors.New(\"Error decoding IPv4 address: value overflow\")\n\t\t}\n\t\t// Shift octets by determined amount of bits.\n\t\tswitch i {\n\t\tcase 0:\n\t\t\tsegment = segment << 24\n\t\tcase 1:\n\t\t\tsegment = segment << 16\n\t\tcase 
2:\n\t\t\tsegment = segment << 8\n\t\t}\n\t\tIPaddress += uint32(segment)\n\t}\n\treturn IPaddress, nil\n}", "func stringToAddress(str string) (interface{}, error) {\n\treturn common.HexToAddress(str), nil\n}", "func decodeAddresses(val []byte) ([]common.Address, error) {\n\ts := string(val)\n\tvar res []common.Address\n\tif s == \"\" {\n\t\treturn res, nil\n\t}\n\tfor _, a := range strings.Split(s, \",\") {\n\t\tif !common.IsHexAddress(a) {\n\t\t\treturn nil, errors.Errorf(\"malformed address: %q\", s)\n\t\t}\n\n\t\tres = append(res, common.HexToAddress(a))\n\t}\n\treturn res, nil\n}", "func (c *Client) DecodeEasypostAddress(resp *http.Response) (*EasypostAddress, error) {\n\tvar decoded EasypostAddress\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}", "func DecodePeerAddress(encoded bencoding.String) (addr net.TCPAddr, err error) {\n\tif len(encoded) != 6 {\n\t\terr = errors.New(\"encoded address has wrong length (should be 6)\")\n\t} else {\n\t\taddr = net.TCPAddr{\n\t\t\tIP: net.IPv4(encoded[0], encoded[1], encoded[2], encoded[3]),\n\t\t\tPort: int(encoded[4])<<8 + int(encoded[5]),\n\t\t}\n\t}\n\n\treturn\n}", "func Decode(addr string) (bitcoinAddr, domain string, valid bool) {\n\ti := strings.Index(addr, \"@\")\n\tif i < 0 {\n\t\treturn \"\", \"\", false\n\t}\n\n\tbefore := addr[:i]\n\tdomain = addr[i+1:]\n\n\ti = strings.Index(before, \"+\")\n\tif i < 0 {\n\t\treturn \"\", \"\", false\n\t}\n\n\tbitcoinAddr = before[:i]\n\n\texpected, err := Encode(bitcoinAddr, domain)\n\tif err != nil {\n\t\treturn \"\", \"\", false\n\t}\n\n\tif addr != expected {\n\t\treturn \"\", \"\", false\n\t}\n\n\treturn bitcoinAddr, domain, true\n}", "func NewFromString(addr string) (*Address, error) {\n\tlegaddr, err := legacy.Decode(addr)\n\tif err == nil {\n\t\taddr, err := NewFromLegacy(legaddr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn addr, nil\n\t}\n\n\tcashaddr, err := cashaddress.Decode(addr, 
cashaddress.MainNet)\n\tif err == nil {\n\t\taddr, err := NewFromCashAddress(cashaddr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn addr, nil\n\t}\n\n\treturn nil, errors.New(\"unable to decode address\")\n}", "func FromString(address string) *Address {\n\taddrLowered := strings.ToLower(address)\n\tmatches := regexAddress.FindStringSubmatch(addrLowered)\n\tif len(matches) == 6 {\n\t\tdom := \"0000\"\n\t\tif matches[1] != \"\" {\n\t\t\tdom = matches[2]\n\t\t}\n\t\treturn &Address{\n\t\t\tDomain: dom,\n\t\t\tBus: matches[3],\n\t\t\tDevice: matches[4],\n\t\t\tFunction: matches[5],\n\t\t}\n\t}\n\treturn nil\n}", "func ConvertUserStrToAddress(userFAddr string) []byte {\n\tv := base58.Decode(userFAddr)\n\treturn v[2:34]\n}", "func DecodeRawAddress(s []byte) (Address, error) {\n\tif len(s) == 0 {\n\t\treturn nil, errors.New(\"empty address\")\n\t}\n\n\theader := s[0]\n\tnetwork := Network(header & 0x0f)\n\n\treadAddrCred := func(bit byte, pos int) StakeCredential {\n\t\thashBytes := s[pos : pos+Hash28Size]\n\t\tif header&(1<<bit) == 0 {\n\t\t\treturn StakeCredential{Kind: KeyStakeCredentialType, Data: hashBytes}\n\t\t}\n\t\treturn StakeCredential{Kind: ScriptStakeCredentialype, Data: hashBytes}\n\t}\n\n\tswitch (header & 0xf0) >> 4 {\n\t// Base type\n\tcase 0b0000, 0b0001, 0b0010, 0b0011:\n\t\t// header + keyhash\n\t\tif len(s) != 57 {\n\t\t\treturn nil, errors.New(\"Invalid length for base address\")\n\t\t}\n\t\treturn &BaseAddress{Network: network, Payment: readAddrCred(4, 1),\n\t\t\tStake: readAddrCred(5, Hash28Size+1)}, nil\n\t// Pointer type\n\tcase 0b0100, 0b0101:\n\t\t// header + keyhash + 3 natural numbers (min 1 byte each)\n\t\tif len(s) < 32 {\n\t\t\treturn nil, errors.New(\"Invalid length for pointer address\")\n\t\t}\n\t\tbyteIndex := 1\n\t\tpaymentCred := readAddrCred(4, 1)\n\t\tslot, slotBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"slot variable decode failed\")\n\t\t}\n\t\tbyteIndex += 
slotBytes\n\n\t\ttxIndex, txBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"txIndex variable decode failed\")\n\t\t}\n\t\tbyteIndex += txBytes\n\n\t\tcertIndex, certBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"certIndex variable decode failed\")\n\t\t}\n\t\tbyteIndex += certBytes\n\n\t\tif byteIndex > len(s) {\n\t\t\treturn nil, errors.New(\"byte index is out range of pointer lenght\")\n\t\t}\n\n\t\treturn &PointerAddress{\n\t\t\tNetwork: network, Payment: paymentCred,\n\t\t\tStake: StakePoint{Slot: slot, TxIndex: txIndex, CertIndex: certIndex},\n\t\t}, nil\n\t// Enterprise type\n\tcase 0b0110, 0b0111:\n\t\t// header + keyhash\n\t\tif len(s) != 29 {\n\t\t\treturn nil, errors.New(\"invalid length for enterprise address\")\n\t\t}\n\t\treturn &EnterpriseAddress{Network: network, Payment: readAddrCred(4, 1)}, nil\n\t// Reward type\n\tcase 0b1110, 0b1111:\n\t\tif len(s) != 29 {\n\t\t\treturn nil, errors.New(\"invalid length for reward address\")\n\t\t}\n\t\treturn &Reward{Network: network, Payment: readAddrCred(4, 1)}, nil\n\t// Legacy byron type\n\tcase 0b1000:\n\t\tvar byron LegacyAddress\n\t\tif err := cbor.Unmarshal(s, &byron); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &byron, nil\n\t}\n\treturn nil, errors.New(\"unsupports address type\")\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) { testPool(t, false) }
[ "func (this *PoolTestSuite) TestInvalidateFreesCapacity() {\n\tthis.pool.Config.MaxTotal = 2\n\tthis.pool.Config.MaxWaitMillis = 500\n\tthis.pool.Config.BlockWhenExhausted = true\n\t// Borrow an instance and hold if for 5 seconds\n\tch1 := waitTestGoroutine(this.pool, 5000)\n\t// Borrow another instance\n\tobj := this.NoErrorWithResult(this.pool.BorrowObject())\n\t// Launch another goroutine - will block, but fail in 500 ms\n\tch2 := waitTestGoroutine(this.pool, 100)\n\t// Invalidate the object borrowed by this goroutine - should allow goroutine2 to create\n\tsleep(20)\n\tthis.NoError(this.pool.InvalidateObject(obj))\n\tsleep(600) // Wait for goroutine2 to timeout\n\tresult2 := <-ch2\n\tclose(ch2)\n\tif result2.error != nil {\n\t\tthis.Fail(result2.error.Error())\n\t}\n\t<-ch1\n\tclose(ch1)\n}", "func TestPool(t *testing.T, p pool.Pool) {\n\tt.Helper()\n\tctx := context.Background()\n\toffers, err := p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\t// We accept half the memory and disk; we use 0 CPUs.\n\to := offers[0]\n\tr := o.Available()\n\tvar orig reflow.Resources\n\torig.Set(r)\n\tr[\"cpu\"] = 0\n\tr[\"mem\"] /= 2\n\tr[\"disk\"] /= 2\n\talloc, err := o.Accept(ctx, pool.AllocMeta{Want: r, Owner: \"test\", Labels: nil})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\toffers, err = p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\to = offers[0]\n\tlog.Printf(\"offer received %v\", o.Available())\n\tif got, want := o.Available()[\"mem\"], (orig[\"mem\"] - orig[\"mem\"]/2); got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\n\tid := reflow.Digester.FromString(\"alloctest\")\n\texec, err := alloc.Put(ctx, id, reflow.ExecConfig{\n\t\tType: \"exec\",\n\t\tImage: bashImage,\n\t\tCmd: \"echo logthis; echo foobar > $out\",\n\t})\n\tif err 
!= nil {\n\t\tt.Fatal(err)\n\t}\n\t// Give it some time to fetch the image, etc.\n\tctx, cancel := context.WithTimeout(ctx, 1*time.Minute)\n\tdefer cancel()\n\terr = exec.Wait(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := exec.Result(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.Err != nil {\n\t\tt.Fatal(res.Err)\n\t}\n\torigres := res\n\n\t// Now we force expiry to see that we can grab everything.\n\t// We grab a new alloc, and check that our old alloc died;\n\t// there should now be zero offers.\n\tintv := 1 * time.Nanosecond\n\td, err := alloc.Keepalive(ctx, intv)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := d, intv; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\ttime.Sleep(d)\n\toffers, err = p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\to = offers[0]\n\tif got, want := o.Available(), orig; !got.Equal(want) {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\talloc1, err := o.Accept(ctx, pool.AllocMeta{Want: o.Available(), Owner: \"test\", Labels: nil})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := alloc1.Resources(), o.Available(); !got.Equal(want) {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\t// Look it up again to get its zombie.\n\t// Note: in client-server testing we're interacting directly with a client\n\t// not through a cluster implementation, so we'll need to strip off the\n\t// hostname ourselves.\n\tallocID := alloc.ID()\n\tif idx := strings.Index(allocID, \"/\"); idx > 0 {\n\t\tallocID = allocID[idx+1:]\n\t}\n\talloc, err = p.Alloc(ctx, allocID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texec, err = alloc.Get(ctx, id)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err = exec.Result(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := res, origres; !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"got %v, want %v\", got, 
want)\n\t}\n\trc, err := exec.Logs(ctx, true, false, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer rc.Close()\n\tb, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := string(b), \"logthis\\n\"; got != want {\n\t\tt.Errorf(\"got %q, want %q\", got, want)\n\t}\n\n\t// We shouldn't have any offers now.\n\toffers, err = p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 0; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n}", "func (p *Pool) Release(){\n if(p.availablePool != nil){\n for _,dbCon := range p.availablePool{\n dbCon.Close()\n }\n }else{\n p.availablePool=nil\n }\n}", "func TestRefcount(t *testing.T) {\n\tpool := NewPool(iobufSize)\n\tdefer pool.Close()\n\n\tiobuf := pool.alloc(0)\n\tslice1 := iobuf.slice(0, 0, 10)\n\tslice2 := iobuf.slice(10, 10, 20)\n\tiobuf.release()\n\texpectEq(t, 0, len(pool.freelist))\n\tslice1.Release()\n\texpectEq(t, 0, len(pool.freelist))\n\tslice2.Release()\n\texpectEq(t, 1, len(pool.freelist))\n}", "func TestCollectGarbage(t *testing.T) {\n\tCollectGarbage()\n}", "func ReleaseCGOMemPool(pool CGOMemPool) {\n\tC.arrow_release_pool(pool)\n}", "func (*MockGPool) ReleaseAndWait() {\n\tpanic(\"implement me\")\n}", "func (p *Pool) Release() {\n\tp.stopOne.Do(p.release)\n}", "func reclaim(p *pool, c *) {\n select {\n case p.pool <- c:\n default:\n // let it go, let it go...\n\t// leaky pool\n }\n}", "func Release() {\n\tdefaultRoutinePool.Release()\n}", "func (p *connPool) gc() {\n\tp.openMu.Lock()\n\tdefer p.openMu.Unlock()\n\n\tp.mapMu.Lock()\n\tdefer p.mapMu.Unlock()\n\n\tvar activeRefs int64\n\tfor params, conn := range p.conns {\n\t\t// We hold the openMu write lock, so no one is trying to open a connection.\n\t\t// The only thing we might race with is callers decrementing the refCount,\n\t\t// which is fine. 
What matters is that no one will race to increment it,\n\t\t// which could reverse a decision we had already made to close the connection.\n\t\tconn.mu.Lock()\n\t\tactiveRefs += conn.refCount\n\t\tif conn.failed() {\n\t\t\t// The connection attempt failed, so remove it without trying to close it.\n\t\t\tdelete(p.conns, params)\n\t\t} else if conn.refCount <= 0 && time.Since(conn.lastOpened) > idleTTL {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": params.Implementation,\n\t\t\t\t\"address\": params.Address,\n\t\t\t\t\"rootPath\": params.RootPath,\n\t\t\t}).Info(\"closing connection to Vitess topology server due to idle TTL\")\n\t\t\tdisconnects.WithLabelValues(reasonIdle).Inc()\n\n\t\t\tconn.Server.Close()\n\t\t\tdelete(p.conns, params)\n\t\t}\n\t\tconn.mu.Unlock()\n\t}\n\tconnCount.WithLabelValues(connStateActive).Set(float64(len(p.conns)))\n\tconnRefCount.WithLabelValues(connStateActive).Set(float64(activeRefs))\n\n\t// Clean up bad conns once they're no longer being used.\n\t// Make a list of bad conns that still have refs (we need to keep waiting).\n\tvar deadRefs int64\n\tstillUsed := make([]*Conn, 0, len(p.deadConns))\n\tfor _, conn := range p.deadConns {\n\t\tconn.mu.Lock()\n\t\tdeadRefs += conn.refCount\n\t\tif conn.refCount <= 0 {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": conn.params.Implementation,\n\t\t\t\t\"address\": conn.params.Address,\n\t\t\t\t\"rootPath\": conn.params.RootPath,\n\t\t\t}).Info(\"closing connection to Vitess topology server due to liveness check failure\")\n\t\t\tdisconnects.WithLabelValues(reasonDead).Inc()\n\n\t\t\tconn.Server.Close()\n\t\t} else {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": conn.params.Implementation,\n\t\t\t\t\"address\": conn.params.Address,\n\t\t\t\t\"rootPath\": conn.params.RootPath,\n\t\t\t}).Warning(\"cached connection to Vitess topology server failed liveness check but is still in use\")\n\n\t\t\tstillUsed = append(stillUsed, 
conn)\n\t\t}\n\t\tconn.mu.Unlock()\n\t}\n\tp.deadConns = stillUsed\n\tconnCount.WithLabelValues(connStateDead).Set(float64(len(p.deadConns)))\n\tconnRefCount.WithLabelValues(connStateDead).Set(float64(deadRefs))\n}", "func (pool *DockerPool) Cleanup() {}", "func (p *Pool) Destroy() {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tp.factory = nil\n\tif p.conns == nil {\n\t\treturn\n\t}\n\n\tfor v := range p.conns {\n\t\tif v != nil {\n\t\t\tp.Close(v)\n\t\t}\n\t}\n\tp.conns = nil\n\n}", "func registerPoolCleanup(cleanup func()) {\n\t// Ignore.\n}", "func TestPool() {\n\tBeforeEach(initializeCoil)\n\tAfterEach(cleanCoil)\n\n\tIt(\"should create address pool\", func() {\n\t\tBy(\"creating address pool\")\n\t\tcoilctlSafe(\"pool\", \"create\", \"test1\", \"10.0.3.0/24\", \"2\")\n\t\tcoilctlSafe(\"pool\", \"show\", \"--json\", \"test1\", \"10.0.3.0/24\")\n\n\t\tBy(\"checking add-subnet to existing pool\")\n\t\tcoilctlSafe(\"pool\", \"add-subnet\", \"test1\", \"10.0.4.0/24\")\n\t\tcoilctlSafe(\"pool\", \"show\", \"--json\", \"test1\", \"10.0.4.0/24\")\n\t})\n}", "func (tbp *TestBucketPool) Close() {\n\tif tbp == nil {\n\t\t// noop\n\t\treturn\n\t}\n\n\t// Cancel async workers\n\tif tbp.ctxCancelFunc != nil {\n\t\ttbp.bucketReadierWaitGroup.Wait()\n\t\ttbp.ctxCancelFunc()\n\t}\n\n\tif tbp.cluster != nil {\n\t\tif err := tbp.cluster.close(); err != nil {\n\t\t\ttbp.Logf(context.Background(), \"Couldn't close cluster connection: %v\", err)\n\t\t}\n\t}\n\n\ttbp.printStats()\n}", "func cleanDiscoveryPool() {\n\tfor {\n\t\tdiscoveryStorage.Clean()\n\t\ttime.Sleep(time.Duration(config.CleanEvery) * time.Second)\n\t}\n\n}", "func (s *MockManagedThread) Release() {}", "func (c *channelPool) Release() {\n\tc.mu.Lock()\n\tfor _, servConn := range c.servConnsMap {\n\t\tfor ic := range servConn.idleConns {\n\t\t\tic.connWrap.CloseConnWrap()\n\t\t}\n\t\tclose(servConn.idleConns)\n\t\tservConn.openingConnNum = 0\n\t}\n\n\tc.servConnsMap = nil\n\tc.servAddrList = nil\n\n\tc.mu.Unlock()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simulate object starvation in order to force Ps to steal objects from other Ps.
func BenchmarkPoolStarvation(b *testing.B) { var p Pool count := 100 // Reduce number of putted objects by 33 %. It creates objects starvation // that force P-local storage to steal objects from other Ps. countStarved := count - int(float32(count)*0.33) b.RunParallel(func(pb *testing.PB) { for pb.Next() { for b := 0; b < countStarved; b++ { p.Put(1) } for b := 0; b < count; b++ { p.Get() } } }) }
[ "func SyncRuntimeDoSpin()", "func (this *PoolTestSuite) TestInvalidateFreesCapacity() {\n\tthis.pool.Config.MaxTotal = 2\n\tthis.pool.Config.MaxWaitMillis = 500\n\tthis.pool.Config.BlockWhenExhausted = true\n\t// Borrow an instance and hold if for 5 seconds\n\tch1 := waitTestGoroutine(this.pool, 5000)\n\t// Borrow another instance\n\tobj := this.NoErrorWithResult(this.pool.BorrowObject())\n\t// Launch another goroutine - will block, but fail in 500 ms\n\tch2 := waitTestGoroutine(this.pool, 100)\n\t// Invalidate the object borrowed by this goroutine - should allow goroutine2 to create\n\tsleep(20)\n\tthis.NoError(this.pool.InvalidateObject(obj))\n\tsleep(600) // Wait for goroutine2 to timeout\n\tresult2 := <-ch2\n\tclose(ch2)\n\tif result2.error != nil {\n\t\tthis.Fail(result2.error.Error())\n\t}\n\t<-ch1\n\tclose(ch1)\n}", "func startMining() {\n\n\t// First block to mine\n\tblock := Block{PrevHash: [32]byte{}, Transactions: nil}\n\t//start := time.Now()\n\n\tstart := time.Now()\n\n\t// ONLY FOR TEST PURPOSES (If a block comes before another block)\n\tretain := -1\n\ti := 0\n\n\tfor {\n\n\t\t// Generate random Nonce\n\t\tnonce := [32]byte{}\n\t\t_, err := rand.Read(nonce[:])\n\t\tif err != nil {\n\t\t\tcheckError(err, true)\n\t\t}\n\t\tblock.Nonce = nonce\n\t\t//fmt.Println(nonce)\n\n\t\tif block.Valid() /* && len(block.Transactions) != 0 */ {\n\t\t\ti++\n\t\t\tfmt.Printf(\"FOUND-BLOCK %x\\n\", block.Hash())\n\t\t\tmyGossiper.blockChannel <- block\n\n\t\t\t//time.Sleep(10000 * time.Millisecond)\n\n\t\t\tif bytes.Equal(block.PrevHash[:], make([]byte, 32, 32)) {\n\t\t\t\tfmt.Println(\"Wainting for the Genesis\")\n\t\t\t\ttime.Sleep(MINEUR_SLEEPING_TIME)\n\t\t\t} else {\n\t\t\t\telapsed := time.Since(start)\n\t\t\t\t//fmt.Println(\"Mineur sleeping time :\", elapsed)\n\t\t\t\ttime.Sleep(2 * elapsed)\n\t\t\t}\n\n\t\t\tif i == retain {\n\n\t\t\t\tblockRetarded := Block{}\n\t\t\t\tcopier.Copy(&blockRetarded, &block)\n\n\t\t\t\tfmt.Printf(\"$Keeping the block! 
%x with prev %x\\n\", block.Hash(), blockRetarded.PrevHash)\n\n\t\t\t\tgo func() {\n\t\t\t\t\tt := time.NewTimer(500 * time.Millisecond)\n\n\t\t\t\t\t<-t.C\n\n\t\t\t\t\tfmt.Printf(\"$Releasing the block! %x with prev %x\\n\", blockRetarded.Hash(), blockRetarded.PrevHash)\n\n\t\t\t\t\tbroadcastBlock(&BlockPublish{\n\t\t\t\t\t\tHopLimit: HOP_LIMIT_BLOCK,\n\t\t\t\t\t\tBlock: blockRetarded,\n\t\t\t\t\t}, \"\")\n\n\t\t\t\t}()\n\n\t\t\t} else {\n\t\t\t\tbroadcastBlock(&BlockPublish{\n\t\t\t\t\tHopLimit: HOP_LIMIT_BLOCK,\n\t\t\t\t\tBlock: block,\n\t\t\t\t}, \"\")\n\t\t\t}\n\n\t\t\tstart = time.Now()\n\n\t\t}\n\n\t\t// Updating the pending transaction\n\n\t\tmyGossiper.blockchain.mux.Lock()\n\t\tmyGossiper.pendingTransactions.mux.Lock()\n\n\t\tblock.Transactions = copieTransaction(myGossiper.pendingTransactions.transactions)\n\t\tif myGossiper.blockchain.lengthLongestChain != 0 {\n\t\t\tblock.PrevHash = myGossiper.blockchain.head.Hash()\n\t\t}\n\n\t\tmyGossiper.pendingTransactions.mux.Unlock()\n\t\tmyGossiper.blockchain.mux.Unlock()\n\n\t}\n}", "func ObjMutator(thsObj *gms.ThsObj, objHist *gms.ThsObjs, interval time.Duration) {\n\to := thsObj.Get()\n\to.O = o.O.RandMutate()\n\to.T = time.Now()\n\tobjHist.Add(o.O)\n\tthsObj.Set(o)\n\n\tc := time.Tick(interval)\n\tfor range c {\n\t\to := thsObj.Get()\n\t\to.O = o.O.RandMutate()\n\t\to.T = time.Now()\n\t\tobjHist.Add(o.O)\n\t\tthsObj.Set(o)\n\t}\n}", "func (p philosopher) eat() {\r\n\tdefer eatWgroup.Done()\r\n\tfor j := 0; j < 3; j++ {\r\n\t\tp.leftFork.Lock()\r\n\t\tp.rightFork.Lock()\r\n\r\n\t\tsay(\"eating\", p.id)\r\n\t\ttime.Sleep(time.Second)\r\n\r\n\t\tp.rightFork.Unlock()\r\n\t\tp.leftFork.Unlock()\r\n\r\n\t\tsay(\"finished eating\", p.id)\r\n\t\ttime.Sleep(time.Second)\r\n\t}\r\n\r\n}", "func simulateGC(ncs *nodeCacheStandard, liveList []Node) {\n\thasWork := true\n\tfor hasWork {\n\t\thasWork = false\n\n\t\tliveSet := make(map[*nodeCore]bool)\n\n\t\t// Everything in liveList is live.\n\t\tfor _, n := range liveList 
{\n\t\t\tliveSet[n.(*nodeStandard).core] = true\n\t\t}\n\n\t\t// Everything referenced as a parent is live.\n\t\tfor _, e := range ncs.nodes {\n\t\t\tif e.core.parent != nil {\n\t\t\t\tp := e.core.parent.Unwrap().(*nodeStandard)\n\t\t\t\tliveSet[p.core] = true\n\t\t\t}\n\t\t}\n\n\t\t// Forget everything not live.\n\t\tfor _, e := range ncs.nodes {\n\t\t\tif _, ok := liveSet[e.core]; !ok {\n\t\t\t\tncs.forget(e.core)\n\t\t\t\thasWork = true\n\t\t\t}\n\t\t}\n\t}\n}", "func pollObstructionSwitch(obstructedPub chan<- ObstructedEvent) {\n\tprev := false\n\tfor {\n\t\ttime.Sleep(_pollRate)\n\t\tv := getObstruction()\n\t\tif v != prev {\n\t\t\tevt := ObstructedEvent{utils.ELEVATOR_ID, v}\n\t\t\tobstructedPub <- evt\n\t\t}\n\t\tprev = v\n\t}\n}", "func (s *System) spawn() {\n\tfor i := range s.threads {\n\t\tt := &s.threads[i]\n\t\tfor _, sr := range t.requests {\n\t\t\tstr := len(s.particles)\n\t\t\ts.particles.Resize(str + sr.Amount)\n\t\t\tfor i := 0; i < sr.Amount; i++ {\n\t\t\t\tr := sr.Rotation.Gen()\n\t\t\t\tvel := mat.Rad(sr.Spread.Gen()+sr.Dir, sr.Velocity.Gen())\n\t\t\t\tif sr.RotationRelativeToVelocity {\n\t\t\t\t\tr += vel.Angle()\n\t\t\t\t}\n\n\t\t\t\tp := &s.particles[i+str]\n\n\t\t\t\tp.Type = sr.Type\n\n\t\t\t\tp.vel = vel\n\t\t\t\tp.orig = sr.Pos\n\t\t\t\tp.pos = sr.Pos.Add(sr.Gen(sr.Dir))\n\n\t\t\t\tp.mask = sr.Mask.Mul(sr.Type.Mask.Gen())\n\n\t\t\t\tp.scl.X = sr.ScaleX.Gen()\n\t\t\t\tp.scl.Y = sr.ScaleY.Gen()\n\t\t\t\tp.livetime = 1 / sr.Livetime.Gen()\n\t\t\t\tp.twerk = sr.Twerk.Gen()\n\t\t\t\tp.rot = r\n\t\t\t\tp.progress = 0\n\n\t\t\t\tp.vertex = s.vertex\n\t\t\t\tp.indice = s.indice\n\n\t\t\t\ts.vertex += p.vertexes\n\t\t\t\ts.indice += p.indices\n\t\t\t}\n\t\t}\n\t\tt.requests = t.requests[:0]\n\t}\n}", "func (e *Engine) GoPonder() {\n\tif atomic.CompareAndSwapInt32(&e.running, 0, 1) {\n\t\te.startNow(true)\n\t}\n}", "func (e *endpoint) thaw() {\n\te.mu.Lock()\n\te.frozen = false\n\te.mu.Unlock()\n}", "func Sweeper() {\n\tfor {\n\t\ttime.Sleep(10 * 
time.Millisecond)\n\t\tbufMutex.Lock()\n\t\tsendToQueue()\n\t\tbufMutex.Unlock()\n\t}\n}", "func MWAIT() { ctx.MWAIT() }", "func BenchmarkCreateGoroutinesSingle(b *testing.B) {\n\t// Since we are interested in stealing behavior, warm the scheduler to\n\t// get all the Ps running first.\n\twarmupScheduler(runtime.GOMAXPROCS(0))\n\tb.ResetTimer()\n\n\tvar wg sync.WaitGroup\n\twg.Add(b.N)\n\tfor i := 0; i < b.N; i++ {\n\t\tgo func() {\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}", "func (s *MockManagedThread) SuspendUnsafe() {}", "func TestMoby33781(t *testing.T) {\n\tstop := make(chan bool)\n\tgo monitor(stop) // G1\n\tgo func() { // G2\n\t\ttime.Sleep(50 * time.Nanosecond)\n\t\tstop <- true\n\t}()\n}", "func (s *SpySleeper) Sleep(){\n\ts.Calls++\n}", "func TestPreemptionStarvation(t *testing.T) {\n\t// Initialize scheduler.\n\ttestCtx := initTest(t, \"preemption\")\n\tcs := testCtx.ClientSet\n\n\ttests := []struct {\n\t\tname string\n\t\tnumExistingPod int\n\t\tnumExpectedPending int\n\t\tpreemptor *v1.Pod\n\t}{\n\t\t{\n\t\t\t// This test ensures that while the preempting pod is waiting for the victims\n\t\t\t// terminate, other lower priority pods are not scheduled in the room created\n\t\t\t// after preemption and while the higher priority pods is not scheduled yet.\n\t\t\tname: \"starvation test: higher priority pod is scheduled before the lower priority ones\",\n\t\t\tnumExistingPod: 10,\n\t\t\tnumExpectedPending: 5,\n\t\t\tpreemptor: initPausePod(&testutils.PausePodConfig{\n\t\t\t\tName: \"preemptor-pod\",\n\t\t\t\tNamespace: testCtx.NS.Name,\n\t\t\t\tPriority: &highPriority,\n\t\t\t\tResources: &v1.ResourceRequirements{Requests: v1.ResourceList{\n\t\t\t\t\tv1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),\n\t\t\t\t\tv1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},\n\t\t\t\t},\n\t\t\t}),\n\t\t},\n\t}\n\n\t// Create a node with some resources\n\tnodeRes := map[v1.ResourceName]string{\n\t\tv1.ResourcePods: 
\"32\",\n\t\tv1.ResourceCPU: \"500m\",\n\t\tv1.ResourceMemory: \"500\",\n\t}\n\t_, err := createNode(testCtx.ClientSet, st.MakeNode().Name(\"node1\").Capacity(nodeRes).Obj())\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating nodes: %v\", err)\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tpendingPods := make([]*v1.Pod, test.numExpectedPending)\n\t\t\tnumRunningPods := test.numExistingPod - test.numExpectedPending\n\t\t\trunningPods := make([]*v1.Pod, numRunningPods)\n\t\t\t// Create and run existingPods.\n\t\t\tfor i := 0; i < numRunningPods; i++ {\n\t\t\t\trunningPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf(\"rpod-%v\", i), mediumPriority, 0))\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Error creating pause pod: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// make sure that runningPods are all scheduled.\n\t\t\tfor _, p := range runningPods {\n\t\t\t\tif err := testutils.WaitForPodToSchedule(cs, p); err != nil {\n\t\t\t\t\tt.Fatalf(\"Pod %v/%v didn't get scheduled: %v\", p.Namespace, p.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Create pending pods.\n\t\t\tfor i := 0; i < test.numExpectedPending; i++ {\n\t\t\t\tpendingPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf(\"ppod-%v\", i), mediumPriority, 0))\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Error creating pending pod: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Make sure that all pending pods are being marked unschedulable.\n\t\t\tfor _, p := range pendingPods {\n\t\t\t\tif err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false,\n\t\t\t\t\tpodUnschedulable(cs, p.Namespace, p.Name)); err != nil {\n\t\t\t\t\tt.Errorf(\"Pod %v/%v didn't get marked unschedulable: %v\", p.Namespace, p.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Create the preemptor.\n\t\t\tpreemptor, err := createPausePod(cs, test.preemptor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Error while creating the 
preempting pod: %v\", err)\n\t\t\t}\n\t\t\t// Check if .status.nominatedNodeName of the preemptor pod gets set.\n\t\t\tif err := waitForNominatedNodeName(cs, preemptor); err != nil {\n\t\t\t\tt.Errorf(\".status.nominatedNodeName was not set for pod %v/%v: %v\", preemptor.Namespace, preemptor.Name, err)\n\t\t\t}\n\t\t\t// Make sure that preemptor is scheduled after preemptions.\n\t\t\tif err := testutils.WaitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {\n\t\t\t\tt.Errorf(\"Preemptor pod %v didn't get scheduled: %v\", preemptor.Name, err)\n\t\t\t}\n\t\t\t// Cleanup\n\t\t\tklog.Info(\"Cleaning up all pods...\")\n\t\t\tallPods := pendingPods\n\t\t\tallPods = append(allPods, runningPods...)\n\t\t\tallPods = append(allPods, preemptor)\n\t\t\ttestutils.CleanupPods(testCtx.Ctx, cs, t, allPods)\n\t\t})\n\t}\n}", "func (b *bear) sleep() {\n\tb.hoursSlept += 10.4\n}", "func (self *PhysicsP2) SetSleepModeA(member int) {\n self.Object.Set(\"sleepMode\", member)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewMockFetcher creates a new mock instance
func NewMockFetcher(ctrl *gomock.Controller) *MockFetcher { mock := &MockFetcher{ctrl: ctrl} mock.recorder = &MockFetcherMockRecorder{mock} return mock }
[ "func newTestFetcher() *FetcherTest {\n\treturn &FetcherTest{}\n}", "func newMockProvider() *mockProviderAsync {\n\tprovider := newSyncMockProvider()\n\t// By default notifier is set to a function which is a no-op. In the event we've implemented the PodNotifier interface,\n\t// it will be set, and then we'll call a real underlying implementation.\n\t// This makes it easier in the sense we don't need to wrap each method.\n\treturn &mockProviderAsync{provider}\n}", "func NewMock() *Mock {\n\tmock := &Mock{\n\t\tm: &sync.Mutex{},\n\t\ttimes: 1,\n\t}\n\tmock.request = &MockRequest{\n\t\tmock: mock,\n\t\theaders: map[string][]string{},\n\t\tformData: map[string][]string{},\n\t\tquery: map[string][]string{},\n\t\tmatchers: defaultMatchers,\n\t}\n\tmock.response = &MockResponse{\n\t\tmock: mock,\n\t\theaders: map[string][]string{},\n\t}\n\treturn mock\n}", "func New() *Mock {\n\treturn &Mock{\n\t\tm: mockMap{},\n\t\toldTransport: http.DefaultTransport,\n\t}\n}", "func (c *Crawler) newFetcher(height uint64) {\n\t\n\t// Stop previous fetcher\n\tif c.fetcherStop != nil {\n\t\tc.fetcherStop <- true\n\t}\n\t\n\t// Both channels to be closed by fetcher task\n\tc.fetcherStop = make(chan bool)\n\tc.fetcherBlocks = make(chan blockRecord, FetcherBlockBufferSize)\n\n\t//\n\tgo fetcher(c.rpcConfig, height, c.fetcherBlocks, c.fetcherStop)\n}", "func (e *Exporter) newFetcher(hostname string) *Fetcher {\n\treturn NewFetcher(hostname, e.chAccessInfo.Username, e.chAccessInfo.Password, e.chAccessInfo.Port)\n}", "func NewMock(t *testing.T) *MockT { return &MockT{t: t} }", "func NewForge(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Forge {\n\tmock := &Forge{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMockObject(uid, name, ns string, res api.Resource) api.Object {\n\treturn NewObject(uuid.NewFromString(uid), name, ns, res)\n}", "func NewMock() *Hit {\n\th := &Hit{}\n\th.requests = make(map[string]float64)\n\treturn 
h\n}", "func NewMock() Cache {\n\treturn &mock{}\n}", "func NewMock() (*Client, error) {\n\t// Generate a new mock client\n\tclient := Client{\n\t\tHost: \"__MOCK__\",\n\n\t\t// Use mock data as the data source\n\t\tsource: mockDataSource{},\n\t}\n\n\t// Initialize mock data\n\tif err := mockInit(client); err != nil {\n\t\treturn nil, errors.New(\"gosubsonic: failed to initialize mock client\")\n\t}\n\n\treturn &client, nil\n}", "func NewMock(now time.Time) *Mock {\n\treturn &Mock{\n\t\tnow: now,\n\t\tmockTimers: &timerHeap{},\n\t}\n}", "func newMockReadRequestFactory(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockReadRequestFactory {\n\tmock := &mockReadRequestFactory{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewRequester(t mockConstructorTestingTNewRequester) *Requester {\n\tmock := &Requester{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func CreateMock(method interface{}, url interface{}, headers interface{}, body interface{}) *go_mock_yourself_http.Mock {\n\tmockRequest := new(go_mock_yourself_http.Request)\n\n\tif method != nil {\n\t\tmockRequest.SetMethod(method)\n\t}\n\n\tif url != nil {\n\t\tmockRequest.SetUrl(url)\n\t}\n\n\tif body != nil {\n\t\tmockRequest.SetBody(body)\n\t}\n\n\tif headers != nil {\n\t\tmockRequest.SetHeaders(headers)\n\t}\n\n\tmockResponse := new(go_mock_yourself_http.Response)\n\tmockResponse.SetStatusCode(222)\n\tmockResponse.SetBody(\"i'm a cute loving mock, almost as cute as mumi, bichi and rasti\")\n\n\tmock, _ := go_mock_yourself_http.NewMock(\"my lovely testing mock\", mockRequest, mockResponse)\n\treturn mock\n}", "func newMockPlanner(t *testing.T, functions InfoFunctions) *mockPlanner {\n\tctrl := gomock.NewController(t)\n\tmp := mockPlanner{\n\t\trkeBootstrap: fake.NewMockClientInterface[*rkev1.RKEBootstrap, *rkev1.RKEBootstrapList](ctrl),\n\t\trkeBootstrapCache: 
fake.NewMockCacheInterface[*rkev1.RKEBootstrap](ctrl),\n\t\trkeControlPlanes: fake.NewMockControllerInterface[*rkev1.RKEControlPlane, *rkev1.RKEControlPlaneList](ctrl),\n\t\tetcdSnapshotCache: fake.NewMockCacheInterface[*rkev1.ETCDSnapshot](ctrl),\n\t\tsecretClient: fake.NewMockClientInterface[*v1.Secret, *v1.SecretList](ctrl),\n\t\tsecretCache: fake.NewMockCacheInterface[*v1.Secret](ctrl),\n\t\tconfigMapCache: fake.NewMockCacheInterface[*v1.ConfigMap](ctrl),\n\t\tmachines: fake.NewMockClientInterface[*capi.Machine, *capi.MachineList](ctrl),\n\t\tmachinesCache: fake.NewMockCacheInterface[*capi.Machine](ctrl),\n\t\tclusterRegistrationTokenCache: fake.NewMockCacheInterface[*apisv3.ClusterRegistrationToken](ctrl),\n\t\tcapiClient: fake.NewMockClientInterface[*capi.Cluster, *capi.ClusterList](ctrl),\n\t\tcapiClusters: fake.NewMockCacheInterface[*capi.Cluster](ctrl),\n\t\tmanagementClusters: fake.NewMockNonNamespacedCacheInterface[*apisv3.Cluster](ctrl),\n\t\trancherClusterCache: fake.NewMockCacheInterface[*apisv1.Cluster](ctrl),\n\t}\n\tstore := PlanStore{\n\t\tsecrets: mp.secretClient,\n\t\tsecretsCache: mp.secretCache,\n\t\tmachineCache: mp.machinesCache,\n\t}\n\tp := Planner{\n\t\tctx: context.TODO(),\n\t\tstore: &store,\n\t\tmachines: mp.machines,\n\t\tmachinesCache: mp.machinesCache,\n\t\tsecretClient: mp.secretClient,\n\t\tsecretCache: mp.secretCache,\n\t\tconfigMapCache: mp.configMapCache,\n\t\tclusterRegistrationTokenCache: mp.clusterRegistrationTokenCache,\n\t\tcapiClient: mp.capiClient,\n\t\tcapiClusters: mp.capiClusters,\n\t\tmanagementClusters: mp.managementClusters,\n\t\trancherClusterCache: mp.rancherClusterCache,\n\t\trkeControlPlanes: mp.rkeControlPlanes,\n\t\trkeBootstrap: mp.rkeBootstrap,\n\t\trkeBootstrapCache: mp.rkeBootstrapCache,\n\t\tetcdSnapshotCache: mp.etcdSnapshotCache,\n\t\tetcdS3Args: s3Args{\n\t\t\tsecretCache: mp.secretCache,\n\t\t},\n\t\tretrievalFunctions: functions,\n\t}\n\tmp.planner = &p\n\treturn &mp\n}", "func NewFetcher(ASlist 
[]string) *Fetcher {\n\treturn &Fetcher{\n\t\tASNs: ASlist,\n\t}\n\n}", "func NewFetcher() *Fetcher {\n\treturn new(Fetcher)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FetchLocalImage mocks base method
func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) { ret := m.ctrl.Call(m, "FetchLocalImage", arg0) ret0, _ := ret[0].(image.Image) ret1, _ := ret[1].(error) return ret0, ret1 }
[ "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (imp *Importer) fetchLocalImages() {\n items, err := ioutil.ReadDir(STORE_DIR)\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n for _, info := range items {\n if info.IsDir() { continue }\n filename := info.Name()\n\n file, err := os.Open(fmt.Sprintf(\"%s/%s\", STORE_DIR, filename))\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n img, err := jpeg.Decode(file)\n if err != nil {\n log.Printf(\"Error decoding image file %s to jpeg\\n\", filename)\n continue\n }\n\n ext := filepath.Ext(filename)\n id := filename[:len(filename)-len(ext)]\n\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n }\n}", "func (m *MockCommonAPIClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageLoad\", ctx, input, quiet)\n\tret0, _ := ret[0].(types.ImageLoadResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err 
error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func (m *MockImageAPIClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageLoad\", ctx, input, quiet)\n\tret0, _ := ret[0].(types.ImageLoadResponse)\n\tret1, _ := 
ret[1].(error)\n\treturn ret0, ret1\n}", "func mockFetch(testBase string) func(string) (string, error) {\n\treturn func(url string) (string, error) {\n\n\t\tvar file string\n\t\tif url == testBase {\n\t\t\tfile = \"./mock/basePage.html\"\n\t\t} else {\n\n\t\t\tswitch {\n\t\t\tcase strings.Contains(url, \"/faq\"):\n\t\t\t\tfile = \"./mock/faq.html\"\n\t\t\tcase strings.Contains(url, \"/about\"):\n\t\t\t\tfile = \"./mock/about.html\"\n\t\t\tcase strings.Contains(url, \"/careers\"):\n\t\t\t\tfile = \"./mock/careers.html\"\n\t\t\tcase strings.Contains(url, \"/info\"):\n\t\t\t\tfile = \"./mock/info.html\"\n\t\t\tcase strings.Contains(url, \"/generic\"):\n\t\t\t\tfile = \"./mock/generic.html\"\n\t\t\t}\n\t\t}\n\n\t\tbody, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(body), nil\n\t}\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, 
downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func (_m *API) ImageLocalDigests(ctx context.Context, image string) ([]string, error) {\n\tret := _m.Called(ctx, image)\n\n\tvar r0 []string\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok {\n\t\treturn rf(ctx, image)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, string) []string); ok {\n\t\tr0 = rf(ctx, image)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]string)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = rf(ctx, image)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func LoadLocalImage(app *AppData) error {\n\tapp.LocalImage = DockerImage{\n\t\tExists: false,\n\t}\n\tctx := context.Background()\n\tcli, err := client.NewClientWithOpts(client.FromEnv)\n\tcli.NegotiateAPIVersion(ctx)\n\tdefer cli.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tinspect, _, err := cli.ImageInspectWithRaw(ctx, app.From)\n\tif err != nil {\n\t\tif err.Error() == \"Error: No such image: \"+app.From {\n\t\t\tfmt.Printf(\"Repo not exists in local docker\\n\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Printf(\"Repo exists in local docker\\n\")\n\tapp.LocalImage.Exists = true\n\tif len(inspect.RepoDigests) > 0 {\n\t\tapp.LocalImage.DockerDigest = inspect.RepoDigests[0]\n\t} else {\n\t\tapp.LocalImage.DockerDigest = inspect.ID\n\t}\n\n\t//Setting Docker Config values\n\tconfigData, err := json.Marshal(inspect.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pvjson.Unmarshal(configData, &app.LocalImage.DockerConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TestBaseImage(t *testing.T) {\n\tctx, err := controllerPrepare()\n\tif err != nil {\n\t\tt.Fatal(\"Fail in controller prepare: \", err)\n\t}\n\teveBaseRef := os.Getenv(\"EVE_BASE_REF\")\n\tif len(eveBaseRef) == 0 {\n\t\teveBaseRef = 
\"4.10.0\"\n\t}\n\tzArch := os.Getenv(\"ZARCH\")\n\tif len(eveBaseRef) == 0 {\n\t\tzArch = \"amd64\"\n\t}\n\tHV := os.Getenv(\"HV\")\n\tif HV == \"xen\" {\n\t\tHV = \"\"\n\t}\n\tvar baseImageTests = []struct {\n\t\tdataStoreID string\n\t\timageID string\n\t\tbaseID string\n\t\timageRelativePath string\n\t\timageFormat config.Format\n\t\teveBaseRef string\n\t\tzArch string\n\t\tHV string\n\t}{\n\t\t{eServerDataStoreID,\n\n\t\t\t\"1ab8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"22b8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"baseos.qcow2\",\n\t\t\tconfig.Format_QCOW2,\n\t\t\teveBaseRef,\n\t\t\tzArch,\n\t\t\tHV,\n\t\t},\n\t}\n\tfor _, tt := range baseImageTests {\n\t\tbaseOSVersion := fmt.Sprintf(\"%s-%s\", tt.eveBaseRef, tt.zArch)\n\t\tif tt.HV != \"\" {\n\t\t\tbaseOSVersion = fmt.Sprintf(\"%s-%s-%s\", tt.eveBaseRef, tt.zArch, tt.HV)\n\t\t}\n\t\tt.Run(baseOSVersion, func(t *testing.T) {\n\n\t\t\terr = prepareBaseImageLocal(ctx, tt.dataStoreID, tt.imageID, tt.baseID, tt.imageRelativePath, tt.imageFormat, baseOSVersion)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in prepare base image from local file: \", err)\n\t\t\t}\n\t\t\tdeviceCtx, err := ctx.GetDeviceFirst()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in get first device: \", err)\n\t\t\t}\n\t\t\tdeviceCtx.SetBaseOSConfig([]string{tt.baseID})\n\t\t\tdevUUID := deviceCtx.GetID()\n\t\t\terr = ctx.ConfigSync(deviceCtx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in sync config with controller: \", err)\n\t\t\t}\n\t\t\tt.Run(\"Started\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion}, einfo.ZInfoDevSW, 300)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image update init: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Downloaded\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, 
\"downloadProgress\": \"100\"}, einfo.ZInfoDevSW, 1500)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image download progress: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Logs\", func(t *testing.T) {\n\t\t\t\tif !checkLogs {\n\t\t\t\t\tt.Skip(\"no LOGS flag set - skipped\")\n\t\t\t\t}\n\t\t\t\terr = ctx.LogChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"eveVersion\": baseOSVersion}, 1200)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image logs: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\ttimeout := time.Duration(1200)\n\n\t\t\tif !checkLogs {\n\t\t\t\ttimeout = 2400\n\t\t\t}\n\t\t\tt.Run(\"Active\", func(t *testing.T) {\n\t\t\t\terr = ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"status\": \"INSTALLED\", \"partitionState\": \"(inprogress|active)\"}, einfo.ZInfoDevSW, timeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image installed status: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n\n}", "func TestBaseImage(t *testing.T) {\n\t// test with 'original.png'\n\timgs := map[string][]byte{\n\t\t\"original.png\": []byte(\"image\"),\n\t}\n\t_, err := backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// test with 'original.jpg'\n\timgs = map[string][]byte{\n\t\t\"original.jpg\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// without 'original.*' should get an error\n\timgs = map[string][]byte{\n\t\t\"127x127.png\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err == nil {\n\t\tt.Errorf(\"Should get an error, didn't pass original image.\")\n\t}\n}", "func (m *MockResource) Fetch(target string) ([][]byte, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", target)\n\tret0, _ := ret[0].([][]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func 
TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func (m *MockCommonAPIClient) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"ImageImport\", ctx, source, ref, options)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (images.Image, error) {\n\tfetchCtx := defaultRemoteContext()\n\tfor _, o := range opts {\n\t\tif err := o(c, fetchCtx); err != nil {\n\t\t\treturn images.Image{}, err\n\t\t}\n\t}\n\n\tif fetchCtx.Unpack {\n\t\treturn images.Image{}, fmt.Errorf(\"unpack on fetch not supported, try pull: %w\", errdefs.ErrNotImplemented)\n\t}\n\n\tif fetchCtx.PlatformMatcher == nil {\n\t\tif len(fetchCtx.Platforms) == 0 {\n\t\t\tfetchCtx.PlatformMatcher = platforms.All\n\t\t} else {\n\t\t\tvar ps []ocispec.Platform\n\t\t\tfor _, s := range fetchCtx.Platforms {\n\t\t\t\tp, err := platforms.Parse(s)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn images.Image{}, fmt.Errorf(\"invalid platform %s: %w\", s, err)\n\t\t\t\t}\n\t\t\t\tps = append(ps, p)\n\t\t\t}\n\n\t\t\tfetchCtx.PlatformMatcher = platforms.Any(ps...)\n\t\t}\n\t}\n\n\tctx, done, err := c.WithLease(ctx)\n\tif err != nil {\n\t\treturn images.Image{}, err\n\t}\n\tdefer done(ctx)\n\n\timg, err := c.fetch(ctx, fetchCtx, ref, 0)\n\tif err != nil {\n\t\treturn images.Image{}, err\n\t}\n\treturn c.createNewImage(ctx, img)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FetchLocalImage indicates an expected call of FetchLocalImage
func (mr *MockFetcherMockRecorder) FetchLocalImage(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchLocalImage", reflect.TypeOf((*MockFetcher)(nil).FetchLocalImage), arg0) }
[ "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockFetcherMockRecorder) FetchUpdatedLocalImage(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchUpdatedLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchUpdatedLocalImage), arg0, arg1, arg2)\n}", "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *Client) IsLocalImage(image types.ImageInspect) bool {\n\treturn len(image.RepoDigests) == 0\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. 
Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (imp *Importer) fetchLocalImages() {\n items, err := ioutil.ReadDir(STORE_DIR)\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n for _, info := range items {\n if info.IsDir() { continue }\n filename := info.Name()\n\n file, err := os.Open(fmt.Sprintf(\"%s/%s\", STORE_DIR, filename))\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n img, err := jpeg.Decode(file)\n if err != nil {\n log.Printf(\"Error decoding image file %s to jpeg\\n\", filename)\n continue\n }\n\n ext := filepath.Ext(filename)\n id := filename[:len(filename)-len(ext)]\n\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n }\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try 
--insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func (o *ImageImportManifest) GetLocalImageIdOk() (*string, bool) {\n\tif o == nil || o.LocalImageId == nil {\n\t\treturn nil, 
false\n\t}\n\treturn o.LocalImageId, true\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func LoadLocalImage(app *AppData) error {\n\tapp.LocalImage = DockerImage{\n\t\tExists: false,\n\t}\n\tctx := context.Background()\n\tcli, err := client.NewClientWithOpts(client.FromEnv)\n\tcli.NegotiateAPIVersion(ctx)\n\tdefer cli.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tinspect, _, err := cli.ImageInspectWithRaw(ctx, app.From)\n\tif err != nil {\n\t\tif err.Error() == \"Error: No such image: \"+app.From {\n\t\t\tfmt.Printf(\"Repo not exists in local docker\\n\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Printf(\"Repo exists in local docker\\n\")\n\tapp.LocalImage.Exists = true\n\tif len(inspect.RepoDigests) > 0 {\n\t\tapp.LocalImage.DockerDigest = inspect.RepoDigests[0]\n\t} else {\n\t\tapp.LocalImage.DockerDigest = inspect.ID\n\t}\n\n\t//Setting Docker Config values\n\tconfigData, err := json.Marshal(inspect.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pvjson.Unmarshal(configData, &app.LocalImage.DockerConfig)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (images.Image, error) {\n\tfetchCtx := defaultRemoteContext()\n\tfor _, o := range opts {\n\t\tif err := o(c, fetchCtx); err != nil {\n\t\t\treturn images.Image{}, err\n\t\t}\n\t}\n\n\tif fetchCtx.Unpack {\n\t\treturn images.Image{}, fmt.Errorf(\"unpack on fetch not supported, try pull: %w\", errdefs.ErrNotImplemented)\n\t}\n\n\tif fetchCtx.PlatformMatcher == nil {\n\t\tif len(fetchCtx.Platforms) == 0 {\n\t\t\tfetchCtx.PlatformMatcher = platforms.All\n\t\t} else {\n\t\t\tvar ps []ocispec.Platform\n\t\t\tfor _, s := range fetchCtx.Platforms {\n\t\t\t\tp, err := platforms.Parse(s)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn images.Image{}, fmt.Errorf(\"invalid platform %s: %w\", s, err)\n\t\t\t\t}\n\t\t\t\tps = append(ps, p)\n\t\t\t}\n\n\t\t\tfetchCtx.PlatformMatcher = platforms.Any(ps...)\n\t\t}\n\t}\n\n\tctx, done, err := c.WithLease(ctx)\n\tif err != nil {\n\t\treturn images.Image{}, err\n\t}\n\tdefer done(ctx)\n\n\timg, err := c.fetch(ctx, fetchCtx, ref, 0)\n\tif err != nil {\n\t\treturn images.Image{}, err\n\t}\n\treturn c.createNewImage(ctx, img)\n}", "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := 
&models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {\n\tvar (\n\t\tascFile *os.File\n\t\terr error\n\t\tlatest bool\n\t)\n\tif asc != \"\" && f.ks != nil {\n\t\tascFile, err = os.Open(asc)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to open signature file: %v\", err)\n\t\t}\n\t\tdefer ascFile.Close()\n\t}\n\n\tu, err := url.Parse(img)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"not a valid image reference (%s)\", img)\n\t}\n\n\t// if img refers to a local file, ensure the scheme is file:// and make the url path absolute\n\t_, err = os.Stat(u.Path)\n\tif err == nil {\n\t\tu.Path, err = filepath.Abs(u.Path)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to get abs path: %v\", err)\n\t\t}\n\t\tu.Scheme = \"file\"\n\t} else if !os.IsNotExist(err) {\n\t\treturn \"\", fmt.Errorf(\"unable to access %q: %v\", img, err)\n\t}\n\n\tif discover && u.Scheme == \"\" {\n\t\tif app := newDiscoveryApp(img); app != nil {\n\t\t\tvar discoveryError error\n\t\t\tif !f.local {\n\t\t\t\tstderr(\"rkt: searching for app image %s\", img)\n\t\t\t\tep, err := discoverApp(app, true)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tdiscoveryError = err\n\t\t\t\t} else {\n\t\t\t\t\t// No specified version label, mark it as latest\n\t\t\t\t\tif _, ok := app.Labels[\"version\"]; !ok {\n\t\t\t\t\t\tlatest = true\n\t\t\t\t\t}\n\t\t\t\t\treturn f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif discoveryError != nil {\n\t\t\t\tstderr(\"discovery failed for %q: %v. Trying to find image in the store.\", img, discoveryError)\n\t\t\t}\n\t\t\tif f.local || discoveryError != nil {\n\t\t\t\treturn f.fetchImageFromStore(img)\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch u.Scheme {\n\tcase \"http\", \"https\", \"file\":\n\tcase \"docker\":\n\t\tdockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))\n\t\tif dockerURL.Tag == \"latest\" {\n\t\t\tlatest = true\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"rkt only supports http, https, docker or file URLs (%s)\", img)\n\t}\n\treturn f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func (mr *MockFetcherMockRecorder) FetchRemoteImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchRemoteImage\", reflect.TypeOf((*MockFetcher)(nil).FetchRemoteImage), arg0)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FetchRemoteImage mocks base method
func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) { ret := m.ctrl.Call(m, "FetchRemoteImage", arg0) ret0, _ := ret[0].(image.Image) ret1, _ := ret[1].(error) return ret0, ret1 }
[ "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockCommonAPIClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageLoad\", ctx, input, quiet)\n\tret0, _ := ret[0].(types.ImageLoadResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func (_m *API) ImageRemoteDigest(ctx context.Context, image string) (string, error) {\n\tret := _m.Called(ctx, image)\n\n\tvar r0 string\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) (string, error)); ok {\n\t\treturn rf(ctx, image)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, string) string); ok {\n\t\tr0 = rf(ctx, image)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = rf(ctx, image)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockImageAPIClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) 
(types.ImageLoadResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageLoad\", ctx, input, quiet)\n\tret0, _ := ret[0].(types.ImageLoadResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockCommonAPIClient) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImagePull\", ctx, ref, options)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestBaseImage(t *testing.T) {\n\tctx, err := controllerPrepare()\n\tif err != nil {\n\t\tt.Fatal(\"Fail in controller prepare: \", err)\n\t}\n\teveBaseRef := os.Getenv(\"EVE_BASE_REF\")\n\tif len(eveBaseRef) == 0 {\n\t\teveBaseRef = \"4.10.0\"\n\t}\n\tzArch := os.Getenv(\"ZARCH\")\n\tif len(eveBaseRef) == 0 {\n\t\tzArch = \"amd64\"\n\t}\n\tHV := os.Getenv(\"HV\")\n\tif HV == \"xen\" {\n\t\tHV = \"\"\n\t}\n\tvar baseImageTests = []struct {\n\t\tdataStoreID string\n\t\timageID string\n\t\tbaseID string\n\t\timageRelativePath string\n\t\timageFormat config.Format\n\t\teveBaseRef string\n\t\tzArch string\n\t\tHV string\n\t}{\n\t\t{eServerDataStoreID,\n\n\t\t\t\"1ab8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"22b8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"baseos.qcow2\",\n\t\t\tconfig.Format_QCOW2,\n\t\t\teveBaseRef,\n\t\t\tzArch,\n\t\t\tHV,\n\t\t},\n\t}\n\tfor _, tt := range baseImageTests {\n\t\tbaseOSVersion := fmt.Sprintf(\"%s-%s\", tt.eveBaseRef, tt.zArch)\n\t\tif tt.HV != \"\" {\n\t\t\tbaseOSVersion = fmt.Sprintf(\"%s-%s-%s\", tt.eveBaseRef, tt.zArch, tt.HV)\n\t\t}\n\t\tt.Run(baseOSVersion, func(t *testing.T) {\n\n\t\t\terr = prepareBaseImageLocal(ctx, tt.dataStoreID, tt.imageID, tt.baseID, tt.imageRelativePath, tt.imageFormat, baseOSVersion)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in prepare base image from local file: \", err)\n\t\t\t}\n\t\t\tdeviceCtx, err := ctx.GetDeviceFirst()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in 
get first device: \", err)\n\t\t\t}\n\t\t\tdeviceCtx.SetBaseOSConfig([]string{tt.baseID})\n\t\t\tdevUUID := deviceCtx.GetID()\n\t\t\terr = ctx.ConfigSync(deviceCtx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in sync config with controller: \", err)\n\t\t\t}\n\t\t\tt.Run(\"Started\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion}, einfo.ZInfoDevSW, 300)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image update init: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Downloaded\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"downloadProgress\": \"100\"}, einfo.ZInfoDevSW, 1500)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image download progress: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Logs\", func(t *testing.T) {\n\t\t\t\tif !checkLogs {\n\t\t\t\t\tt.Skip(\"no LOGS flag set - skipped\")\n\t\t\t\t}\n\t\t\t\terr = ctx.LogChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"eveVersion\": baseOSVersion}, 1200)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image logs: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\ttimeout := time.Duration(1200)\n\n\t\t\tif !checkLogs {\n\t\t\t\ttimeout = 2400\n\t\t\t}\n\t\t\tt.Run(\"Active\", func(t *testing.T) {\n\t\t\t\terr = ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"status\": \"INSTALLED\", \"partitionState\": \"(inprogress|active)\"}, einfo.ZInfoDevSW, timeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image installed status: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n\n}", "func (cp *RunnerParams) getRemoteImage() (*LatestImage, error) {\n\n\tresultToken, err := cp.getBearerToken()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\turl := 
\"https://iad.ocir.io/20180419/docker/images/odx-pipelines?repo=wercker%2Fwercker-runner\"\n\n\tvar client http.Client\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"Authorization\", \"Bearer \"+resultToken)\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlatestImageName := \"\"\n\tvar latestImageTime time.Time\n\tlatestImageDigest := \"\"\n\n\t// I hope this never changes...\n\tbasis := \"iad.ocir.io/odx-pipelines/wercker/wercker-runner\"\n\n\tif resp.StatusCode == 200 {\n\t\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbodyString := string(bodyBytes)\n\t\ttheWrapper := listWrapper{}\n\t\tjson.Unmarshal([]byte(bodyString), &theWrapper)\n\n\t\tfor _, imageItem := range theWrapper.Imgs {\n\t\t\ttm, err := time.Parse(time.RFC3339, imageItem.Timestamp)\n\t\t\tif err != nil {\n\t\t\t\tcp.Logger.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif cp.Debug {\n\t\t\t\tmessage := fmt.Sprintf(\"Repos: %s --> %s\", tm, imageItem.Tag)\n\t\t\t\tcp.Logger.Debugln(message)\n\t\t\t}\n\n\t\t\t// For production only ignore any tag that isn't latest or master\n\t\t\tif cp.ProdType {\n\t\t\t\tif imageItem.Tag != \"latest\" && !strings.HasPrefix(imageItem.Tag, \"master\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Match the digest from latest with the proper master entry. 
That will be the\n\t\t\t// image name returned to the caller.\n\t\t\tif cp.ProdType {\n\t\t\t\tif imageItem.Tag == \"latest\" {\n\t\t\t\t\tlatestImageDigest = imageItem.Digest\n\t\t\t\t\tcp.Logger.Debugln(\"Remote latest digest is \" + imageItem.Digest)\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\t// Compare the digest to the latest, when the same we have the image name with commit-id\n\t\t\t\t\t// for whatever was tagged as latest.\n\t\t\t\t\tif imageItem.Digest == latestImageDigest {\n\t\t\t\t\t\tlatestImageTime = tm\n\t\t\t\t\t\tlatestImageName = fmt.Sprintf(\"%s:%s\", basis, imageItem.Tag)\n\t\t\t\t\t\tif cp.Debug {\n\t\t\t\t\t\t\tmessage := fmt.Sprintf(\"Selecting %s as doopleganger for discovered latest tag\", latestImageName)\n\t\t\t\t\t\t\tcp.Logger.Debugln(message)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif tm.After(latestImageTime) {\n\t\t\t\tlatestImageTime = tm\n\t\t\t\tlatestImageName = fmt.Sprintf(\"%s:%s\", basis, imageItem.Tag)\n\t\t\t}\n\t\t}\n\t}\n\n\tif latestImageName == \"\" {\n\t\treturn nil, fmt.Errorf(\"no runner image exists in the remote repository\")\n\t}\n\n\treturn &LatestImage{\n\t\tImageName: latestImageName,\n\t\tCreated: latestImageTime,\n\t}, nil\n}", "func (m *MockCEImpl) ImagePull(image string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImagePull\", image)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect 
--read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (images.Image, error) {\n\tfetchCtx := defaultRemoteContext()\n\tfor _, o := range opts {\n\t\tif err := o(c, fetchCtx); err != nil {\n\t\t\treturn images.Image{}, err\n\t\t}\n\t}\n\n\tif fetchCtx.Unpack {\n\t\treturn images.Image{}, fmt.Errorf(\"unpack on fetch not supported, try pull: %w\", errdefs.ErrNotImplemented)\n\t}\n\n\tif fetchCtx.PlatformMatcher == nil {\n\t\tif len(fetchCtx.Platforms) == 0 {\n\t\t\tfetchCtx.PlatformMatcher = platforms.All\n\t\t} else {\n\t\t\tvar ps []ocispec.Platform\n\t\t\tfor _, s := range fetchCtx.Platforms {\n\t\t\t\tp, err := platforms.Parse(s)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn images.Image{}, fmt.Errorf(\"invalid platform %s: %w\", s, err)\n\t\t\t\t}\n\t\t\t\tps = append(ps, p)\n\t\t\t}\n\n\t\t\tfetchCtx.PlatformMatcher = platforms.Any(ps...)\n\t\t}\n\t}\n\n\tctx, done, err := c.WithLease(ctx)\n\tif err != nil {\n\t\treturn images.Image{}, err\n\t}\n\tdefer done(ctx)\n\n\timg, err := c.fetch(ctx, fetchCtx, ref, 0)\n\tif err != nil {\n\t\treturn images.Image{}, err\n\t}\n\treturn c.createNewImage(ctx, img)\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func getRemoteImage(url string) ([]byte, error) {\n\tvar image []byte\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\tdefer resp.Body.Close()\n\n\timage, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\n\treturn image, nil\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. 
Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (m *MockCommonAPIClient) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageCreate\", ctx, parentReference, options)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockImageManager) ImagePull(arg0 context.Context, arg1 string, arg2 types.ImagePullOptions) (io.ReadCloser, error) {\n\tret := m.ctrl.Call(m, \"ImagePull\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockResource) Fetch(target string) ([][]byte, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", target)\n\tret0, _ := ret[0].([][]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockCommonAPIClient) 
ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImagePush\", ctx, ref, options)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FetchRemoteImage indicates an expected call of FetchRemoteImage
func (mr *MockFetcherMockRecorder) FetchRemoteImage(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchRemoteImage", reflect.TypeOf((*MockFetcher)(nil).FetchRemoteImage), arg0) }
[ "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. 
Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (images.Image, error) {\n\tfetchCtx := defaultRemoteContext()\n\tfor _, o := range opts {\n\t\tif err := o(c, fetchCtx); err != nil {\n\t\t\treturn images.Image{}, err\n\t\t}\n\t}\n\n\tif fetchCtx.Unpack {\n\t\treturn images.Image{}, fmt.Errorf(\"unpack on fetch not supported, try pull: %w\", errdefs.ErrNotImplemented)\n\t}\n\n\tif fetchCtx.PlatformMatcher == nil {\n\t\tif len(fetchCtx.Platforms) == 0 {\n\t\t\tfetchCtx.PlatformMatcher = platforms.All\n\t\t} else {\n\t\t\tvar ps []ocispec.Platform\n\t\t\tfor _, s := range fetchCtx.Platforms {\n\t\t\t\tp, err := platforms.Parse(s)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn images.Image{}, fmt.Errorf(\"invalid platform %s: %w\", s, err)\n\t\t\t\t}\n\t\t\t\tps = append(ps, p)\n\t\t\t}\n\n\t\t\tfetchCtx.PlatformMatcher = platforms.Any(ps...)\n\t\t}\n\t}\n\n\tctx, done, err := c.WithLease(ctx)\n\tif err != nil {\n\t\treturn images.Image{}, err\n\t}\n\tdefer done(ctx)\n\n\timg, err := c.fetch(ctx, fetchCtx, ref, 0)\n\tif err != nil {\n\t\treturn 
images.Image{}, err\n\t}\n\treturn c.createNewImage(ctx, img)\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func getRemoteImage(url string) ([]byte, error) {\n\tvar image []byte\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\tdefer resp.Body.Close()\n\n\timage, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\n\treturn image, nil\n}", "func pullMissingImage(ctx context.Context, apiClient client.CommonAPIClient, image string, force bool) error {\n\tif !force {\n\t\t_, inspectError := apiClient.ImageInspect(ctx, image)\n\t\tif inspectError == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err, ok := inspectError.(client.RespError); !ok {\n\t\t\treturn inspectError\n\t\t} else if err.Code() != http.StatusNotFound {\n\t\t\treturn inspectError\n\t\t}\n\t}\n\n\tnamedRef, err := reference.Parse(image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnamedRef = reference.TrimTagForDigest(reference.WithDefaultTagIfMissing(namedRef))\n\n\tvar name, tag string\n\tif reference.IsNameTagged(namedRef) 
{\n\t\tname, tag = namedRef.Name(), namedRef.(reference.Tagged).Tag()\n\t} else {\n\t\tname = namedRef.String()\n\t}\n\n\tresponseBody, err := apiClient.ImagePull(ctx, name, tag, fetchRegistryAuth(namedRef.Name()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to pull image: %v\", err)\n\t}\n\tdefer responseBody.Close()\n\n\treturn showProgress(responseBody)\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func (suite *APIImageInspectSuite) TestImageInspectOk(c *check.C) {\n\tvar (\n\t\trepo = environment.BusyboxRepo\n\t\ttag = \"1.24\"\n\n\t\tid = \"sha256:ca3d7d608b8a8bbaaac2c350bd0f9588cce0509ada74108d5c4b2afb24c46125\"\n\t\tdig = \"sha256:840f2b98a2540ff1d265782c42543dbec7218d3ab0e73b296d7dac846f146e27\"\n\t)\n\n\trepoTag := fmt.Sprintf(\"%s:%s\", repo, tag)\n\trepoDigest := fmt.Sprintf(\"%s@%s\", repo, dig)\n\n\tfor _, image := range []string{\n\t\tid,\n\t\trepoTag,\n\t\trepoDigest,\n\t\tfmt.Sprintf(\"%s:whatever@%s\", repo, dig),\n\t} {\n\t\tresp, err := request.Get(\"/images/\" + image + \"/json\")\n\t\tc.Assert(err, check.IsNil)\n\t\tCheckRespStatus(c, resp, 200)\n\n\t\tgot := types.ImageInfo{}\n\t\terr = request.DecodeBody(&got, resp.Body)\n\t\tc.Assert(err, check.IsNil)\n\n\t\t// TODO: More specific check is needed\n\t\tc.Assert(got.Config, check.NotNil)\n\t\tc.Assert(got.ID, check.Equals, id)\n\t\tc.Assert(got.CreatedAt, check.NotNil)\n\t\tc.Assert(got.Size, check.NotNil)\n\t\tc.Assert(reflect.DeepEqual(got.RepoTags, []string{repoTag}), check.Equals, 
true)\n\t\tc.Assert(reflect.DeepEqual(got.RepoDigests, []string{repoDigest}), check.Equals, true)\n\t}\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := 
f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func (n *OpenBazaarNode) FetchImage(peerID string, imageType string, size string, useCache bool) (io.ReadSeeker, error) {\n\tquery := \"/\" + peerID + \"/images/\" + size + \"/\" + imageType\n\tb, err := ipfs.ResolveThenCat(n.IpfsNode, ipath.FromString(query), time.Minute, n.IPNSQuorumSize, useCache)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bytes.NewReader(b), nil\n}", "func (v *Virt) ImageRemoteDigest(ctx context.Context, image string) (digest string, err error) {\n\treturn\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest 
{\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func (c *dockerClientMock) GetRemoteImageInfo(imageSoure string) (v1.Image, error) {\n\treturn &fake.FakeImage{}, nil\n}", "func (cp *RunnerParams) getRemoteImage() (*LatestImage, error) {\n\n\tresultToken, err := cp.getBearerToken()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\turl := \"https://iad.ocir.io/20180419/docker/images/odx-pipelines?repo=wercker%2Fwercker-runner\"\n\n\tvar client http.Client\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"Authorization\", \"Bearer \"+resultToken)\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlatestImageName := \"\"\n\tvar latestImageTime time.Time\n\tlatestImageDigest := \"\"\n\n\t// I hope this never changes...\n\tbasis := \"iad.ocir.io/odx-pipelines/wercker/wercker-runner\"\n\n\tif resp.StatusCode == 200 {\n\t\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbodyString := string(bodyBytes)\n\t\ttheWrapper := listWrapper{}\n\t\tjson.Unmarshal([]byte(bodyString), &theWrapper)\n\n\t\tfor _, imageItem := range theWrapper.Imgs {\n\t\t\ttm, err := time.Parse(time.RFC3339, imageItem.Timestamp)\n\t\t\tif err != nil {\n\t\t\t\tcp.Logger.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif cp.Debug {\n\t\t\t\tmessage := fmt.Sprintf(\"Repos: %s --> %s\", tm, imageItem.Tag)\n\t\t\t\tcp.Logger.Debugln(message)\n\t\t\t}\n\n\t\t\t// For production only ignore any tag that isn't latest or master\n\t\t\tif cp.ProdType {\n\t\t\t\tif imageItem.Tag != \"latest\" && !strings.HasPrefix(imageItem.Tag, \"master\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Match the digest from latest with the proper master entry. 
That will be the\n\t\t\t// image name returned to the caller.\n\t\t\tif cp.ProdType {\n\t\t\t\tif imageItem.Tag == \"latest\" {\n\t\t\t\t\tlatestImageDigest = imageItem.Digest\n\t\t\t\t\tcp.Logger.Debugln(\"Remote latest digest is \" + imageItem.Digest)\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\t// Compare the digest to the latest, when the same we have the image name with commit-id\n\t\t\t\t\t// for whatever was tagged as latest.\n\t\t\t\t\tif imageItem.Digest == latestImageDigest {\n\t\t\t\t\t\tlatestImageTime = tm\n\t\t\t\t\t\tlatestImageName = fmt.Sprintf(\"%s:%s\", basis, imageItem.Tag)\n\t\t\t\t\t\tif cp.Debug {\n\t\t\t\t\t\t\tmessage := fmt.Sprintf(\"Selecting %s as doopleganger for discovered latest tag\", latestImageName)\n\t\t\t\t\t\t\tcp.Logger.Debugln(message)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif tm.After(latestImageTime) {\n\t\t\t\tlatestImageTime = tm\n\t\t\t\tlatestImageName = fmt.Sprintf(\"%s:%s\", basis, imageItem.Tag)\n\t\t\t}\n\t\t}\n\t}\n\n\tif latestImageName == \"\" {\n\t\treturn nil, fmt.Errorf(\"no runner image exists in the remote repository\")\n\t}\n\n\treturn &LatestImage{\n\t\tImageName: latestImageName,\n\t\tCreated: latestImageTime,\n\t}, nil\n}", "func (suite *APIImageCreateSuite) TestImageCreateOk(c *check.C) {\n\tq := url.Values{}\n\tq.Add(\"fromImage\", environment.HelloworldRepo)\n\tq.Add(\"tag\", \"latest\")\n\tquery := request.WithQuery(q)\n\tresp, err := request.Post(\"/images/create\", query)\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 200)\n\n\t// TODO: add a waituntil func to check the exsitence of image\n\ttime.Sleep(5000 * time.Millisecond)\n\n\tresp, err = request.Delete(\"/images/\" + environment.HelloworldRepo + \":latest\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 204)\n}", "func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, 
namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}", "func Test_GetImageFromUrl_badUrl(t *testing.T) {\n\tb, err := GetImageFromUrl(\"some-bad-url\")\n\n\tassert.Equal(t, `Error getting image: Get some-bad-url: unsupported protocol scheme \"\"`, err.Error())\n\tassert.Equal(t, []byte(nil), b)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FetchUpdatedLocalImage mocks base method
func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) { ret := m.ctrl.Call(m, "FetchUpdatedLocalImage", arg0, arg1, arg2) ret0, _ := ret[0].(image.Image) ret1, _ := ret[1].(error) return ret0, ret1 }
[ "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockAPI) UpdateImageStatus(arg0 context.Context, arg1 *models.Host, arg2 *models.ContainerImageAvailability, arg3 *gorm.DB) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateImageStatus\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockContainerServer) UpdateImage(arg0 string, arg1 api.ImagePut, arg2 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockContainerServer) RefreshImage(arg0 string) (client.Operation, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RefreshImage\", arg0)\n\tret0, _ := ret[0].(client.Operation)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func (r *MockRepoManager) mockUpdate() {\n\tr.mtx.Lock()\n\tdefer 
r.mtx.Unlock()\n\tr.updateCount++\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func (m *MockUpstreamIntf) CachedRemoteDigestOfLocalHeight() blockdigest.Digest {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CachedRemoteDigestOfLocalHeight\")\n\tret0, _ := ret[0].(blockdigest.Digest)\n\treturn ret0\n}", "func (m *MockUsecase) UpdateAvatar(arg0 string, arg1 multipart.File, arg2 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateAvatar\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *API) ImageLocalDigests(ctx context.Context, image string) ([]string, error) {\n\tret := _m.Called(ctx, image)\n\n\tvar r0 []string\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok {\n\t\treturn rf(ctx, image)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, string) []string); ok {\n\t\tr0 = rf(ctx, image)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]string)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = rf(ctx, image)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockCommonAPIClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageLoad\", ctx, input, quiet)\n\tret0, _ := ret[0].(types.ImageLoadResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockFetcherMockRecorder) FetchUpdatedLocalImage(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchUpdatedLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchUpdatedLocalImage), arg0, arg1, arg2)\n}", "func (m *MockImageAPIClient) ImageLoad(ctx context.Context, input io.Reader, 
quiet bool) (types.ImageLoadResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageLoad\", ctx, input, quiet)\n\tret0, _ := ret[0].(types.ImageLoadResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockCEImpl) ImagePull(image string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImagePull\", image)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestBaseImage(t *testing.T) {\n\tctx, err := controllerPrepare()\n\tif err != nil {\n\t\tt.Fatal(\"Fail in controller prepare: \", err)\n\t}\n\teveBaseRef := os.Getenv(\"EVE_BASE_REF\")\n\tif len(eveBaseRef) == 0 {\n\t\teveBaseRef = \"4.10.0\"\n\t}\n\tzArch := os.Getenv(\"ZARCH\")\n\tif len(eveBaseRef) == 0 {\n\t\tzArch = \"amd64\"\n\t}\n\tHV := os.Getenv(\"HV\")\n\tif HV == \"xen\" {\n\t\tHV = \"\"\n\t}\n\tvar baseImageTests = []struct {\n\t\tdataStoreID string\n\t\timageID string\n\t\tbaseID string\n\t\timageRelativePath string\n\t\timageFormat config.Format\n\t\teveBaseRef string\n\t\tzArch string\n\t\tHV string\n\t}{\n\t\t{eServerDataStoreID,\n\n\t\t\t\"1ab8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"22b8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"baseos.qcow2\",\n\t\t\tconfig.Format_QCOW2,\n\t\t\teveBaseRef,\n\t\t\tzArch,\n\t\t\tHV,\n\t\t},\n\t}\n\tfor _, tt := range baseImageTests {\n\t\tbaseOSVersion := fmt.Sprintf(\"%s-%s\", tt.eveBaseRef, tt.zArch)\n\t\tif tt.HV != \"\" {\n\t\t\tbaseOSVersion = fmt.Sprintf(\"%s-%s-%s\", tt.eveBaseRef, tt.zArch, tt.HV)\n\t\t}\n\t\tt.Run(baseOSVersion, func(t *testing.T) {\n\n\t\t\terr = prepareBaseImageLocal(ctx, tt.dataStoreID, tt.imageID, tt.baseID, tt.imageRelativePath, tt.imageFormat, baseOSVersion)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in prepare base image from local file: \", err)\n\t\t\t}\n\t\t\tdeviceCtx, err := ctx.GetDeviceFirst()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in get first device: \", err)\n\t\t\t}\n\t\t\tdeviceCtx.SetBaseOSConfig([]string{tt.baseID})\n\t\t\tdevUUID := 
deviceCtx.GetID()\n\t\t\terr = ctx.ConfigSync(deviceCtx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in sync config with controller: \", err)\n\t\t\t}\n\t\t\tt.Run(\"Started\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion}, einfo.ZInfoDevSW, 300)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image update init: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Downloaded\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"downloadProgress\": \"100\"}, einfo.ZInfoDevSW, 1500)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image download progress: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Logs\", func(t *testing.T) {\n\t\t\t\tif !checkLogs {\n\t\t\t\t\tt.Skip(\"no LOGS flag set - skipped\")\n\t\t\t\t}\n\t\t\t\terr = ctx.LogChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"eveVersion\": baseOSVersion}, 1200)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image logs: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\ttimeout := time.Duration(1200)\n\n\t\t\tif !checkLogs {\n\t\t\t\ttimeout = 2400\n\t\t\t}\n\t\t\tt.Run(\"Active\", func(t *testing.T) {\n\t\t\t\terr = ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"status\": \"INSTALLED\", \"partitionState\": \"(inprogress|active)\"}, einfo.ZInfoDevSW, timeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image installed status: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n\n}", "func (_m *Manager) UpdateBlobStatus(ctx context.Context, _a1 *models.Blob) (int64, error) {\n\tret := _m.Called(ctx, _a1)\n\n\tvar r0 int64\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *models.Blob) (int64, error)); ok {\n\t\treturn rf(ctx, _a1)\n\t}\n\tif rf, ok := 
ret.Get(0).(func(context.Context, *models.Blob) int64); ok {\n\t\tr0 = rf(ctx, _a1)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *models.Blob) error); ok {\n\t\tr1 = rf(ctx, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func Test_service_SearchImageNew(t *testing.T) {\n\tctx := context.Background()\n\t// the file aws-west3-images.json has been created with:\n\t// safescale image list > aws-west3-images.json\n\n\t// this turns the json output into the list of images we would receive from the true AWS service\n\t// we don't actually use the network to run this test\n\trecovered, err := getImages(\"aws-west3-images.json\")\n\tif err != nil {\n\t\tt.FailNow()\n\t}\n\n\t// we are mocking the output of the ListImages method -> so we are using ListImagesMock\n\tmc := minimock.NewController(t)\n\tcommon := mocks.NewServiceMock(mc)\n\tcommon.ListImagesMock.Expect(ctx, false).Return(recovered, nil)\n\n\t// now the tricky part, ideally we should use the code 'SearchImage' from the service, but we cannot\n\t// this is a code smell, it means that our code is really hard to test...\n\t// in order to do so, we created a new function, SearchImageOriginal, which is testable, this is, we can replace\n\t// true implementations by our mocks without touching anything else\n\t// the contents of our SearchImageOriginal are the same as in iaas/service.go:685\n\tres, err := SearchImageNew(common, \"Ubuntu 18.04\")\n\tif err != nil {\n\t\tt.FailNow()\n\t}\n\n\t// those are the true results of a request \"Ubuntu 18.04\" using AWS on west-3\n\t// now we can play tuning parameters in SearchImageNew and see what happens\n\texpected := &abstract.Image{\n\t\tID: \"ami-00187db91863e8905\",\n\t\tName: \"ubuntu-18.04-pke-201912101339\",\n\t}\n\n\tif res.Name != expected.Name {\n\t\tt.Errorf(\"It seems that we selected %s, (expected %s)\", res.Name, expected.Name)\n\t\tt.FailNow()\n\t}\n\n\tif res.ID != expected.ID 
{\n\t\tt.Errorf(\"It seems that we had the wrong ID %s, (expected %s)\", res.ID, expected.ID)\n\t\tt.FailNow()\n\t}\n}", "func (m *MockModuleService) GetLatestModuleImage(arg0 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLatestModuleImage\", arg0)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FetchUpdatedLocalImage indicates an expected call of FetchUpdatedLocalImage
func (mr *MockFetcherMockRecorder) FetchUpdatedLocalImage(arg0, arg1, arg2 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUpdatedLocalImage", reflect.TypeOf((*MockFetcher)(nil).FetchUpdatedLocalImage), arg0, arg1, arg2) }
[ "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockFetcherMockRecorder) FetchLocalImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchLocalImage), arg0)\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func (c *Client) IsLocalImage(image types.ImageInspect) bool {\n\treturn len(image.RepoDigests) == 0\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: 
app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func (o *ImageImportManifest) GetLocalImageIdOk() (*string, bool) {\n\tif o == nil || o.LocalImageId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.LocalImageId, true\n}", "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. 
Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func TestUpdateCalledAndProcessedWithNilValues(t *testing.T) {\n\tc := &fakeCamera{\n\t\tinitError: nil,\n\t\tupdateError: nil,\n\t\tstate: &device.CameraState{\n\t\t\tPicture: \"\",\n\t\t\tDistance: 10,\n\t\t},\n\t\tspec: &device.Spec{\n\t\t\tUpdatePeriod: 100 * time.Millisecond,\n\t\t\tSupportedCommands: []enums.Command{enums.CmdTakePicture},\n\t\t\tSupportedProperties: []enums.Property{enums.PropPicture, enums.PropInput},\n\t\t\tPostCommandDeferUpdate: 0,\n\t\t},\n\t\tname: \"\",\n\t}\n\n\tp := getWrapper(false, c)\n\tassert.NotNil(t, p, \"nil wrapper\")\n\n\ttime.Sleep(120 * time.Millisecond)\n\n\tassert.Equal(t, 1, c.updateInvoked, \"update was not called\")\n\n\tm := p.GetUpdateMessage()\n\n\t_, ok := m.State[\"picture\"]\n\n\tassert.False(t, ok)\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = 
f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func (r *FakeImageService) ImageStatus(_ context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tr.Called = append(r.Called, \"ImageStatus\")\n\tif err := r.popError(\"ImageStatus\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &runtimeapi.ImageStatusResponse{Image: r.Images[image.Image]}, nil\n}", "func ttrPushImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\trc, err := client.ImagePush(ctx, ttrImageName(image), types.ImagePushOptions{RegistryAuth: \"{}\"})\n\tassert.Assert(t, err)\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tassert.Assert(t, err)\n\t\tassert.Assert(t, strings.Contains(body, `\"status\":\"latest: digest: `))\n\t}\n}", "func (mr *MockFetcherMockRecorder) FetchRemoteImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchRemoteImage\", reflect.TypeOf((*MockFetcher)(nil).FetchRemoteImage), arg0)\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func retagLocalImageForRemotePush(localTag string, remoteUrl string) string {\n\tnewTag := fmt.Sprintf(\"%s/%s\", remoteUrl, localTag)\n\tdockerTag(localTag, newTag)\n\treturn newTag\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", 
fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func (c *repoCacheManager) fetchImages(tags []string) 
(fetchImagesResult, error) {\n\timages := map[string]image.Info{}\n\n\t// Create a list of images that need updating\n\tvar toUpdate []imageToUpdate\n\n\t// Counters for reporting what happened\n\tvar missing, refresh int\n\tfor _, tag := range tags {\n\t\tif tag == \"\" {\n\t\t\treturn fetchImagesResult{}, fmt.Errorf(\"empty tag in fetched tags\")\n\t\t}\n\n\t\t// See if we have the manifest already cached\n\t\tnewID := c.repoID.ToRef(tag)\n\t\tkey := NewManifestKey(newID.CanonicalRef())\n\t\tbytes, deadline, err := c.cacheClient.GetKey(key)\n\t\t// If err, then we don't have it yet. Update.\n\t\tswitch {\n\t\tcase err != nil: // by and large these are cache misses, but any error shall count as \"not found\"\n\t\t\tif err != ErrNotCached {\n\t\t\t\tc.logger.Log(\"warning\", \"error from cache\", \"err\", err, \"ref\", newID)\n\t\t\t}\n\t\t\tmissing++\n\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})\n\t\tcase len(bytes) == 0:\n\t\t\tc.logger.Log(\"warning\", \"empty result from cache\", \"ref\", newID)\n\t\t\tmissing++\n\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})\n\t\tdefault:\n\t\t\tvar entry registry.ImageEntry\n\t\t\tif err := json.Unmarshal(bytes, &entry); err == nil {\n\t\t\t\tif c.trace {\n\t\t\t\t\tc.logger.Log(\"trace\", \"found cached manifest\", \"ref\", newID, \"last_fetched\", entry.LastFetched.Format(time.RFC3339), \"deadline\", deadline.Format(time.RFC3339))\n\t\t\t\t}\n\n\t\t\t\tif entry.ExcludedReason == \"\" {\n\t\t\t\t\timages[tag] = entry.Info\n\t\t\t\t\tif c.now.After(deadline) {\n\t\t\t\t\t\tpreviousRefresh := minRefresh\n\t\t\t\t\t\tlastFetched := entry.Info.LastFetched\n\t\t\t\t\t\tif !lastFetched.IsZero() {\n\t\t\t\t\t\t\tpreviousRefresh = deadline.Sub(lastFetched)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: 
entry.Info.Digest})\n\t\t\t\t\t\trefresh++\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif c.trace {\n\t\t\t\t\t\tc.logger.Log(\"trace\", \"excluded in cache\", \"ref\", newID, \"reason\", entry.ExcludedReason)\n\t\t\t\t\t}\n\t\t\t\t\tif c.now.After(deadline) {\n\t\t\t\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})\n\t\t\t\t\t\trefresh++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := fetchImagesResult{\n\t\timagesFound: images,\n\t\timagesToUpdate: toUpdate,\n\t\timagesToUpdateRefreshCount: refresh,\n\t\timagesToUpdateMissingCount: missing,\n\t}\n\n\treturn result, nil\n}", "func (m *MockAPI) UpdateImageStatus(arg0 context.Context, arg1 *models.Host, arg2 *models.ContainerImageAvailability, arg3 *gorm.DB) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateImageStatus\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getRewardHistory reads reward history
func (p *Protocol) getRewardHistory(actionHash string) ([]*RewardHistory, error) { db := p.Store.GetDB() getQuery := fmt.Sprintf(selectRewardHistory, RewardHistoryTableName) stmt, err := db.Prepare(getQuery) if err != nil { return nil, errors.Wrap(err, "failed to prepare get query") } defer stmt.Close() rows, err := stmt.Query(actionHash) if err != nil { return nil, errors.Wrap(err, "failed to execute get query") } var rewardHistory RewardHistory parsedRows, err := s.ParseSQLRows(rows, &rewardHistory) if err != nil { return nil, errors.Wrap(err, "failed to parse results") } if len(parsedRows) == 0 { return nil, indexprotocol.ErrNotExist } var rewardHistoryList []*RewardHistory for _, parsedRow := range parsedRows { rewards := parsedRow.(*RewardHistory) rewardHistoryList = append(rewardHistoryList, rewards) } return rewardHistoryList, nil }
[ "func (as AccountStorage) GetRewardHistory(\n\tctx sdk.Context, me types.AccountKey, bucketSlot int64) (*RewardHistory, sdk.Error) {\n\tstore := ctx.KVStore(as.key)\n\trewardHistoryBytes := store.Get(getRewardHistoryKey(me, bucketSlot))\n\tif rewardHistoryBytes == nil {\n\t\treturn nil, nil\n\t}\n\thistory := new(RewardHistory)\n\tif err := as.cdc.UnmarshalJSON(rewardHistoryBytes, history); err != nil {\n\t\treturn nil, ErrFailedToUnmarshalRewardHistory(err)\n\t}\n\treturn history, nil\n}", "func (_Token *TokenCallerSession) BaseRewardHistory(index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistory(&_Token.CallOpts, index)\n}", "func (_Token *TokenCaller) BaseRewardHistory(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t\tret3 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t\tret3,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseRewardHistory\", index)\n\treturn *ret0, *ret1, *ret2, *ret3, err\n}", "func ViewReward(rw http.ResponseWriter, r *http.Request) {\n\t// get the token\n\treqToken := r.Header.Get(\"Authorization\")\n\t\n\t// get the claims\n\tclaims, isNotValid := GetClaims(reqToken, rw)\n\tif isNotValid {\n\t\treturn\n\t}\n\n\tdt, err := db.GetUserRewards(claims.Roll)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write(Rsp(err.Error(), \"Server Error\"))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n\tres := c.RespData{\n\t\tMessage: \"All data\",\n\t\tData: dt,\n\t}\n\tjson.NewEncoder(rw).Encode(res)\n}", "func getHistory(ctx context.Context, args *map[string]interface{}) error {\n\treturnResult := totalSummary{\n\t\tNumberOfUpdates: 0,\n\t\tUpdated: makeDefaultMap(),\n\t\tOperation: makeDefaultMap(),\n\t\tFHIRVersion: makeDefaultMap(),\n\t\tTLSVersion: makeDefaultMap(),\n\t}\n\tvar history 
[]historyEntry\n\n\tha, ok := (*args)[\"historyArgs\"].(historyArgs)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unable to cast arguments to type historyArgs\")\n\t}\n\n\t// Get all rows in the history table between given dates\n\thistoryQuery := `SELECT updated_at, operation, capability_fhir_version, tls_version, mime_types FROM fhir_endpoints_info_history\n\t\tWHERE updated_at between '` + ha.dateStart + `' AND '` + ha.dateEnd + `' AND url=$1 AND requested_fhir_version=$2 ORDER BY updated_at`\n\thistoryRows, err := ha.store.DB.QueryContext(ctx, historyQuery, ha.fhirURL, ha.requestedFhirVersion)\n\tif err != nil {\n\t\tlog.Warnf(\"Failed getting the history rows for URL %s with requested version %s. Error: %s\", ha.fhirURL, ha.requestedFhirVersion, err)\n\t\tresult := Result{\n\t\t\tURL: ha.fhirURL,\n\t\t\tRequestedFhirVersion: ha.requestedFhirVersion,\n\t\t\tSummary: returnResult,\n\t\t}\n\t\tha.result <- result\n\t\treturn nil\n\t}\n\n\tdefer historyRows.Close()\n\tfor historyRows.Next() {\n\t\tvar e historyEntry\n\t\tvar fhirVersion string\n\n\t\te.URL = ha.fhirURL\n\t\te.RequestedFhirVersion = ha.requestedFhirVersion\n\n\t\tvar err = historyRows.Scan(\n\t\t\t&e.UpdatedAt,\n\t\t\t&e.Operation,\n\t\t\t&fhirVersion,\n\t\t\t&e.TLSVersion,\n\t\t\tpq.Array(&e.MIMETypes))\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error while scanning the rows of the history table for URL %s with requested version %s. 
Error: %s\", ha.fhirURL, ha.requestedFhirVersion, err)\n\t\t\tresult := Result{\n\t\t\t\tURL: ha.fhirURL,\n\t\t\t\tRequestedFhirVersion: ha.requestedFhirVersion,\n\t\t\t\tSummary: returnResult,\n\t\t\t}\n\t\t\tha.result <- result\n\t\t\treturn nil\n\t\t}\n\n\t\tif fhirVersion == \"\" {\n\t\t\te.FHIRVersion = fhirVersion\n\t\t\te.FHIRVersionError = fmt.Errorf(\"received NULL FHIR version\")\n\t\t} else {\n\t\t\te.FHIRVersion = fhirVersion\n\t\t\te.FHIRVersionError = nil\n\t\t}\n\n\t\thistory = append(history, e)\n\t}\n\n\tif len(history) > 0 {\n\t\treturnResult.NumberOfUpdates = len(history)\n\t\tstartElem := history[0]\n\t\tendElem := history[len(history)-1]\n\n\t\treturnResult.Updated[\"first\"] = startElem.UpdatedAt\n\t\tif startElem.UpdatedAt != endElem.UpdatedAt {\n\t\t\treturnResult.Updated[\"last\"] = endElem.UpdatedAt\n\t\t}\n\t\treturnResult.Operation[\"first\"] = startElem.Operation\n\t\tif startElem.Operation != endElem.Operation {\n\t\t\treturnResult.Operation[\"last\"] = endElem.Operation\n\t\t}\n\t\tif startElem.FHIRVersionError == nil {\n\t\t\treturnResult.FHIRVersion[\"first\"] = startElem.FHIRVersion\n\t\t}\n\t\tif (startElem.FHIRVersion != endElem.FHIRVersion) && (endElem.FHIRVersionError == nil) {\n\t\t\treturnResult.FHIRVersion[\"last\"] = endElem.FHIRVersion\n\t\t}\n\t\treturnResult.TLSVersion[\"first\"] = startElem.TLSVersion\n\t\tif startElem.TLSVersion != endElem.TLSVersion {\n\t\t\treturnResult.TLSVersion[\"last\"] = endElem.TLSVersion\n\t\t}\n\t\treturnResult.MIMETypes.First = startElem.MIMETypes\n\t\tif !helpers.StringArraysEqual(startElem.MIMETypes, endElem.MIMETypes) {\n\t\t\treturnResult.MIMETypes.Last = endElem.MIMETypes\n\t\t}\n\t}\n\n\tresult := Result{\n\t\tURL: ha.fhirURL,\n\t\tRequestedFhirVersion: ha.requestedFhirVersion,\n\t\tSummary: returnResult,\n\t}\n\tha.result <- result\n\treturn nil\n}", "func (_IStakingRewards *IStakingRewardsTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn 
_IStakingRewards.contract.Transact(opts, \"getReward\")\n}", "func (_Stakingbindings *StakingbindingsTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Stakingbindings.contract.Transact(opts, \"getReward\")\n}", "func getHistory(source string) ([]TransferData, error) {\n\turl := fmt.Sprintf(\"%s/history?duration=%s\", source, url.QueryEscape(AgentRouter.CronInterval))\n\tresp := utils.FetchResponse(url, []byte{})\n\tif resp.Error != nil {\n\t\treturn nil, resp.Error\n\t}\n\tvar transferRecords []TransferData\n\terr := json.Unmarshal(resp.Data, &transferRecords)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn transferRecords, nil\n}", "func (_XStaking *XStakingTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"getReward\")\n}", "func GetValidatorRewards(ops types.HTTPOptions, cfg *config.Config, c client.Client) {\n\tbp, err := db.CreateBatchPoints(cfg.InfluxDB.Database)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsubStr := GetEncodedData(ops, cfg, c, \"validatorRewards()\")\n\tif subStr == \"\" {\n\t\treturn\n\t}\n\tn := len(subStr)\n\tfor i := 0; i < 66-n; i++ {\n\t\tsubStr = subStr + \"0\"\n\t}\n\tdataHash := subStr\n\tif dataHash != \"\" {\n\t\tcontractAddress := GetValContractAddress(cfg, c)\n\t\tresult := EthCall(ops, cfg, c, dataHash, contractAddress)\n\t\tif result.Result != \"\" {\n\t\t\trewards, er := utils.HexToBigInt(result.Result[2:])\n\t\t\tif !er {\n\t\t\t\treturn\n\t\t\t}\n\t\t\trewradsInEth := utils.ConvertWeiToEth(rewards) + utils.MaticDenom\n\n\t\t\t_ = db.WriteToInfluxDb(c, bp, \"heimdall_validator_rewards\", map[string]string{}, map[string]interface{}{\"val_rewards\": rewradsInEth})\n\t\t\tlog.Printf(\"Validator Rewards: %s\", rewradsInEth)\n\t\t}\n\t}\n}", "func (bwh *bwHistory) GetHistory() []float64 {\n\treturn bwh.history\n}", "func GetMatchHistory(accountID, region string, beginIndex, endIndex int) MatchHistory {\n\tapiURL := 
fmt.Sprintf(\"https://%v.api.riotgames.com/lol/match/v4/matchlists/by-account/%v?queue=420&endIndex=%v&beginIndex=%v\", region, accountID, endIndex, beginIndex)\n\tclient := http.Client{\n\t\tTimeout: 10 * time.Second,\n\t}\n\treq, reqErr := http.NewRequest(http.MethodGet, apiURL, nil)\n\treq.Header.Set(\"X-Riot-Token\", riotAPIKey)\n\tif reqErr != nil {\n\t\tlog.Fatalf(\"Error creating request: %v\", reqErr)\n\t}\n\tresp, getErr := client.Do(req)\n\n\tif getErr != nil {\n\t\tlog.Fatalf(\"Error getting summoner matches: %v\", getErr)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, readErr := ioutil.ReadAll(resp.Body)\n\tif readErr != nil {\n\t\tlog.Fatalf(\"Error reading response body: %v\", readErr)\n\t}\n\n\tmatchHistory := MatchHistory{}\n\n\tjsonErr := json.Unmarshal(body, &matchHistory)\n\tif jsonErr != nil {\n\t\tlog.Fatal(jsonErr)\n\t}\n\n\tfor i, matchInfo := range matchHistory.Matches {\n\t\tmatch := GetMatch(matchInfo.GameID, region)\n\t\tvar participantID int\n\t\tfor _, participantIdentity := range match.ParticipantIdentities {\n\t\t\tif participantIdentity.Player.AccountID == accountID {\n\t\t\t\tparticipantID = participantIdentity.ParticipantID\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfor _, participant := range match.Participants {\n\t\t\tif participant.ParticipantID == participantID {\n\t\t\t\tspell := GetSummonerSpellByKey(strconv.Itoa(participant.Spell1ID))\n\t\t\t\tparticipant.Spell1Name = spell.ID\n\t\t\t\tparticipant.Spell1Desc = spell.Description\n\n\t\t\t\tspell = GetSummonerSpellByKey(strconv.Itoa(participant.Spell2ID))\n\t\t\t\tparticipant.Spell2Name = spell.ID\n\t\t\t\tparticipant.Spell2Desc = spell.Description\n\n\t\t\t\tchampion := GetChampionByKey(strconv.Itoa(participant.ChampionID))\n\t\t\t\tparticipant.ChampionName = champion.Name\n\t\t\t\tparticipant.ChampionBlurb = champion.Blurb\n\t\t\t\tparticipant.ChampionImage = champion.Image.Full\n\n\t\t\t\tmatchHistory.Matches[i].Player = participant\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 
matchHistory\n}", "func GetRewardEventsInfo(fromBlock *big.Int, toBlock *big.Int) []*RewardInfo {\n\n\tvar logEpochRewardSig = []byte(\"EpochRewardsDistributedToVoters(address,uint256)\")\n\tvar logEpochRewardSigHash = crypto.Keccak256Hash(logEpochRewardSig)\n\tvar TopicsFilter = [][]common.Hash{{logEpochRewardSigHash}}\n\n\tcontractAddress := common.HexToAddress(WrapperContractDeploymentAddress[NetActive][Election])\n\n\tquery := ethereum.FilterQuery{\n\t\tFromBlock: fromBlock,\n\t\tToBlock: toBlock,\n\t\tTopics: TopicsFilter,\n\n\t\tAddresses: []common.Address{\n\t\t\tcontractAddress,\n\t\t},\n\t}\n\n\tlogs, err := atlasEthClient.FilterLogs(context.Background(), query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trewards_info := make([]*RewardInfo, 0, len(logs))\n\n\tcontractAbi, err := abi.JSON(strings.NewReader(string(binding.ElectionABI)))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, vLog := range logs {\n\n\t\tvar epochRewardEvent EpochRewardEvent\n\t\terr := contractAbi.Unpack(&epochRewardEvent, \"EpochRewardsDistributedToVoters\", vLog.Data)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tri := &RewardInfo{Group: common.HexToAddress(vLog.Topics[1].Hex()).String(),\n\t\t\tGroupHash: vLog.Topics[1],\n\t\t\tRewardValue: epochRewardEvent.Value,\n\t\t\tBlockNumber: new(big.Int).SetUint64(vLog.BlockNumber)}\n\n\t\tAddAtlasToRewardInfo(ri)\n\n\t\trewards_info = append(rewards_info, ri)\n\t}\n\n\treturn rewards_info\n}", "func (_TrialRulesAbstract *TrialRulesAbstractCaller) GetReward(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _TrialRulesAbstract.contract.Call(opts, out, \"getReward\")\n\treturn *ret0, err\n}", "func (as AccountStorage) GetReward(ctx sdk.Context, accKey types.AccountKey) (*Reward, sdk.Error) {\n\tstore := ctx.KVStore(as.key)\n\trewardByte := store.Get(getRewardKey(accKey))\n\tif rewardByte == nil {\n\t\treturn nil, ErrRewardNotFound()\n\t}\n\treward := 
new(Reward)\n\tif err := as.cdc.UnmarshalJSON(rewardByte, reward); err != nil {\n\t\treturn nil, ErrFailedToUnmarshalReward(err)\n\t}\n\treturn reward, nil\n}", "func (room *RoomRecorder) history() []*action_.PlayerAction {\n\troom.historyM.RLock()\n\tv := room._history\n\troom.historyM.RUnlock()\n\treturn v\n}", "func (s *Client) GetHistory(ctx context.Context, scripthash string) ([]*GetMempoolResult, error) {\n\tvar resp GetMempoolResp\n\n\terr := s.request(ctx, \"blockchain.scripthash.get_history\", []interface{}{scripthash}, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.Result, err\n}", "func (c RewardsController) GetRewards(page int) revel.Result {\n\n\tif !c.GetCurrentUser() {\n\t\treturn c.ForbiddenResponse()\n\t}\n\n\t//ChangeRewardsModel() // Remove when finish production\n\n\tvar reward models.Reward\n\tif Reward, ok := app.Mapper.GetModel(&reward); ok {\n\t\tvar rewards = []models.Reward{}\n\t\tvar match = bson.M{\"$and\": []bson.M{\n\t\t\tbson.M{\"$or\": []bson.M{\n\t\t\t\tbson.M{\"user_id\": c.CurrentUser.GetID().Hex()},\n\t\t\t\tbson.M{\"users\": bson.M{\"$elemMatch\": bson.M{\"$eq\": c.CurrentUser.GetID().Hex()}}},\n\t\t\t}},\n\t\t\tbson.M{\"is_visible\": true},\n\t\t\tbson.M{\"resource_type\": bson.M{\"$ne\": core.ModelTypeChallenge}},\n\t\t}}\n\t\tif page <= 1 {\n\t\t\tpage = 1\n\t\t}\n\t\tvar pipe = mgomap.Aggregate{}.Match(match).Sort(bson.M{\"updated_at\": -1}).Skip((page - 1) * core.LimitRewards).Limit(core.LimitRewards)\n\n\t\tif err := Reward.Pipe(pipe, &rewards); err != nil {\n\t\t\treturn c.ErrorResponse(c.Message(\"error.notFound\", \"Rewards\"), \"No rewards Found\", 400)\n\t\t}\n\t\treturn c.SuccessResponse(rewards, \"success\", core.ModelsType[core.ModelReward], serializers.RewardSerializer{Lang: c.Request.Locale})\n\n\t}\n\treturn c.ServerErrorResponse()\n}", "func (_Lmc *LmcSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn 
_Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getAccountReward reads account reward details
func (p *Protocol) getAccountReward(epochNumber uint64, candidateName string) (*AccountReward, error) { db := p.Store.GetDB() getQuery := fmt.Sprintf(selectAccountReward, AccountRewardTableName) stmt, err := db.Prepare(getQuery) if err != nil { return nil, errors.Wrap(err, "failed to prepare get query") } defer stmt.Close() rows, err := stmt.Query(epochNumber, candidateName) if err != nil { return nil, errors.Wrap(err, "failed to execute get query") } var accountReward AccountReward parsedRows, err := s.ParseSQLRows(rows, &accountReward) if err != nil { return nil, errors.Wrap(err, "failed to parse results") } if len(parsedRows) == 0 { return nil, indexprotocol.ErrNotExist } if len(parsedRows) > 1 { return nil, errors.New("only one row is expected") } return parsedRows[0].(*AccountReward), nil }
[ "func (as AccountStorage) GetReward(ctx sdk.Context, accKey types.AccountKey) (*Reward, sdk.Error) {\n\tstore := ctx.KVStore(as.key)\n\trewardByte := store.Get(getRewardKey(accKey))\n\tif rewardByte == nil {\n\t\treturn nil, ErrRewardNotFound()\n\t}\n\treward := new(Reward)\n\tif err := as.cdc.UnmarshalJSON(rewardByte, reward); err != nil {\n\t\treturn nil, ErrFailedToUnmarshalReward(err)\n\t}\n\treturn reward, nil\n}", "func (_TrialRulesAbstract *TrialRulesAbstractCaller) GetReward(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _TrialRulesAbstract.contract.Call(opts, out, \"getReward\")\n\treturn *ret0, err\n}", "func (_Stakingbindings *StakingbindingsTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Stakingbindings.contract.Transact(opts, \"getReward\")\n}", "func (_IStakingRewards *IStakingRewardsTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IStakingRewards.contract.Transact(opts, \"getReward\")\n}", "func (query *Query) GetReward(ctx context.Context, username string) (*model.Reward, error) {\n\tresp, err := query.transport.Query(ctx, AccountKVStoreKey, AccountRewardSubStore, []string{username})\n\tif err != nil {\n\t\tlinoe, ok := err.(errors.Error)\n\t\tif ok && linoe.BlockChainCode() == uint32(errors.CodeRewardNotFound) {\n\t\t\treturn nil, errors.EmptyResponse(\"account reward is not found\")\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treward := new(model.Reward)\n\tif err := query.transport.Cdc.UnmarshalJSON(resp, reward); err != nil {\n\t\treturn reward, err\n\t}\n\treturn reward, nil\n}", "func (_XStaking *XStakingTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"getReward\")\n}", "func (_Lmc *LmcSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn 
_Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}", "func (_Stakingbindings *StakingbindingsTransactorSession) GetReward() (*types.Transaction, error) {\n\treturn _Stakingbindings.Contract.GetReward(&_Stakingbindings.TransactOpts)\n}", "func (_Lmc *LmcCallerSession) GetUserRewardDebt(_userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebt(&_Lmc.CallOpts, _userAddress, _index)\n}", "func (_Lmc *LmcCaller) GetUserAccumulatedReward(opts *bind.CallOpts, _userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"getUserAccumulatedReward\", _userAddress, tokenIndex)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (c4 *Connect4) GetReward() int {\n\tif c4.Winner == nil {\n\t\treturn 0\n\t} else if *c4.Winner == 1 {\n\t\treturn 1\n\t}\n\treturn -1\n}", "func (_Lmc *LmcCaller) GetUserRewardDebt(opts *bind.CallOpts, _userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"getUserRewardDebt\", _userAddress, _index)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func GetReward(a Action, feedback Action) float64 {\n\tif a == feedback {\n\t\treturn 1\n\t}\n\treturn -1\n}", "func ViewReward(rw http.ResponseWriter, r *http.Request) {\n\t// get the token\n\treqToken := r.Header.Get(\"Authorization\")\n\t\n\t// get the claims\n\tclaims, isNotValid := GetClaims(reqToken, rw)\n\tif isNotValid {\n\t\treturn\n\t}\n\n\tdt, err := db.GetUserRewards(claims.Roll)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write(Rsp(err.Error(), \"Server Error\"))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n\tres := 
c.RespData{\n\t\tMessage: \"All data\",\n\t\tData: dt,\n\t}\n\tjson.NewEncoder(rw).Encode(res)\n}", "func (api *API) GetRewardFund(name string) (*json.RawMessage, error) {\n\tvar resp json.RawMessage\n\terr := api.call(\"get_reward_fund\", []interface{}{name}, &resp)\n\treturn &resp, err\n}", "func (_Stakingbindings *StakingbindingsCallerSession) GetRewardForDuration() (*big.Int, error) {\n\treturn _Stakingbindings.Contract.GetRewardForDuration(&_Stakingbindings.CallOpts)\n}", "func (_Stakingbindings *StakingbindingsSession) GetRewardForDuration() (*big.Int, error) {\n\treturn _Stakingbindings.Contract.GetRewardForDuration(&_Stakingbindings.CallOpts)\n}", "func (_IStakingRewards *IStakingRewardsCaller) GetRewardForDuration(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _IStakingRewards.contract.Call(opts, &out, \"getRewardForDuration\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Token *TokenCaller) CurrentReward(opts *bind.CallOpts, account common.Address) (struct {\n\tInitialDeposit *big.Int\n\tReward *big.Int\n}, error) {\n\tret := new(struct {\n\t\tInitialDeposit *big.Int\n\t\tReward *big.Int\n\t})\n\tout := ret\n\terr := _Token.contract.Call(opts, out, \"currentReward\", account)\n\treturn *ret, err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
updateRewardHistory stores reward information into reward history table
func (p *Protocol) updateRewardHistory(tx *sql.Tx, epochNumber uint64, actionHash string, rewardInfoMap map[string]*RewardInfo) error { valStrs := make([]string, 0, len(rewardInfoMap)) valArgs := make([]interface{}, 0, len(rewardInfoMap)*7) for rewardAddress, rewards := range rewardInfoMap { blockReward := rewards.BlockReward.String() epochReward := rewards.EpochReward.String() foundationBonus := rewards.FoundationBonus.String() var candidateName string // If more than one candidates share the same reward address, just use the first candidate as their delegate if len(p.RewardAddrToName[rewardAddress]) > 0 { candidateName = p.RewardAddrToName[rewardAddress][0] } valStrs = append(valStrs, "(?, ?, ?, ?, CAST(? as DECIMAL(65, 0)), CAST(? as DECIMAL(65, 0)), CAST(? as DECIMAL(65, 0)))") valArgs = append(valArgs, epochNumber, actionHash, rewardAddress, candidateName, blockReward, epochReward, foundationBonus) } insertQuery := fmt.Sprintf(insertRewardHistory, RewardHistoryTableName, strings.Join(valStrs, ",")) if _, err := tx.Exec(insertQuery, valArgs...); err != nil { return err } return nil }
[ "func (m *MemoryRewardStorage) Update(reward rewards.Reward) {\n\tfor index, r := range m.rewards {\n\t\tif r.ID == reward.ID {\n\t\t\tm.rewards[index] = reward\n\t\t}\n\t}\n}", "func (accManager AccountManager) AddRewardHistory(\n\tctx sdk.Context, username types.AccountKey, numOfReward int64,\n\trewardDetail model.RewardDetail) sdk.Error {\n\n\tslotNum := numOfReward / types.RewardHistoryBundleSize\n\n\trewardHistory, err := accManager.storage.GetRewardHistory(ctx, username, slotNum)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rewardHistory == nil {\n\t\trewardHistory = &model.RewardHistory{Details: []model.RewardDetail{}}\n\t}\n\n\trewardHistory.Details = append(rewardHistory.Details, rewardDetail)\n\n\tif err := accManager.storage.SetRewardHistory(\n\t\tctx, username, slotNum, rewardHistory); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (as AccountStorage) SetRewardHistory(\n\tctx sdk.Context, me types.AccountKey, bucketSlot int64, history *RewardHistory) sdk.Error {\n\tstore := ctx.KVStore(as.key)\n\thistoryBytes, err := as.cdc.MarshalJSON(*history)\n\tif err != nil {\n\t\treturn ErrFailedToMarshalRewardHistory(err)\n\t}\n\tstore.Set(getRewardHistoryKey(me, bucketSlot), historyBytes)\n\treturn nil\n}", "func (d *Dao) AddReward(c context.Context, iRewardID int64, uid int64, iSource int64, iRoomid int64, iLifespan int64) (err error) {\n\t//aReward, _ := getRewardConfByLid(iRewardID)\n\n\tm, _ := time.ParseDuration(fmt.Sprintf(\"+%dh\", iLifespan))\n\n\targ := &AnchorTaskModel.AnchorReward{\n\t\tUid: uid,\n\t\tRewardId: iRewardID,\n\t\tRoomid: iRoomid,\n\t\tSource: iSource,\n\t\tAchieveTime: xtime.Time(time.Now().Unix()),\n\t\tExpireTime: xtime.Time(time.Now().Add(m).Unix()),\n\t\tStatus: model.RewardUnUsed,\n\t}\n\n\t//spew.Dump\n\t// (arg)\n\tif err := d.orm.Create(arg).Error; err != nil {\n\t\tlog.Error(\"addReward(%v) error(%v)\", arg, err)\n\t\treturn err\n\t}\n\n\tif err := d.SetNewReward(c, uid, int64(1)); err != nil 
{\n\t\tlog.Error(\"addRewardMc(%v) error(%v)\", uid, err)\n\t}\n\n\tif err := d.SetHasReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"SetHasReward(%v) error(%v)\", uid, err)\n\t}\n\n\tlog.Info(\"addReward (%v) succ\", arg)\n\n\treturn\n}", "func (_Token *TokenCallerSession) BaseRewardHistory(index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistory(&_Token.CallOpts, index)\n}", "func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {\n\t// TODO: implement mining rewards\n}", "func (_Token *TokenCaller) BaseRewardHistory(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t\tret3 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t\tret3,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseRewardHistory\", index)\n\treturn *ret0, *ret1, *ret2, *ret3, err\n}", "func (as AccountStorage) SetReward(ctx sdk.Context, accKey types.AccountKey, reward *Reward) sdk.Error {\n\tstore := ctx.KVStore(as.key)\n\trewardByte, err := as.cdc.MarshalJSON(*reward)\n\tif err != nil {\n\t\treturn ErrFailedToMarshalReward(err)\n\t}\n\tstore.Set(getRewardKey(accKey), rewardByte)\n\treturn nil\n}", "func (s *MutableState) AddRewards(time epochtime.EpochTime, factor *quantity.Quantity, accounts []signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tfor _, id := range accounts {\n\t\tent := s.Account(id)\n\n\t\tq := 
ent.Escrow.Active.Balance.Clone()\n\t\t// Multiply first.\n\t\tif err := q.Mul(factor); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t\t}\n\t\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t\t}\n\t\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t\t}\n\n\t\tif q.IsZero() {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar com *quantity.Quantity\n\t\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\t\tif rate != nil {\n\t\t\tcom = q.Clone()\n\t\t\t// Multiply first.\n\t\t\tif err := com.Mul(rate); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t\t}\n\t\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t\t}\n\n\t\t\tif err := q.Sub(com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t\t}\n\t\t}\n\n\t\tif !q.IsZero() {\n\t\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t\t}\n\t\t}\n\n\t\tif com != nil && !com.IsZero() {\n\t\t\tdelegation := s.Delegation(id, id)\n\n\t\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t\t}\n\n\t\t\ts.SetDelegation(id, id, delegation)\n\t\t}\n\n\t\ts.SetAccount(id, ent)\n\t}\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}", "func (p *Protocol) getRewardHistory(actionHash string) ([]*RewardHistory, error) {\n\tdb := p.Store.GetDB()\n\n\tgetQuery := fmt.Sprintf(selectRewardHistory,\n\t\tRewardHistoryTableName)\n\tstmt, err := db.Prepare(getQuery)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to prepare get 
query\")\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.Query(actionHash)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to execute get query\")\n\t}\n\n\tvar rewardHistory RewardHistory\n\tparsedRows, err := s.ParseSQLRows(rows, &rewardHistory)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse results\")\n\t}\n\n\tif len(parsedRows) == 0 {\n\t\treturn nil, indexprotocol.ErrNotExist\n\t}\n\n\tvar rewardHistoryList []*RewardHistory\n\tfor _, parsedRow := range parsedRows {\n\t\trewards := parsedRow.(*RewardHistory)\n\t\trewardHistoryList = append(rewardHistoryList, rewards)\n\t}\n\treturn rewardHistoryList, nil\n}", "func ViewReward(rw http.ResponseWriter, r *http.Request) {\n\t// get the token\n\treqToken := r.Header.Get(\"Authorization\")\n\t\n\t// get the claims\n\tclaims, isNotValid := GetClaims(reqToken, rw)\n\tif isNotValid {\n\t\treturn\n\t}\n\n\tdt, err := db.GetUserRewards(claims.Roll)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write(Rsp(err.Error(), \"Server Error\"))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n\tres := c.RespData{\n\t\tMessage: \"All data\",\n\t\tData: dt,\n\t}\n\tjson.NewEncoder(rw).Encode(res)\n}", "func m7RewardsAndDatesPart2UpdateAccounts(db *IndexerDb, accountData []addressAccountData, assetDataMap map[uint32]createClose, txnID txnID, state *MigrationState) error {\n\t// Make sure round accounting doesn't interfere with updating these accounts.\n\tdb.accountingLock.Lock()\n\tdefer db.accountingLock.Unlock()\n\n\t// Open a postgres transaction and submit results for each account.\n\ttx, err := db.db.BeginTx(context.Background(), &serializable)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: tx begin: %v\", err)\n\t}\n\tdefer tx.Rollback() // ignored if .Commit() first\n\n\t// 1. 
updateTotalRewards - conditionally update the total rewards if the account wasn't closed during iteration.\n\t// $3 is the round after which new blocks will have the closed_at field set.\n\t// We only set rewards_total when closed_at was set before that round.\n\tupdateTotalRewards, err := tx.Prepare(`UPDATE account SET rewards_total = coalesce(rewards_total, 0) + $2 WHERE addr = $1 AND coalesce(closed_at, 0) < $3`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set rewards prepare: %v\", err)\n\t}\n\tdefer updateTotalRewards.Close()\n\n\t// 2. setCreateCloseAccount - set the accounts create/close rounds.\n\t// We always set the created_at field because it will never change.\n\t// closed_at may already be set by the time the migration runs, or it might need to be cleared out.\n\tsetCreateCloseAccount, err := tx.Prepare(`UPDATE account SET created_at = $2, closed_at = coalesce(closed_at, $3), deleted = coalesce(deleted, $4) WHERE addr = $1`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close prepare: %v\", err)\n\t}\n\tdefer setCreateCloseAccount.Close()\n\n\t// 3. setCreateCloseAsset - set the accounts created assets create/close rounds.\n\tsetCreateCloseAsset, err := tx.Prepare(`UPDATE asset SET created_at = $3, closed_at = coalesce(closed_at, $4), deleted = coalesce(deleted, $5) WHERE creator_addr = $1 AND index=$2`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close asset prepare: %v\", err)\n\t}\n\tdefer setCreateCloseAsset.Close()\n\n\t// 4. 
setCreateCloseAssetHolding - (upsert) set the accounts asset holding create/close rounds.\n\tsetCreateCloseAssetHolding, err := tx.Prepare(`INSERT INTO account_asset(addr, assetid, amount, frozen, created_at, closed_at, deleted) VALUES ($1, $2, 0, false, $3, $4, $5) ON CONFLICT (addr, assetid) DO UPDATE SET created_at = EXCLUDED.created_at, closed_at = coalesce(account_asset.closed_at, EXCLUDED.closed_at), deleted = coalesce(account_asset.deleted, EXCLUDED.deleted)`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close asset holding prepare: %v\", err)\n\t}\n\tdefer setCreateCloseAssetHolding.Close()\n\n\t// 5. setCreateCloseApp - set the accounts created apps create/close rounds.\n\tsetCreateCloseApp, err := tx.Prepare(`UPDATE app SET created_at = $3, closed_at = coalesce(closed_at, $4), deleted = coalesce(deleted, $5) WHERE creator = $1 AND index=$2`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close app prepare: %v\", err)\n\t}\n\tdefer setCreateCloseApp.Close()\n\n\t// 6. setCreateCloseAppLocal - (upsert) set the accounts local apps create/close rounds.\n\tsetCreateCloseAppLocal, err := tx.Prepare(`INSERT INTO account_app (addr, app, created_at, closed_at, deleted) VALUES ($1, $2, $3, $4, $5) ON CONFLICT (addr, app) DO UPDATE SET created_at = EXCLUDED.created_at, closed_at = coalesce(account_app.closed_at, EXCLUDED.closed_at), deleted = coalesce(account_app.deleted, EXCLUDED.deleted)`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close app local prepare: %v\", err)\n\t}\n\tdefer setCreateCloseAppLocal.Close()\n\n\t// loop through all of the accounts.\n\tfor _, ad := range accountData {\n\t\taddressStr := ad.address.String()\n\n\t\t// 1. 
updateTotalRewards - conditionally update the total rewards if the account wasn't closed during iteration.\n\t\t_, err = updateTotalRewards.Exec(\n\t\t\tad.address[:], ad.accountData.cumulativeRewards, state.NextRound)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"m7: failed to update %s with rewards %d: %v\",\n\t\t\t\taddressStr, ad.accountData.cumulativeRewards, err)\n\t\t}\n\n\t\t// 2. setCreateCloseAccount - set the accounts create/close rounds.\n\t\t{\n\t\t\tdeleted := sql.NullBool{\n\t\t\t\tBool: ad.accountData.account.deleted,\n\t\t\t\tValid: ad.accountData.account.deletedValid,\n\t\t\t}\n\t\t\tcreated := sql.NullInt64{\n\t\t\t\tInt64: int64(ad.accountData.account.created),\n\t\t\t\tValid: ad.accountData.account.createdValid,\n\t\t\t}\n\t\t\tclosed := sql.NullInt64{\n\t\t\t\tInt64: int64(ad.accountData.account.closed),\n\t\t\t\tValid: ad.accountData.account.closedValid,\n\t\t\t}\n\t\t\t_, err = setCreateCloseAccount.Exec(ad.address[:], created, closed, deleted)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"m7: failed to update %s with create/close: %v\", addressStr, err)\n\t\t\t}\n\t\t}\n\n\t\t// 4. setCreateCloseAssetHolding - (upsert) set the accounts asset holding create/close rounds.\n\t\terr = executeForEachCreatable(setCreateCloseAssetHolding, ad.address,\n\t\t\tad.accountData.assetHolding)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"m7: failed to update %s with asset holding create/close: %v\",\n\t\t\t\taddressStr, err)\n\t\t}\n\n\t\tif ad.accountData.additional != nil {\n\t\t\t// 3. 
setCreateCloseAsset - set the accounts created assets create/close rounds.\n\t\t\tfor index := range ad.accountData.additional.asset {\n\t\t\t\tcc, ok := assetDataMap[index]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"m7: asset index %d created by %s is not in assetDataMap\",\n\t\t\t\t\t\tindex, addressStr)\n\t\t\t\t}\n\t\t\t\terr := executeCreatableCC(setCreateCloseAsset, ad.address, index, cc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"m7: failed to update %s with asset index %d create/close: %v\",\n\t\t\t\t\t\taddressStr, index, err)\n\t\t\t\t}\n\t\t\t\tdelete(assetDataMap, index)\n\t\t\t}\n\n\t\t\t// 5. setCreateCloseApp - set the accounts created apps create/close rounds.\n\t\t\terr = executeForEachCreatable(setCreateCloseApp, ad.address, ad.accountData.additional.app)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"m7: failed to update %s with app create/close: %v\", addressStr, err)\n\t\t\t}\n\n\t\t\t// 6. setCreateCloseAppLocal - (upsert) set the accounts local apps create/close rounds.\n\t\t\terr = executeForEachCreatable(setCreateCloseAppLocal, ad.address,\n\t\t\t\tad.accountData.additional.appLocal)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"m7: failed to update %s with app local create/close: %v\",\n\t\t\t\t\taddressStr, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t{\n\t\tround := int64(txnID.round)\n\t\tintra := int64(txnID.intra)\n\t\tstate.PointerRound = &round\n\t\tstate.PointerIntra = &intra\n\t}\n\tmigrationStateJSON := encoding.EncodeJSON(state)\n\t_, err = db.db.Exec(setMetastateUpsert, migrationMetastateKey, migrationStateJSON)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: failed to update migration checkpoint: %v\", err)\n\t}\n\n\t// Commit transactions.\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: failed to commit changes: %v\", err)\n\t}\n\n\treturn nil\n}", "func (s *MutableState) AddRewardSingleAttenuated(time epochtime.EpochTime, factor *quantity.Quantity, attenuationNumerator, 
attenuationDenominator int, account signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tvar numQ, denQ quantity.Quantity\n\tif err = numQ.FromInt64(int64(attenuationNumerator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation numerator %d\", attenuationNumerator)\n\t}\n\tif err = denQ.FromInt64(int64(attenuationDenominator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation denominator %d\", attenuationDenominator)\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tent := s.Account(account)\n\n\tq := ent.Escrow.Active.Balance.Clone()\n\t// Multiply first.\n\tif err := q.Mul(factor); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t}\n\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t}\n\tif err := q.Mul(&numQ); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by attenuation numerator\")\n\t}\n\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t}\n\tif err := q.Quo(&denQ); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by attenuation denominator\")\n\t}\n\n\tif q.IsZero() {\n\t\treturn nil\n\t}\n\n\tvar com *quantity.Quantity\n\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\tif rate != nil {\n\t\tcom = q.Clone()\n\t\t// Multiply first.\n\t\tif err := com.Mul(rate); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t}\n\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, 
\"dividing by commission rate denominator\")\n\t\t}\n\n\t\tif err := q.Sub(com); err != nil {\n\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t}\n\t}\n\n\tif !q.IsZero() {\n\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t}\n\t}\n\n\tif com != nil && !com.IsZero() {\n\t\tdelegation := s.Delegation(account, account)\n\n\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t}\n\n\t\ts.SetDelegation(account, account, delegation)\n\t}\n\n\ts.SetAccount(account, ent)\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}", "func (c *Coinbase) AddReward(output *Output) {\n\toutput.EncryptedMask = make([]byte, 1)\n\tc.Rewards = append(c.Rewards, output)\n}", "func (room *RoomRecorder) setHistory(history []*action_.PlayerAction) {\n\troom.historyM.Lock()\n\troom._history = history\n\troom.historyM.Unlock()\n}", "func (_RandomBeacon *RandomBeaconTransactor) UpdateRewardParameters(opts *bind.TransactOpts, sortitionPoolRewardsBanDuration *big.Int, relayEntryTimeoutNotificationRewardMultiplier *big.Int, unauthorizedSigningNotificationRewardMultiplier *big.Int, dkgMaliciousResultNotificationRewardMultiplier *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.contract.Transact(opts, \"updateRewardParameters\", sortitionPoolRewardsBanDuration, relayEntryTimeoutNotificationRewardMultiplier, unauthorizedSigningNotificationRewardMultiplier, dkgMaliciousResultNotificationRewardMultiplier)\n}", "func (path *Path) AddRewards(rewards map[*Reward]int) {\n\tfor key, value := range rewards {\n\t\tpath.rewards[key] += value\n\t}\n}", "func m7RewardsAndDatesPart2(db *IndexerDb, state *MigrationState) error {\n\tdb.log.Print(\"m7 account cumulative rewards migration starting\")\n\n\t// Skip the work if all accounts have previously been 
updated.\n\tif (state.PointerRound == nil) || (*state.PointerRound != 0) || (*state.PointerIntra != 0) {\n\t\tmaxRound := uint32(state.NextRound)\n\n\t\t// Get the number of accounts to potentially warn the user about high memory usage.\n\t\terr := warnUser(db, maxRound)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Get special accounts, so that we can ignore them throughout the migration. A later migration\n\t\t// handles them.\n\t\tspecialAccounts, err := db.GetSpecialAccounts()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"m7: unable to get special accounts: %v\", err)\n\t\t}\n\t\t// Get the transaction id that created each account. This function simple loops over all\n\t\t// transactions from rounds <= `maxRound` in arbitrary order.\n\t\taccountsFirstUsed, err := getAccountsFirstUsed(db, maxRound, specialAccounts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Get account data for accounts without transactions such as genesis accounts.\n\t\t// This function reads the `account` table but only considers accounts created before or at\n\t\t// `maxRound`.\n\t\treadyAccountData, err := getAccountsWithoutTxnData(\n\t\t\tdb, maxRound, specialAccounts, accountsFirstUsed)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Finally, read all transactions from most recent to oldest, update rewards and create/close dates,\n\t\t// and write this account data to the database. To save memory, this function removes account's\n\t\t// data as soon as we reach the transaction that created this account at which point older\n\t\t// transactions cannot update its state. 
It writes account data to the database in batches.\n\t\terr = updateAccounts(db, specialAccounts, accountsFirstUsed, readyAccountData, state)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Update migration state.\n\tstate.NextMigration++\n\tstate.NextRound = 0\n\tstate.PointerRound = nil\n\tstate.PointerIntra = nil\n\tmigrationStateJSON := encoding.EncodeJSON(state)\n\t_, err := db.db.Exec(setMetastateUpsert, migrationMetastateKey, migrationStateJSON)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: failed to write final migration state: %v\", err)\n\t}\n\n\treturn nil\n}", "func (_RandomBeacon *RandomBeaconTransactorSession) UpdateRewardParameters(sortitionPoolRewardsBanDuration *big.Int, relayEntryTimeoutNotificationRewardMultiplier *big.Int, unauthorizedSigningNotificationRewardMultiplier *big.Int, dkgMaliciousResultNotificationRewardMultiplier *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.UpdateRewardParameters(&_RandomBeacon.TransactOpts, sortitionPoolRewardsBanDuration, relayEntryTimeoutNotificationRewardMultiplier, unauthorizedSigningNotificationRewardMultiplier, dkgMaliciousResultNotificationRewardMultiplier)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stringToBigInt transforms a string to big int
func stringToBigInt(estr string) (*big.Int, error) { ret, ok := big.NewInt(0).SetString(estr, 10) if !ok { return nil, errors.New("failed to parse string to big int") } return ret, nil }
[ "func stringToBigInt(stringValue string) *big.Int {\n\n\tintToReturn := big.NewInt(0)\n\tintToReturn.SetString(stringValue, 10)\n\n\treturn intToReturn\n}", "func HexStrToBigInt(s string) (*big.Int, error) {\n\tregstr := \"^0[xX][0-9a-fA-F]+$\"\n\tif matched, err := regexp.Match(regstr, []byte(s)); err != nil || !matched {\n\t\treturn nil, errors.New(\"Invalid hex string\")\n\t}\n\n\ti, b := new(big.Int).SetString(s[2:], 16)\n\tif !b {\n\t\treturn nil, errors.New(\"Invalid hex string\")\n\t}\n\n\treturn i, nil\n}", "func HexStrToBigInt(hexString string) (*big.Int, error) {\n\tvalue := new(big.Int)\n\t_, ok := value.SetString(Trim0x(hexString), 16)\n\tif !ok {\n\t\treturn value, fmt.Errorf(\"Could not transform hex string to big int: %s\", hexString)\n\t}\n\n\treturn value, nil\n}", "func ParseBigInt(s string, base int) (*big.Int, error) {\n\tif base < 0 || base > 16 {\n\t\treturn nil, errors.New(\"ParseBigInt: invalid base\")\n\t}\n\ts = strings.Replace(s, \"\\n\", \"\", -1)\n\tz, ok := new(big.Int).SetString(s, base)\n\tif !ok {\n\t\treturn nil, errors.New(\"ParseBigInt: invalid string\")\n\t}\n\treturn z, nil\n}", "func ParseBigInt(str string) (_ *cells.BinaryCell, err error) {\n\tbytes, err := hex.DecodeString(str)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn cells.New(OpUint64, bytes), nil\n}", "func DecodeBigInt(txt string) (*big.Int, error) {\n\tif txt == \"\" {\n\t\treturn new(big.Int), nil // Defaults to 0\n\t}\n\tres, success := new(big.Int).SetString(txt, 10)\n\tif !success {\n\t\treturn nil, fmt.Errorf(\"cannot decode %v into big.Int\", txt)\n\t}\n\treturn res, nil\n}", "func ParseBigIntWithMultiplier(s string, binary bool) (*big.Int, int) {\n\ti := strings.IndexFunc(s, func(x rune) bool {\n\t\treturn strings.IndexRune(\"0123456789-\", x) < 0\n\t})\n\n\tx := new(big.Int)\n\tif i == 0 {\n\t\treturn x, 0\n\t}\n\n\tnumber := s\n\tif i > 0 {\n\t\tnumber = s[:i]\n\t}\n\n\tif _, ok := x.SetString(number, 10); !ok {\n\t\tpanic(\"unitpref: 
(*big.Int).SetString() failed\")\n\t}\n\n\tif i < len(s) {\n\t\tm, n := MultiplierBigInt(s[i:], binary)\n\t\tx.Mul(x, m)\n\t\ti += n\n\t}\n\treturn x, i\n}", "func parseBigInt(bytes []byte, lax bool, fieldName string) (*big.Int, error) {\n\tif err := checkInteger(bytes, lax, fieldName); err != nil {\n\t\treturn nil, err\n\t}\n\tret := new(big.Int)\n\tif len(bytes) > 0 && bytes[0]&0x80 == 0x80 {\n\t\t// This is a negative number.\n\t\tnotBytes := make([]byte, len(bytes))\n\t\tfor i := range notBytes {\n\t\t\tnotBytes[i] = ^bytes[i]\n\t\t}\n\t\tret.SetBytes(notBytes)\n\t\tret.Add(ret, bigOne)\n\t\tret.Neg(ret)\n\t\treturn ret, nil\n\t}\n\tret.SetBytes(bytes)\n\treturn ret, nil\n}", "func mustBigInt(v string) *big.Int {\n\tvar x big.Int\n\tif err := x.UnmarshalText([]byte(v)); err != nil {\n\t\tpanic(err)\n\t}\n\treturn &x\n}", "func s2big(s string) (r *big.Int) {\n\tr, _ = new(big.Int).SetString(s, 10)\n\treturn\n}", "func chunkIDAsBigInt(chunkID string) (*big.Int, error) {\n\tif chunkID == \"\" {\n\t\t// \"\" indicates start of table. This is one before\n\t\t// ID 00000 .... 
00000.\n\t\treturn big.NewInt(-1), nil\n\t}\n\tidBytes, err := hex.DecodeString(chunkID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tid := big.NewInt(0)\n\tid.SetBytes(idBytes)\n\treturn id, nil\n}", "func toBigInt(a *gfP) *big.Int {\n\tv := &gfP{}\n\tmontDecode(v, a)\n\tc := new(big.Int)\n\tfor i := len(v) - 1; i >= 0; i-- {\n\t\tc.Lsh(c, 64)\n\t\tc.Add(c, new(big.Int).SetUint64(v[i]))\n\t}\n\treturn c\n}", "func decodeBigInt(s *Stream, val reflect.Value) error {\n\t// get the content of the string\n\tb, err := s.Bytes()\n\tif err != nil {\n\t\treturn wrapStreamError(err, val.Type())\n\t}\n\n\t// assign i to *big.Int type (its' original type instead of the reflect.val type)\n\ti := val.Interface().(*big.Int)\n\n\t// this means the storage prepared to store the data has some flaws, it pointed nil\n\t// therefore, we need to fix this storage, and make this storage be able to store the data\n\tif i == nil {\n\t\t// allocated space and let i pointed to it\n\t\ti = new(big.Int)\n\t\t// pass the address pointed by i (ValuOf(i)) to val (data synchronization)\n\t\tval.Set(reflect.ValueOf(i))\n\t}\n\n\t// no leading 0s\n\tif len(b) > 0 && b[0] == 0 {\n\t\treturn wrapStreamError(ErrCanonInt, val.Type())\n\t}\n\n\t// assigning values\n\ti.SetBytes(b)\n\treturn nil\n}", "func lettersToBigInt(seq alphabet.Letters) (*big.Int, error) {\n\tout := big.NewInt(0)\n\twords := make([]big.Word, len(seq)/33+1)\n\tfor i := range seq {\n\t\tindex := alphabet.DNA.IndexOf(seq[len(seq)-i-1])\n\t\tif index < 0 {\n\t\t\treturn out, fmt.Errorf(\"Sequence is not a valid DNA sequence at position %d\\n\", i+1)\n\t\t} else {\n\t\t\twordIndex := i / 32\n\t\t\tshiftDist := uint(i-wordIndex*32) * 2\n\t\t\twords[wordIndex] |= big.Word(index << shiftDist)\n\t\t}\n\t}\n\treturn out.SetBits(words), nil\n}", "func MultiplierBigInt(s string, binary bool) (*big.Int, int) {\n\tm := new(big.Int).SetUint64(1)\n\tr, n := utf8.DecodeRuneInString(s)\n\tif r == utf8.RuneError {\n\t\treturn m, 0\n\t}\n\n\tk := 
strings.IndexRune(multiplier, r)\n\tif k < 0 {\n\t\treturn m, 0\n\t}\n\n\tx := new(big.Int).SetUint64(1000)\n\tif binary && strings.HasPrefix(s[n:], \"i\") {\n\t\tx.SetUint64(1024)\n\t\tn++\n\t}\n\n\tswitch k {\n\tcase 8: m.Mul(m, x); fallthrough\n\tcase 7: m.Mul(m, x); fallthrough\n\tcase 6: m.Mul(m, x); fallthrough\n\tcase 5: m.Mul(m, x); fallthrough\n\tcase 4: m.Mul(m, x); fallthrough\n\tcase 3: m.Mul(m, x); fallthrough\n\tcase 2: m.Mul(m, x); fallthrough\n\tcase 1: m.Mul(m, x)\n\tcase 0: m.Mul(m, x)\n\t}\n\n\treturn m, n\n}", "func (b64) FromBigInt(b *big.Int, size int) string {\n\tdata := b.Bytes()\n\tif size > 0 {\n\t\tdata = bytesPadding(data, size)\n\t}\n\n\treturn base64.RawURLEncoding.EncodeToString(data)\n}", "func (t TokenID) BigInt() *big.Int {\n\treturn utils.ByteSliceToBigInt(t[:])\n}", "func DecodeBig(input string) (*big.Int, error) {\n\traw, err := checkNumber(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(raw) > 64 {\n\t\treturn nil, ErrBig256Range\n\t}\n\twords := make([]big.Word, len(raw)/bigWordNibbles+1)\n\tend := len(raw)\n\tfor i := range words {\n\t\tstart := end - bigWordNibbles\n\t\tif start < 0 {\n\t\t\tstart = 0\n\t\t}\n\t\tfor ri := start; ri < end; ri++ {\n\t\t\tnib := decodeNibble(raw[ri])\n\t\t\tif nib == badNibble {\n\t\t\t\treturn nil, ErrSyntax\n\t\t\t}\n\t\t\twords[i] *= 16\n\t\t\twords[i] += big.Word(nib)\n\t\t}\n\t\tend = start\n\t}\n\tdec := new(big.Int).SetBits(words)\n\treturn dec, nil\n}", "func StringToInt(s string) (int64, error) {\n\treturn strconv.ParseInt(s, 10, 64)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewStorageReader creates a new instance of StorageReader
func NewStorageReader(accountName, accountKey, containerName string) (*StorageReader, error) { logp.Debug( "storage_reader", "Creating new instance of storage reader", ) c := &Config{ accountName: accountName, accountKey: accountKey, containerName: containerName, } sr := &StorageReader{ config: c, } err := sr.initialize() if err != nil { return nil, err } return sr, nil }
[ "func StorageReader(storage StorageAPI, volume, path string, offset int64) io.Reader {\n\treturn &storageReader{storage, volume, path, offset}\n}", "func newStorage() *storage {\n\tr := make(map[string][]byte)\n\treturn &storage{\n\t\trepository: r,\n\t}\n}", "func NewReader(z *SGL) *Reader { return &Reader{z, 0} }", "func newStorage(root string, maxFileSize int64) (s *storage, err error) {\n\tfi, err := os.Stat(root)\n\tif os.IsNotExist(err) {\n\t\treturn nil, fmt.Errorf(\"storage root %q doesn't exist\", root)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to stat directory %q: %v\", root, err)\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, fmt.Errorf(\"storage root %q exists but is not a directory.\", root)\n\t}\n\tindex, err := kvfile.NewStorage(filepath.Join(root, \"index.kv\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tindex.Close()\n\t\t}\n\t}()\n\tif maxFileSize <= 0 {\n\t\tmaxFileSize = defaultMaxFileSize\n\t}\n\ts = &storage{\n\t\troot: root,\n\t\tindex: index,\n\t\tmaxFileSize: maxFileSize,\n\t\tGenerationer: local.NewGenerationer(root),\n\t}\n\tif err := s.openCurrent(); err != nil {\n\t\treturn nil, err\n\t}\n\tif _, _, err := s.StorageGeneration(); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error initialization generation for %q: %v\", root, err)\n\t}\n\treturn s, nil\n}", "func NewRawStorage(config *storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) {\n\treturn factory.Create(*config, newFunc)\n}", "func NewStorage(cfg config.LoLStorage, riotClients map[string]riotclient.Client, backend Backend) (*Storage, error) {\n\tif client, ok := riotClients[cfg.DefaultRiotClient]; ok {\n\t\ts := &Storage{\n\t\t\tconfig: cfg,\n\t\t\triotClients: riotClients,\n\t\t\triotClient: client,\n\t\t\tlog: logging.Get(\"Storage\"),\n\t\t\tbackend: backend,\n\t\t}\n\n\t\treturn s, nil\n\t}\n\treturn nil, fmt.Errorf(\"Error creating Storage. 
Requested default region RiotAPI does not exist: %s\", cfg.DefaultRiotClient)\n}", "func (s *StandAloneStorage) Reader(ctx *kvrpcpb.Context) (storage.StorageReader, error) {\n\treturn &standAloneStorageReader{txn: s.db.NewTransaction(false)}, nil\n}", "func newMockStorage() Storage {\n\treturn &mockStorage{}\n}", "func NewStorageReadClient(c datatypes.Storage_ReadClient) *StorageReadClient {\n\treturn &StorageReadClient{c: c}\n}", "func NewReader(in filesystem.ReadCloser) *Reader {\n\treturn &Reader{\n\t\torig_in: in,\n\t\tin: recordio.NewRecordReader(in),\n\t}\n}", "func newStorageObject(URL string, source interface{}, fileInfo os.FileInfo) storage.Object {\n\tabstract := storage.NewAbstractStorageObject(URL, source, fileInfo)\n\tresult := &object{\n\t\tAbstractObject: abstract,\n\t}\n\tresult.AbstractObject.Object = result\n\treturn result\n}", "func NewStorage(conn *rpcc.Conn) *Storage {\n\treturn &Storage{conn: conn}\n}", "func newStorage() *storage {\n\treturn &storage{\n\t\tsto: make(map[uint16]mqtt.Message),\n\t\tmux: new(sync.RWMutex),\n\t}\n}", "func newStorageObject(url string, source interface{}, fileInfo os.FileInfo) storage.Object {\n\tabstract := storage.NewAbstractStorageObject(url, source, fileInfo)\n\tresult := &object{\n\t\tAbstractObject: abstract,\n\t}\n\tresult.AbstractObject.Object = result\n\treturn result\n}", "func NewReader(r io.Reader) *Reader { return &Reader{r: r} }", "func NewStorage(ring *redis.Ring) cache.Storage {\n\tc := &rcache.Codec{\n\t\tRedis: ring,\n\t\tMarshal: json.Marshal,\n\t\tUnmarshal: json.Unmarshal,\n\t}\n\treturn &storage{cache: c}\n}", "func NewStorage() *Storage {\r\n\treturn new(Storage)\r\n}", "func NewStorage(config StorageConfig) (spec.Storage, error) {\n\tnewStorage := &storage{\n\t\tStorageConfig: config,\n\n\t\tID: id.MustNew(),\n\t\tShutdownOnce: sync.Once{},\n\t\tType: ObjectType,\n\t}\n\n\t// Dependencies.\n\tif newStorage.Log == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"logger must not be 
empty\")\n\t}\n\tif newStorage.Pool == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"connection pool must not be empty\")\n\t}\n\t// Settings.\n\tif newStorage.BackOffFactory == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"backoff factory must not be empty\")\n\t}\n\tif newStorage.Prefix == \"\" {\n\t\treturn nil, maskAnyf(invalidConfigError, \"prefix must not be empty\")\n\t}\n\n\tnewStorage.Log.Register(newStorage.GetType())\n\n\treturn newStorage, nil\n}", "func NewStorage(cfg *Config) *Storage {\n\tif cfg.Engine == nil {\n\t\tlog.Fatalln(\"Cannot create a ops proxy without an engine\")\n\t}\n\tif cfg.App == nil {\n\t\tnrConfig := newrelic.NewConfig(\"widget\", \"\")\n\t\tnrConfig.Enabled = false\n\t\tapp, err := newrelic.NewApplication(nrConfig)\n\t\tif err != nil {\n\t\t\tlogrus.WithField(\"error\", err).Fatalln(\"could not create dummy new relic app\")\n\t\t}\n\t\tcfg.App = app\n\t}\n\treturn &Storage{engine: cfg.Engine, newrelic: cfg.App}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ListBlobsModifiedBetween Get list of blobs modified between two specified timestamps
func (sr *StorageReader) ListBlobsModifiedBetween(startTime, endTime int64) *[]BlobDetails { logp.Debug("storage_reader", "Listing blobs modified between %v and %v.", startTime, endTime) ctx := context.Background() var blobItems []BlobDetails i := 0 for marker := (azblob.Marker{}); marker.NotDone(); { listBlob, err := sr.container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{}) marker = listBlob.NextMarker if err != nil { logp.Error(err) continue } for _, blobInfo := range listBlob.Segment.BlobItems { i++ lastModified := blobInfo.Properties.LastModified.UTC().Unix() if lastModified > startTime && lastModified < endTime { length := *blobInfo.Properties.ContentLength if length == int64(0) { continue } blobItems = append(blobItems, NewBlobDetails(blobInfo.Name, string(blobInfo.Properties.Etag), length, lastModified)) } } } logp.Info("Found %v blobs in container. Found %v blobs modified between %v and %v.", i, len(blobItems), startTime, endTime, ) return &blobItems }
[ "func (w *Wallet) ListSinceBlock(start, end, syncHeight int32) ([]btcjson.ListTransactionsResult, er.R) {\n\ttxList := []btcjson.ListTransactionsResult{}\n\terr := walletdb.View(w.db, func(tx walletdb.ReadTx) er.R {\n\t\ttxmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)\n\n\t\trangeFn := func(details []wtxmgr.TxDetails) (bool, er.R) {\n\t\t\tfor _, detail := range details {\n\t\t\t\tjsonResults := listTransactions(tx, &detail,\n\t\t\t\t\tw.Manager, syncHeight, w.chainParams)\n\t\t\t\ttxList = append(txList, jsonResults...)\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn w.TxStore.RangeTransactions(txmgrNs, start, end, rangeFn)\n\t})\n\treturn txList, err\n}", "func (s *server) ListBlobs(req *gitalypb.ListBlobsRequest, stream gitalypb.BlobService_ListBlobsServer) error {\n\tif err := verifyListBlobsRequest(req); err != nil {\n\t\treturn helper.ErrInvalidArgument(err)\n\t}\n\n\tctx := stream.Context()\n\trepo := s.localrepo(req.GetRepository())\n\n\tchunker := chunk.New(&blobSender{\n\t\tsend: func(blobs []*gitalypb.ListBlobsResponse_Blob) error {\n\t\t\treturn stream.Send(&gitalypb.ListBlobsResponse{\n\t\t\t\tBlobs: blobs,\n\t\t\t})\n\t\t},\n\t})\n\n\trevlistOptions := []gitpipe.RevlistOption{\n\t\tgitpipe.WithObjects(),\n\t\tgitpipe.WithObjectTypeFilter(gitpipe.ObjectTypeBlob),\n\t}\n\n\trevlistIter := gitpipe.Revlist(ctx, repo, req.GetRevisions(), revlistOptions...)\n\n\tif err := s.processBlobs(ctx, repo, revlistIter, nil, req.GetLimit(), req.GetBytesLimit(),\n\t\tfunc(oid string, size int64, contents []byte, path []byte) error {\n\t\t\tif !req.GetWithPaths() {\n\t\t\t\tpath = nil\n\t\t\t}\n\n\t\t\treturn chunker.Send(&gitalypb.ListBlobsResponse_Blob{\n\t\t\t\tOid: oid,\n\t\t\t\tSize: size,\n\t\t\t\tData: contents,\n\t\t\t\tPath: path,\n\t\t\t})\n\t\t},\n\t); err != nil {\n\t\treturn helper.ErrInternal(fmt.Errorf(\"processing blobs: %w\", err))\n\t}\n\n\tif err := chunker.Flush(); err != nil {\n\t\treturn helper.ErrInternal(err)\n\t}\n\n\treturn nil\n}", 
"func (m *MetaDB) ListBlobRefsByStatus(ctx context.Context, status blobref.Status, olderThan time.Time) (*blobref.BlobRefCursor, error) {\n\tquery := m.newQuery(blobKind).Filter(\"Status = \", int(status)).\n\t\tFilter(\"Timestamps.UpdatedAt <\", olderThan)\n\titer := blobref.NewCursor(m.client.Run(ctx, query))\n\treturn iter, nil\n}", "func (repo *GitRepo) ListCommitsBetween(from, to string) ([]string, error) {\n\tout, err := repo.runGitCommand(\"rev-list\", \"--reverse\", from+\"..\"+to)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif out == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn strings.Split(out, \"\\n\"), nil\n}", "func (a *App) ListBlobs(w http.ResponseWriter, r *http.Request) {\n\tresource := a.eventID\n\tblobs, err := a.Blob.List(resource)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusNotFound, w)\n\t\treturn\n\t}\n\tw.Header().Set(types.ContentType, types.ContentTypeApplicationJSON)\n\terr = json.NewEncoder(w).Encode(blobs)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusInternalServerError, w)\n\t\treturn\n\t}\n}", "func (t *Table) List() (*sql.Rows, error) {\r\n\tquery := fmt.Sprintf(\"select digest, last_modified from blob.%s\", t.Name)\r\n\trows, err := t.drv.db.Query(query)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn rows, err\r\n}", "func ReadTimestamps(bkt *bolt.Bucket, created, updated *time.Time) error {\n\tfor _, f := range []struct {\n\t\tb []byte\n\t\tt *time.Time\n\t}{\n\t\t{bucketKeyCreatedAt, created},\n\t\t{bucketKeyUpdatedAt, updated},\n\t} {\n\t\tv := bkt.Get(f.b)\n\t\tif v != nil {\n\t\t\tif err := f.t.UnmarshalBinary(v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (ti *TimeIndex) Notify(blobs ...*blob.Blob) {\n ti.lock.Lock()\n defer ti.lock.Unlock()\n\n for _, b := range blobs {\n tp := b.Type()\n if tp == blob.NoType || tp == blob.Object {\n continue\n }\n\n t, err := b.Timestamp()\n if err != nil {\n continue\n }\n\n ti.entries = append(ti.entries, 
&timeEntry{tm: t, ref: b.Ref()})\n }\n}", "func GetBlobberSnapshots(round int64, limit int64, offset int64, cb GetInfoCallback) (err error) {\n\tif err = CheckConfig(); err != nil {\n\t\treturn\n\t}\n\tvar url = withParams(STORAGE_GET_BLOBBER_SNAPSHOT, Params{\n\t\t\"round\": strconv.FormatInt(round, 10),\n\t\t\"limit\": strconv.FormatInt(limit, 10),\n\t\t\"offset\": strconv.FormatInt(offset, 10),\n\t})\n\tgo GetInfoFromAnySharder(url, OpStorageSCGetBlobberSnapshots, cb)\n\treturn\n}", "func (a *Account) ListSinceBlock(since, curBlockHeight int32,\n\tminconf int) ([]btcjson.ListTransactionsResult, error) {\n\n\ttxList := []btcjson.ListTransactionsResult{}\n\tfor _, txRecord := range a.TxStore.Records() {\n\t\t// Transaction records must only be considered if they occur\n\t\t// after the block height since.\n\t\tif since != -1 && txRecord.BlockHeight <= since {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Transactions that have not met minconf confirmations are to\n\t\t// be ignored.\n\t\tif !txRecord.Confirmed(minconf, curBlockHeight) {\n\t\t\tcontinue\n\t\t}\n\n\t\tjsonResults, err := txRecord.ToJSON(a.name, curBlockHeight,\n\t\t\ta.KeyStore.Net())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttxList = append(txList, jsonResults...)\n\t}\n\n\treturn txList, nil\n}", "func (a *Account) ListSinceBlock(since, curBlockHeight int32, minconf int) ([]map[string]interface{}, error) {\n\tvar txInfoList []map[string]interface{}\n\tfor _, txRecord := range a.TxStore.SortedRecords() {\n\t\t// check block number.\n\t\tif since != -1 && txRecord.Height() <= since {\n\t\t\tcontinue\n\t\t}\n\n\t\ttxInfoList = append(txInfoList,\n\t\t\ttxRecord.TxInfo(a.name, curBlockHeight, a.Net())...)\n\t}\n\n\treturn txInfoList, nil\n}", "func (am *AccountManager) ListSinceBlock(since, curBlockHeight int32, minconf int) ([]map[string]interface{}, error) {\n\t// Create and fill a map of account names and their balances.\n\ttxInfoList := []map[string]interface{}{}\n\tfor _, a := range 
am.AllAccounts() {\n\t\ttxTmp, err := a.ListSinceBlock(since, curBlockHeight, minconf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttxInfoList = append(txInfoList, txTmp...)\n\t}\n\treturn txInfoList, nil\n}", "func ListBlobs(query *models.BlobQuery) ([]*models.Blob, error) {\n\tqs := GetOrmer().QueryTable(&models.Blob{})\n\n\tif query != nil {\n\t\tif query.Digest != \"\" {\n\t\t\tqs = qs.Filter(\"Digest\", query.Digest)\n\t\t}\n\n\t\tif query.ContentType != \"\" {\n\t\t\tqs = qs.Filter(\"ContentType\", query.ContentType)\n\t\t}\n\n\t\tif len(query.Digests) > 0 {\n\t\t\tqs = qs.Filter(\"Digest__in\", query.Digests)\n\t\t}\n\n\t\tif query.Size > 0 {\n\t\t\tqs = qs.Limit(query.Size)\n\t\t\tif query.Page > 0 {\n\t\t\t\tqs = qs.Offset((query.Page - 1) * query.Size)\n\t\t\t}\n\t\t}\n\t}\n\n\tblobs := []*models.Blob{}\n\t_, err := qs.All(&blobs)\n\treturn blobs, err\n}", "func (db *MDB) ListBlob() []int64 {\n\treturn db.listKV(\"_KVBLOB\")\n}", "func (fmd *FakeMysqlDaemon) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) {\n\treturn nil, nil\n}", "func BlobTime(v interface{}) (t time.Time) {\n\tvar (\n\t\tf *os.File\n\t\tfn string\n\t\terr error\n\t\tok bool\n\t)\n\tif fn, ok = v.(string); ok {\n\t\tif f, err = os.Open(fn); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t} else if f, ok = v.(*os.File); !ok {\n\t\treturn\n\t}\n\tf.Seek(BlobTimeOff, os.SEEK_SET)\n\t(NBOReader{f}).ReadNBO(&t)\n\treturn\n}", "func (t *Tag) LastModified() (lastModified time.Time) {\n\tfor _, history := range t.History {\n\t\tif history.Created.After(lastModified) {\n\t\t\tlastModified = history.Created\n\t\t}\n\t}\n\treturn lastModified\n}", "func getRecentlyModified(match []string, modified int, verbose bool) []string {\n\tvar matches []string // slice to hold the matching file paths\n\tvar paths []string // slice to hold the file paths\n\tvar modTimes []time.Time // 
slice to hold the modification times of the files\n\n\t// Loop through the provided slice of file names\n\tfor _, file := range match {\n\t\t// Get the file info and handle any errors\n\t\tinfo, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\t// Append the file path and modification time to the corresponding slices\n\t\tpaths = append(paths, file)\n\t\tmodTimes = append(modTimes, info.ModTime())\n\t}\n\n\t// Sort the slices by modification time\n\tsort.SliceStable(paths, func(i, j int) bool {\n\t\treturn modTimes[i].After(modTimes[j])\n\t})\n\n\t// Get the current time\n\tnow := time.Now()\n\n\t// Loop through the sorted slice of file paths\n\tfor i, path := range paths {\n\t\t// Check if the file was modified within the last modified hours\n\t\tif now.Sub(modTimes[i]) < (time.Duration(modified) * time.Hour) {\n\t\t\t// If it was, append the file path to the matches slice\n\t\t\tmatches = append(matches, path)\n\t\t} else {\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"[IGNORING] Last modified time: %s older than configured timeframe (%d hours): %s.\", modTimes[i], modified, path)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Return the slice of matching file paths\n\treturn matches\n}", "func SplitChangesFetchRaw(since int64) ([]byte, error) {\n\n\tvar bufferQuery bytes.Buffer\n\tbufferQuery.WriteString(\"/splitChanges\")\n\n\tif since >= -1 {\n\t\tbufferQuery.WriteString(\"?since=\")\n\t\tbufferQuery.WriteString(strconv.FormatInt(since, 10))\n\t}\n\n\tdata, err := sdkFetch(bufferQuery.String())\n\tif err != nil {\n\t\tlog.Error.Println(\"Error fetching split changes \", err)\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ReadBlobData Reads blob from specified starting location
func (sr *StorageReader) ReadBlobData(path string, startIndex, length int64) []byte { ctx := context.Background() blobURL := sr.container.NewBlockBlobURL(path) downloadResponse, err := blobURL.Download(ctx, startIndex, length, azblob.BlobAccessConditions{}, false) logp.Info("Attempting to download blob %s at %v", path, startIndex) bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 10}) downloadedData := bytes.Buffer{} _, err = downloadedData.ReadFrom(bodyStream) if err != nil { panic(err) } return downloadedData.Bytes() }
[ "func ReadBlob(length int32, data []byte) ([]byte, int64) {\n\tl := length\n\tif length > int32(len(data)) {\n\t\tl = int32(len(data))\n\t}\n\n\tvar idx int32\n\tfor idx = l; (idx % 4) != 0; idx++ {\n\t\tif idx >= int32(len(data)) {\n\t\t\tdata = append(data, 0)\n\t\t}\n\t}\n\treturn data[:idx], int64(idx)\n}", "func (fh *FilesystemHandler) ReadBlob(container models.SimpleContainer, blobName string) models.SimpleBlob {\n\tvar blob models.SimpleBlob\n\n\tdirPath := fh.generateFullPath(&container)\n\tfullPath := filepath.Join(dirPath, blobName)\n\n\tblob.DataCachedAtPath = fullPath\n\tblob.BlobInMemory = false\n\tblob.Name = blobName\n\tblob.ParentContainer = &container\n\tblob.Origin = container.Origin\n\tblob.URL = fullPath\n\treturn blob\n}", "func (store *fileStorage) ReadBlob(path string) ([]byte, error) {\n\tif err := store.sem.Acquire(context.TODO(), 1); err != nil {\n\t\tlog.Printf(\"Failed to acquire semaphore: %v\", err)\n\t\treturn nil, err\n\t}\n\tdefer store.sem.Release(1)\n\tresp, err := ioutil.ReadFile(filepath.Join(store.directory, path))\n\tif err != nil && os.IsNotExist(err) {\n\t\treturn nil, nil\n\t}\n\treturn resp, err\n}", "func (d *StorageDriver) ReadBlob(account keppel.Account, storageID string) (io.ReadCloser, uint64, error) {\n\tpath := d.getBlobPath(account, storageID)\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, 0, err\n\t}\n\treturn f, uint64(stat.Size()), nil\n}", "func (c *Client) ReadBlob(ctx context.Context, d digest.Digest) ([]byte, error) {\n\treturn c.readBlob(ctx, d.Hash, d.Size, 0, 0)\n}", "func (d *swiftDriver) ReadBlob(account keppel.Account, storageID string) (io.ReadCloser, uint64, error) {\n\tc, _, err := d.getBackendConnection(account)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\to := blobObject(c, storageID)\n\thdr, err := o.Headers()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treader, err := 
o.Download(nil).AsReadCloser()\n\treturn reader, hdr.SizeBytes().Get(), err\n}", "func (c *Client) ReadBlobRange(ctx context.Context, d digest.Digest, offset, limit int64) ([]byte, error) {\n\treturn c.readBlob(ctx, d.Hash, d.Size, offset, limit)\n}", "func (ac *AzureCopy) ReadBlob(blob *models.SimpleBlob) {\n\n\tlog.Debugf(\"ReadBlob %s\", blob.Name)\n\terr := ac.sourceHandler.PopulateBlob(blob)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\n\t}\n}", "func GetFileData(filepath string, index int, length int) ([]byte, error) {\n\n\tif !IsFileExist(filepath) {\n\t\treturn nil, errors.New(\"file not exist\")\n\t}\n\n\tfd, err := os.Open(filepath)\n\tif err != nil {\n\t\treturn nil, errors.New(\"open file error\")\n\t}\n\n\tdefer fd.Close()\n\n\tfd.Seek(int64(configuration.BlobSize * index), 0)\n\tbuffer := make([]byte, length)\n\tn, err := fd.Read(buffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif n != length {\n\t\treturn nil, errors.New(\"no enough bytes data to fetch\")\n\t}\n\n\treturn buffer, nil\n}", "func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tblobRef := d.client.GetContainerReference(d.container).GetBlobReference(path)\n\tif ok, err := blobRef.Exists(); err != nil {\n\t\treturn nil, err\n\t} else if !ok {\n\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t}\n\n\terr := blobRef.GetProperties(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfo := blobRef.Properties\n\tsize := info.ContentLength\n\tif offset >= size {\n\t\treturn ioutil.NopCloser(bytes.NewReader(nil)), nil\n\t}\n\n\tresp, err := blobRef.GetRange(&azure.GetBlobRangeOptions{\n\t\tRange: &azure.BlobRange{\n\t\t\tStart: uint64(offset),\n\t\t\tEnd: 0,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func ExtractBlob(section []byte, off int, end int) ([]byte, error) {\n\tif off < 0 {\n\t\treturn nil, fmt.Errorf(\"invalid offset: %d\", off)\n\t}\n\n\tif end < 0 {\n\t\tend = 
len(section)\n\t}\n\n\tsize := end - off\n\tif size < 0 {\n\t\treturn nil, wlerr.Errorf(\n\t\t\t\"data has negative length: off=%d end=%d\", off, end)\n\t}\n\n\tif size == 0 {\n\t\treturn nil, nil\n\t}\n\n\tif end > len(section) {\n\t\treturn nil, wlerr.Errorf(\n\t\t\t\"data extends beyond end of section: data=%d section=%d\",\n\t\t\tend, len(section))\n\t}\n\n\treturn section[off:end], nil\n}", "func (c *containerdCAS) ReadBlob(blobHash string) (io.Reader, error) {\n\tshaDigest := digest.Digest(blobHash)\n\t_, err := contentStore.Info(ctrdCtx, shaDigest)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ReadBlob: Exception getting info of blob: %s. %s\", blobHash, err.Error())\n\t}\n\treaderAt, err := contentStore.ReaderAt(ctrdCtx, spec.Descriptor{Digest: shaDigest})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ReadBlob: Exception while reading blob: %s. %s\", blobHash, err.Error())\n\t}\n\treturn content.NewReader(readerAt), nil\n}", "func (is *ObjectStorage) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,\n) (io.ReadCloser, int64, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn nil, -1, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\tbinfo, err := is.store.Stat(context.Background(), blobPath)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t}\n\n\tend := to\n\n\tif to < 0 || to >= binfo.Size() {\n\t\tend = binfo.Size() - 1\n\t}\n\n\tblobHandle, err := is.store.Reader(context.Background(), blobPath, from)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, -1, -1, err\n\t}\n\n\tblobReadCloser, err := NewBlobStream(blobHandle, from, end)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to 
open blob stream\")\n\n\t\treturn nil, -1, -1, err\n\t}\n\n\t// is a 'deduped' blob?\n\tif binfo.Size() == 0 {\n\t\tdefer blobReadCloser.Close()\n\n\t\t// Check blobs in cache\n\t\tdstRecord, err := is.checkCacheBlob(digest)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"digest\", digest.String()).Msg(\"cache: not found\")\n\n\t\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tbinfo, err := is.store.Stat(context.Background(), dstRecord)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to stat blob\")\n\n\t\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tend := to\n\n\t\tif to < 0 || to >= binfo.Size() {\n\t\t\tend = binfo.Size() - 1\n\t\t}\n\n\t\tblobHandle, err := is.store.Reader(context.Background(), dstRecord, from)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to open blob\")\n\n\t\t\treturn nil, -1, -1, err\n\t\t}\n\n\t\tblobReadCloser, err := NewBlobStream(blobHandle, from, end)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob stream\")\n\n\t\t\treturn nil, -1, -1, err\n\t\t}\n\n\t\treturn blobReadCloser, end - from + 1, binfo.Size(), nil\n\t}\n\n\t// The caller function is responsible for calling Close()\n\treturn blobReadCloser, end - from + 1, binfo.Size(), nil\n}", "func (s *server) ReadBlob(ctx context.Context, req *pb.ReadBlobRequest) (*pb.ReadBlobResponse, error) {\n\n\t// Run a Get request to Azure with the incoming blob key\n\tresp, err := http.Get(strings.Join([]string{base_uri,req.Key},\"\"))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed get request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\t// Read the body of the http response containing the blob data\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed response read: %v\", err)\n\t}\n\n\t// Return the body of the response to the client\n\treturn &pb.ReadBlobResponse{Data: body}, nil\n}", "func 
(fr *FileReader) readerForOffset(off int64) (io.ReadCloser, error) {\n\tif off < 0 {\n\t\tpanic(\"negative offset\")\n\t}\n\tif off >= fr.size {\n\t\treturn eofReader, nil\n\t}\n\toffRemain := off\n\tparts := fr.ss.Parts\n\tfor len(parts) > 0 && parts[0].Size <= uint64(offRemain) {\n\t\toffRemain -= int64(parts[0].Size)\n\t\tparts = parts[1:]\n\t}\n\tif len(parts) == 0 {\n\t\treturn eofReader, nil\n\t}\n\tp0 := parts[0]\n\tvar rsc blobref.ReadSeekCloser\n\tvar err error\n\tswitch {\n\tcase p0.BlobRef != nil && p0.BytesRef != nil:\n\t\treturn nil, fmt.Errorf(\"part illegally contained both a blobRef and bytesRef\")\n\tcase p0.BlobRef == nil && p0.BytesRef == nil:\n\t\treturn &nZeros{int(p0.Size - uint64(offRemain))}, nil\n\tcase p0.BlobRef != nil:\n\t\trsc, _, err = fr.fetcher.Fetch(p0.BlobRef)\n\tcase p0.BytesRef != nil:\n\t\trsc, err = NewFileReader(fr.fetcher, p0.BytesRef)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toffRemain += int64(p0.Offset)\n\tif offRemain > 0 {\n\t\tnewPos, err := rsc.Seek(offRemain, os.SEEK_SET)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif newPos != offRemain {\n\t\t\tpanic(\"Seek didn't work\")\n\t\t}\n\t}\n\treturn struct {\n\t\tio.Reader\n\t\tio.Closer\n\t}{\n\t\tio.LimitReader(rsc, int64(p0.Size)),\n\t\trsc,\n\t}, nil\n}", "func blobGets(fn string) string {\n\tf, err := os.Open(fn)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn \"\"\n\t\t}\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\tpos := blobSeek(f)\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tb := make([]byte, int(fi.Size()-pos))\n\tif _, err = f.Read(b); err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}", "func (c *Client) ReadBlobToFile(ctx context.Context, d digest.Digest, fpath string) (int64, error) {\n\treturn c.readBlobToFile(ctx, d.Hash, d.Size, fpath)\n}", "func (c *INDIClient) GetBlob(deviceName, propName, blobName string) (rdr io.ReadCloser, fileName string, length int64, err error) {\n\tdevice, err := 
c.findDevice(deviceName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tprop, ok := device.BlobProperties[propName]\n\tif !ok {\n\t\terr = ErrPropertyNotFound\n\t\treturn\n\t}\n\n\tval, ok := prop.Values[blobName]\n\tif !ok {\n\t\terr = ErrPropertyValueNotFound\n\t\treturn\n\t}\n\n\trdr, err = c.fs.Open(val.Value)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfileName = filepath.Base(val.Value)\n\n\tlength = val.Size\n\treturn\n}", "func ReadPointer(reader io.Reader) (Pointer, error) {\n\tbuf := make([]byte, blobSizeCutoff)\n\tn, err := io.ReadFull(reader, buf)\n\tif err != nil && err != io.ErrUnexpectedEOF {\n\t\treturn Pointer{}, err\n\t}\n\tbuf = buf[:n]\n\n\treturn ReadPointerFromBuffer(buf)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience wrapper to return Response.Data as a string.
func (r *Response) String() string { return string(r.Data) }
[ "func (r Response) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (r *Response) String() string {\n\treturn string(r.Body)\n}", "func (self *BatchResponse) ResponseAsString() string {\n\treturn string(self.Debug.RawResponse)\n}", "func (r *Response) String() string {\n\treturn fmt.Sprintf(\"%s %s %d %d %s\", r.request.method, r.request.baseURL.String(), r.StatusCode(), r.Size(), r.Time())\n}", "func (r *Response) Data(data interface{}) JResponseWriter {\n\treturn r.Field(fieldData, data)\n}", "func (r Response) ToString() string {\n\treturn fmt.Sprintf(\"Response \\nRequestID: %v, \\nStatus: {%#v}, \\nResult: {%#v}\\n\", r.RequestID, r.Status, r.Result)\n}", "func DataResponse(data interface{}) Response {\n\treturn Response{\n\t\tCode: 200,\n\t\tData: data,\n\t\tMessage: \"ok\",\n\t}\n}", "func StringResponse(req *http.Request, status int, headers http.Header, body string) *http.Response {\n\treturn SimpleResponse(req, status, headers, int64(len(body)), strings.NewReader(body))\n}", "func (c *Context) String(s string) error {\n\treturn c.Response.String(s)\n}", "func (t GetEncodedResponseEncoding) String() string {\n\treturn string(t)\n}", "func (ctx *Context) Data(code int, contentType string, data []byte) {\n\tctx.Response.StatusCode = code\n\tctx.SetContentType(contentType)\n\tctx.Response.Body = ioutil.NopCloser(bytes.NewBuffer(data))\n}", "func (t AuthChallengeResponseResponse) String() string {\n\treturn string(t)\n}", "func (e Data) String() string {\n\tj, _ := e.MarshalJSON()\n\treturn string(j)\n}", "func (r *Response) String() string {\n\tif r.body == nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(string(r.body))\n}", "func (response HTTPResponse) GetAsString() string {\n\tresult, err := GetAsJSON(response)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to convert HTTP response to string: \" + err.Error())\n\t\tblankHTTPResponse := 
GetHTTPResponseTemplate()\n\t\tblankHTTPResponse.Error = true\n\t\tblankHTTPResponse.StatusCode = http.StatusInternalServerError\n\t\tblankHTTPResponse.ContentType = \"string\"\n\t\tblankHTTPResponse.Content = \"Failed to process response.\"\n\t\t// BUG: This can recurse infinitely if there is something\n\t\t// wrong with the GetAsJSON() function\n\t\tresult = blankHTTPResponse.GetAsString()\n\t}\n\treturn result\n}", "func AsData(ip string, data interface{}) (resp Response, err error) {\n\traw, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn AsError(ip, err)\n\t}\n\n\tresp = Response{\n\t\tStatusCode: 200,\n\t\tHeaders: jsonHeaders,\n\t\tBody: string(raw),\n\t}\n\treturn\n}", "func (r *Response) Text() (string, error) {\n\tb, err := r.Raw()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(b), nil\n}", "func (r *ResponseData) ToHTTP() string {\n\tpayload := []string{}\n\tif r.ProtocolVersion != \"\" {\n\t\t// HTTP/1.1 200 OK\n\t\tstatusLine := fmt.Sprintf(\"%s %d %s\", r.ProtocolVersion, r.StatusCode, r.StatusString)\n\t\tpayload = append(payload, statusLine)\n\t}\n\tfor _, key := range r.OrderedHeaderKeys {\n\t\theaderLine := fmt.Sprintf(\"%s: %s\", key, r.Headers[key])\n\t\tpayload = append(payload, headerLine)\n\t}\n\tpayload = append(payload, r.Unparsed...)\n\treturn strings.Join(payload, \"\\n\")\n}", "func (r response) Body() []byte {\n\treturn []byte(r.String)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience wrapper to return Response.Data as an int.
func (r *Response) Int() (int, error) { return strconv.Atoi(r.String()) }
[ "func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }", "func GetInt64Data(response *bcsmonitor.QueryResponse) int64 {\n\tif len(response.Data.Result) == 0 {\n\t\treturn 0\n\t}\n\tvalueStr, ok := response.Data.Result[0].Value[1].(string)\n\tif !ok {\n\t\treturn 0\n\t}\n\tvalue, err := strconv.ParseInt(valueStr, 10, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn value\n}", "func (s *SliceInt) Data() []int {\n\tif s == nil {\n\t\treturn nil\n\t}\n\treturn s.data\n}", "func (d Data) Int() int {\n\tif d.Item != nil {\n\t\tnum, err := strconv.Atoi(string(d.Value))\n\t\tif err != nil {\n\t\t\treturn 0\n\t\t}\n\t\treturn num\n\t}\n\treturn 0\n}", "func (v *Value) Int() int {\n return Util.ToInt(v.data)\n}", "func ParseIntWithResponse(ctx *fasthttp.RequestCtx, key string) (int, error) {\n\tval, err := strconv.Atoi(ctx.UserValue(key).(string))\n\tif err != nil {\n\t\tGenerateInvalidErrorBody(ctx, &InvalidInputError{errorUnableParseValue + \": \" + key}, fasthttp.StatusUnprocessableEntity)\n\t\treturn 0, err\n\t}\n\treturn val, err\n}", "func (ctx *Context) Data(code int, contentType string, data []byte) {\n\tctx.Response.StatusCode = code\n\tctx.SetContentType(contentType)\n\tctx.Response.Body = ioutil.NopCloser(bytes.NewBuffer(data))\n}", "func (c *Context) Data(data interface{}, total ...int64) {\n\tc.responseFormat.SetData(data, total...)\n}", "func (client PrimitiveClient) GetIntResponder(resp *http.Response) (result IntWrapper, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func (IntCodec) Read(data []byte, ptr unsafe.Pointer, wt WireType) (n int, err error) {\n\ti, n := ReadVarInt(data)\n\tif n < 0 {\n\t\treturn 0, fmt.Errorf(\"corrupt var int\")\n\t}\n\t*(*int)(ptr) = int(i)\n\treturn n, nil\n}", "func (id 
*RequestID) Int() (int, error) {\n\treturn id.intValue, id.intError\n}", "func (n *eeNum) int() *int { return (*int)(unsafe.Pointer(&n.data)) }", "func (code Code) Int() int {\n\treturn int(code)\n}", "func (res Response) AsInt32() (int32, error) {\n\treturn res.Bits.AsInt32(), res.Error\n}", "func (res Response) AsInt64() (int64, error) {\n\treturn res.Bits.AsInt64(), res.Error\n}", "func (client IntGroupClient) GetOverflowInt32Responder(resp *http.Response) (result Int32, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result.Value),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func DataResponse(data interface{}) Response {\n\treturn Response{\n\t\tCode: 200,\n\t\tData: data,\n\t\tMessage: \"ok\",\n\t}\n}", "func (s *SessionItem) GetDataAsArray() []int {\n\tdataAsArray, _ := s.Data.([]int)\n\treturn dataAsArray\n}", "func intDataSize(data interface{}) int {\n\tswitch data := data.(type) {\n\tcase int8, *int8, *uint8:\n\t\treturn 1\n\tcase []int8:\n\t\treturn len(data)\n\tcase []uint8:\n\t\treturn len(data)\n\tcase int16, *int16, *uint16:\n\t\treturn 2\n\tcase []int16:\n\t\treturn 2 * len(data)\n\tcase []uint16:\n\t\treturn 2 * len(data)\n\tcase int32, *int32, *uint32:\n\t\treturn 4\n\tcase []int32:\n\t\treturn 4 * len(data)\n\tcase []uint32:\n\t\treturn 4 * len(data)\n\tcase int64, *int64, *uint64:\n\t\treturn 8\n\tcase []int64:\n\t\treturn 8 * len(data)\n\tcase []uint64:\n\t\treturn 8 * len(data)\n\t}\n\treturn 0\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewPolicyDefinitionsClient creates a new instance of PolicyDefinitionsClient with the specified values.
func NewPolicyDefinitionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *PolicyDefinitionsClient { cp := arm.ClientOptions{} if options != nil { cp = *options } if len(cp.Host) == 0 { cp.Host = arm.AzurePublicCloud } return &PolicyDefinitionsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)} }
[ "func NewServiceEndpointPolicyDefinitionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ServiceEndpointPolicyDefinitionsClient, error) {\n\tcl, err := arm.NewClient(moduleName+\".ServiceEndpointPolicyDefinitionsClient\", moduleVersion, credential, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &ServiceEndpointPolicyDefinitionsClient{\n\t\tsubscriptionID: subscriptionID,\n\t\tinternal: cl,\n\t}\n\treturn client, nil\n}", "func NewApplicationDefinitionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *ApplicationDefinitionsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &ApplicationDefinitionsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}", "func NewRegistrationDefinitionsClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistrationDefinitionsClient, error) {\n\tcl, err := arm.NewClient(moduleName+\".RegistrationDefinitionsClient\", moduleVersion, credential, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &RegistrationDefinitionsClient{\n\t\tinternal: cl,\n\t}\n\treturn client, nil\n}", "func NewPlacementPoliciesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PlacementPoliciesClient, error) {\n\tcl, err := arm.NewClient(moduleName+\".PlacementPoliciesClient\", moduleVersion, credential, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &PlacementPoliciesClient{\n\t\tsubscriptionID: subscriptionID,\n\t\tinternal: cl,\n\t}\n\treturn client, nil\n}", "func NewClient(confs ...ClientConfiguration) *Client {\n\tq := setupClient()\n\n\t// Loop through the configurations and apply them to the client.\n\tfor _, c := range confs {\n\t\tc(q)\n\t}\n\n\treturn q\n}", "func 
NewClient() *Client {\n\t// Get application working directory\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Generate schema path\n\tschemaDir := path.Join(wd, \"/schema/\")\n\n\t// Read all json schema files\n\tfiles, _ := ioutil.ReadDir(schemaDir)\n\tschemaLoaders := make(map[string]gojsonschema.JSONLoader)\n\tfor _, f := range files {\n\t\tname := f.Name()\n\t\tif !strings.HasSuffix(name, \".json\") {\n\t\t\tbreak\n\t\t}\n\n\t\tschemaFileName := path.Join(schemaDir, name)\n\t\tschemaName := strings.TrimSuffix(name, filepath.Ext(name))\n\t\tschemaLoader := gojsonschema.NewReferenceLoader(\"file://\" + schemaFileName)\n\t\tschemaLoaders[schemaName] = schemaLoader\n\t}\n\n\treturn &Client{\n\t\tschemaLocation: schemaDir,\n\t\tschemaLoaders: schemaLoaders,\n\t}\n}", "func NewClient(cfg watson.Config) (Client, error) {\n\tdialog := Client{version: \"/\" + defaultMajorVersion}\n\tif len(cfg.Version) > 0 {\n\t\tdialog.version = \"/\" + cfg.Version\n\t}\n\tif len(cfg.Credentials.ServiceName) == 0 {\n\t\tcfg.Credentials.ServiceName = \"dialog\"\n\t}\n\tif len(cfg.Credentials.Url) == 0 {\n\t\tcfg.Credentials.Url = defaultUrl\n\t}\n\tclient, err := watson.NewClient(cfg.Credentials)\n\tif err != nil {\n\t\treturn Client{}, err\n\t}\n\tdialog.watsonClient = client\n\treturn dialog, nil\n}", "func NewClient(options ...func(c *Client)) *Client {\n\tc := &Client{}\n\tfor _, option := range options {\n\t\toption(c)\n\t}\n\n\t// Set default user-agent if not set\n\tif c.UserAgent == \"\" {\n\t\tc.UserAgent = \"wporg/1.0\"\n\t}\n\n\t// Set default client if not set\n\tif c.HTTPClient == nil {\n\t\tc.HTTPClient = getDefaultClient()\n\t}\n\n\treturn c\n}", "func NewRoleDefinitionsClient(credential azcore.TokenCredential, options *arm.ClientOptions) *RoleDefinitionsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &RoleDefinitionsClient{ep: 
string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}", "func NewClient(pluginsDir, configsDir string) (*client, error) {\n\treturn &client{\n\t\tpluginsDir: pluginsDir,\n\t\tconfigsDir: configsDir,\n\t}, nil\n}", "func NewPolicyClient(cfg aws.Config) PolicyClient {\n\treturn iam.NewFromConfig(cfg)\n}", "func (c *clientsFactory) PoliciesClient() (authz.PoliciesServiceClient, error) {\n\tconn, err := c.connectionByName(\"authz-service\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn authz.NewPoliciesServiceClient(conn), nil\n}", "func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {\n\tclientOpts := defaultGRPCClientOptions()\n\tif newClientHook != nil {\n\t\thookOpts, err := newClientHook(ctx, clientHookParams{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclientOpts = append(clientOpts, hookOpts...)\n\t}\n\n\tconnPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := Client{CallOptions: defaultCallOptions()}\n\n\tc := &gRPCClient{\n\t\tconnPool: connPool,\n\t\tclient: dlppb.NewDlpServiceClient(connPool),\n\t\tCallOptions: &client.CallOptions,\n\t}\n\tc.setGoogleClientInfo()\n\n\tclient.internalClient = c\n\n\treturn &client, nil\n}", "func NewClient(companyList, getCompany, createCompany, updateCompany, deleteCompany goa.Endpoint) *Client {\n\treturn &Client{\n\t\tCompanyListEndpoint: companyList,\n\t\tGetCompanyEndpoint: getCompany,\n\t\tCreateCompanyEndpoint: createCompany,\n\t\tUpdateCompanyEndpoint: updateCompany,\n\t\tDeleteCompanyEndpoint: deleteCompany,\n\t}\n}", "func NewClient(cfg watson.Config) (Client, error) {\n\tci := Client{version: \"/\" + defaultMajorVersion}\n\tif len(cfg.Credentials.ServiceName) == 0 {\n\t\tcfg.Credentials.ServiceName = \"conversation\"\n\t}\n\tif len(cfg.Credentials.Url) == 0 {\n\t\tcfg.Credentials.Url = defaultUrl\n\t}\n\tclient, err := watson.NewClient(cfg.Credentials)\n\tif 
err != nil {\n\t\treturn Client{}, err\n\t}\n\tci.watsonClient = client\n\treturn ci, nil\n}", "func NewPolicyEventsClient(credential azcore.TokenCredential, options *arm.ClientOptions) *PolicyEventsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Endpoint) == 0 {\n\t\tcp.Endpoint = arm.AzurePublicCloud\n\t}\n\tclient := &PolicyEventsClient{\n\t\thost: string(cp.Endpoint),\n\t\tpl: armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, &cp),\n\t}\n\treturn client\n}", "func NewClient(project string, jsonKey string) (Client, error) {\n\tcomputeJwtConf, err := oauthgoogle.JWTConfigFromJSON([]byte(jsonKey), computeScope)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading Google JSON Key: %v\", err)\n\t}\n\tcomputeClient := computeJwtConf.Client(oauth2.NoContext)\n\n\tstorageJwtConf, err := oauthgoogle.JWTConfigFromJSON([]byte(jsonKey), storageScope)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading Google JSON Key: %v\", err)\n\t}\n\tstorageClient := storageJwtConf.Client(oauth2.NoContext)\n\n\tcomputeService, err := compute.New(computeClient)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating a Google Compute Service client: %v\", err)\n\t}\n\n\tstorageService, err := storage.New(storageClient)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating a Google Storage Service client: %v\", err)\n\t}\n\n\treturn &client{\n\t\tproject: project,\n\t\tcomputeService: computeService,\n\t\tstorageService: storageService,\n\t}, nil\n}", "func NewPolicySetDefinition(ctx *pulumi.Context,\n\tname string, args *PolicySetDefinitionArgs, opts ...pulumi.ResourceOption) (*PolicySetDefinition, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.PolicyDefinitions == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'PolicyDefinitions'\")\n\t}\n\taliases := 
pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200301:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20170601preview:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20170601preview:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180301:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180301:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180501:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180501:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190101:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190101:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190601:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190601:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190901:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190901:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200901:PolicySetDefinition\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200901:PolicySetDefinition\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource PolicySetDefinition\n\terr := ctx.RegisterResource(\"azure-native:authorization/v20200301:PolicySetDefinition\", name, args, 
&resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewClient(secretBytes []byte, authorizeHandler func(string) (string, error)) (Client, error) {\n\tvar secret map[string]interface{}\n\tif err := json.Unmarshal(secretBytes, &secret); err != nil {\n\t\treturn nil, err\n\t}\n\tif authorizeHandler == nil {\n\t\tauthorizeHandler = defaultAuthorizeFlowHandler\n\t}\n\n\t// TODO: support \"web\" client secret by using a local web server.\n\t// According to the content in the json, decide whether to run three-legged\n\t// flow (for client secret) or two-legged flow (for service account).\n\tif installed, ok := secret[\"installed\"]; ok {\n\t\t// When the secret contains \"installed\" field, it is a client secret. We\n\t\t// will run a three-legged flow\n\t\tinstalledMap, ok := installed.(map[string]interface{})\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Malformatted secret json, expected map for param \\\"installed\\\"\")\n\t\t}\n\t\treturn ThreeLeggedClient{installedMap, authorizeHandler}, nil\n\t} else if tokenType, ok := secret[\"type\"]; ok && \"service_account\" == tokenType {\n\t\t// If the token type is \"service_account\", we will run the two-legged flow\n\t\treturn TwoLeggedClient{secret}, nil\n\t} else {\n\t\treturn nil, fmt.Errorf(\"Unsupported token type.\")\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
createOrUpdateCreateRequest creates the CreateOrUpdate request.
func (client *PolicyDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, policyDefinitionName string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, runtime.MarshalAsJSON(req, parameters) }
[ "func (client *RolloutsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, rolloutName string, options *RolloutsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif rolloutName == \"\" {\n\t\treturn nil, errors.New(\"parameter rolloutName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{rolloutName}\", url.PathEscape(rolloutName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.RolloutRequest != nil {\n\t\treturn req, runtime.MarshalAsJSON(req, *options.RolloutRequest)\n\t}\n\treturn req, nil\n}", "func (client *OperationsClient) createOrUpdateCreateRequest(ctx context.Context, providerNamespace string, operationsPutContent OperationsPutContent, options *OperationsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ProviderHub/providerRegistrations/{providerNamespace}/operations/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif providerNamespace == \"\" {\n\t\treturn nil, errors.New(\"parameter providerNamespace cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerNamespace}\", url.PathEscape(providerNamespace))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-20\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, operationsPutContent)\n}", "func (client *RoleDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, scope string, roleDefinitionID string, roleDefinition RoleDefinition, options *RoleDefinitionsCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter roleDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleDefinitionId}\", url.PathEscape(roleDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, roleDefinition)\n}", "func (client *ApplicationDefinitionsClient) createOrUpdateByIDCreateRequest(ctx context.Context, resourceGroupName string, applicationDefinitionName string, parameters ApplicationDefinition, options *ApplicationDefinitionsBeginCreateOrUpdateByIDOptions) (*policy.Request, error) 
{\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Solutions/applicationDefinitions/{applicationDefinitionName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif applicationDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter applicationDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{applicationDefinitionName}\", url.PathEscape(applicationDefinitionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *TagsClient) createOrUpdateAtScopeCreateRequest(ctx context.Context, scope string, parameters TagsResource, options *TagsCreateOrUpdateAtScopeOptions) (*azcore.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Resources/tags/default\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *TriggerClient) 
createOrUpdateTriggerCreateRequest(ctx context.Context, triggerName string, trigger TriggerResource, options *TriggerBeginCreateOrUpdateTriggerOptions) (*azcore.Request, error) {\n\turlPath := \"/triggers/{triggerName}\"\n\tif triggerName == \"\" {\n\t\treturn nil, errors.New(\"parameter triggerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{triggerName}\", url.PathEscape(triggerName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(trigger)\n}", "func (client *TagsClient) createOrUpdateValueCreateRequest(ctx context.Context, tagName string, tagValue string, options *TagsCreateOrUpdateValueOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{tagName}\", url.PathEscape(tagName))\n\turlPath = strings.ReplaceAll(urlPath, \"{tagValue}\", url.PathEscape(tagValue))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SparkJobDefinitionClient) createOrUpdateSparkJobDefinitionCreateRequest(ctx context.Context, sparkJobDefinitionName string, sparkJobDefinition 
SparkJobDefinitionResource, options *SparkJobDefinitionBeginCreateOrUpdateSparkJobDefinitionOptions) (*azcore.Request, error) {\n\turlPath := \"/sparkJobDefinitions/{sparkJobDefinitionName}\"\n\tif sparkJobDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter sparkJobDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sparkJobDefinitionName}\", url.PathEscape(sparkJobDefinitionName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(sparkJobDefinition)\n}", "func (client *WebAppsClient) createOrUpdateConfigurationCreateRequest(ctx context.Context, resourceGroupName string, name string, siteConfig SiteConfigResource, options *WebAppsCreateOrUpdateConfigurationOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, 
runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, siteConfig)\n}", "func (client *WebAppsClient) createOrUpdateHostSecretCreateRequest(ctx context.Context, resourceGroupName string, name string, keyType string, keyName string, key KeyInfo, options *WebAppsCreateOrUpdateHostSecretOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/host/default/{keyType}/{keyName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif keyType == \"\" {\n\t\treturn nil, errors.New(\"parameter keyType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{keyType}\", url.PathEscape(keyType))\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{keyName}\", url.PathEscape(keyName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", 
\"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, key)\n}", "func (client *NotebookClient) createOrUpdateNotebookCreateRequest(ctx context.Context, notebookName string, notebook NotebookResource, options *NotebookClientBeginCreateOrUpdateNotebookOptions) (*policy.Request, error) {\n\turlPath := \"/notebooks/{notebookName}\"\n\tif notebookName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookName}\", url.PathEscape(notebookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, notebook); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *NotebookClient) createOrUpdateNotebookCreateRequest(ctx context.Context, notebookName string, notebook NotebookResource, options *NotebookBeginCreateOrUpdateNotebookOptions) (*azcore.Request, error) {\n\turlPath := \"/notebooks/{notebookName}\"\n\tif notebookName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookName}\", url.PathEscape(notebookName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", 
\"application/json\")\n\treturn req, req.MarshalAsJSON(notebook)\n}", "func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {\n\n\t// check if the name key has to be generated\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn controllerutil.OperationResultNone, err\n\t}\n\tkey := client.ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}\n\n\tif accessor.GetName() == \"\" && accessor.GetGenerateName() != \"\" {\n\t\tif err := Mutate(f, key, obj); err != nil {\n\t\t\treturn controllerutil.OperationResultNone, err\n\t\t}\n\t\tif err := c.Create(ctx, obj); err != nil {\n\t\t\treturn controllerutil.OperationResultNone, err\n\t\t}\n\t\treturn controllerutil.OperationResultCreated, nil\n\t}\n\n\treturn controllerutil.CreateOrUpdate(ctx, c, obj, f)\n}", "func (client *ApplyUpdatesClient) createOrUpdateParentCreateRequest(ctx context.Context, resourceGroupName string, providerName string, resourceParentType string, resourceParentName string, resourceType string, resourceName string, options *ApplyUpdatesClientCreateOrUpdateParentOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter providerName cannot be empty\")\n\t}\n\turlPath 
= strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(providerName))\n\tif resourceParentType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceParentType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceParentType}\", url.PathEscape(resourceParentType))\n\tif resourceParentName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceParentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceParentName}\", url.PathEscape(resourceParentName))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PipelineClient) createOrUpdatePipelineCreateRequest(ctx context.Context, pipelineName string, pipeline PipelineResource, options *PipelineBeginCreateOrUpdatePipelineOptions) (*azcore.Request, error) {\n\turlPath := \"/pipelines/{pipelineName}\"\n\tif pipelineName == \"\" {\n\t\treturn nil, errors.New(\"parameter pipelineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{pipelineName}\", url.PathEscape(pipelineName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", 
\"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(pipeline)\n}", "func (client *DatasetClient) createOrUpdateDatasetCreateRequest(ctx context.Context, datasetName string, dataset DatasetResource, options *DatasetBeginCreateOrUpdateDatasetOptions) (*azcore.Request, error) {\n\turlPath := \"/datasets/{datasetName}\"\n\tif datasetName == \"\" {\n\t\treturn nil, errors.New(\"parameter datasetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{datasetName}\", url.PathEscape(datasetName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(dataset)\n}", "func (client *JobExecutionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, serverName string, jobAgentName string, jobName string, jobExecutionID string, options *JobExecutionsBeginCreateOrUpdateOptions) (*http.Response, error) {\n\treq, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, serverName, jobAgentName, jobName, jobExecutionID, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := client.pl.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) {\n\t\treturn nil, client.createOrUpdateHandleError(resp)\n\t}\n\treturn resp, nil\n}", "func (client *CassandraClustersClient) createUpdateCreateRequest(ctx context.Context, 
resourceGroupName string, clusterName string, body ClusterResource, options *CassandraClustersClientBeginCreateUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func (client *BuildServiceClient) createOrUpdateBuildCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildName string, buildParam Build, options *BuildServiceClientCreateOrUpdateBuildOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds/{buildName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildName}\", url.PathEscape(buildName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, buildParam)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
createOrUpdateAtManagementGroupCreateRequest creates the CreateOrUpdateAtManagementGroup request.
func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) { urlPath := "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) if managementGroupID == "" { return nil, errors.New("parameter managementGroupID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID)) req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, runtime.MarshalAsJSON(req, parameters) }
[ "func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *MockResourceGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, parameters resources.Group) error {\n\tparameters.Name = &resourceGroupName\n\tc.RGs[resourceGroupName] = parameters\n\treturn nil\n}", "func (c GroupClient) CreateOrUpdate(ctx context.Context, resourceGroupName, region string) error {\n\tif _, err := c.client.CreateOrUpdate(ctx, resourceGroupName, resources.Group{\n\t\tLocation: &region,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (client *PolicyDefinitionsClient) deleteAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsDeleteAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := 
\"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateCreateOrUpdateSwimmingLaneGroupRequest() (request *CreateOrUpdateSwimmingLaneGroupRequest) {\n\trequest = &CreateOrUpdateSwimmingLaneGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"mse\", \"2019-05-31\", \"CreateOrUpdateSwimmingLaneGroup\", \"mse\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *MockApplicationSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName, asgName string, parameters network.ApplicationSecurityGroup) (*network.ApplicationSecurityGroup, error) {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.ASGs[asgName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &asgName\n\tc.ASGs[asgName] = parameters\n\treturn &parameters, nil\n}", "func CreateSaveApgroupConfigRequest() (request *SaveApgroupConfigRequest) {\n\trequest = &SaveApgroupConfigRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cloudwf\", \"2017-03-28\", \"SaveApgroupConfig\", 
\"cloudwf\", \"openAPI\")\n\treturn\n}", "func (client *ExemptionsClient) listForManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *ExemptionsClientListForManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyExemptions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PolicyRestrictionsClient) checkAtManagementGroupScopeCreateRequest(ctx context.Context, managementGroupID string, parameters CheckManagementGroupRestrictionsRequest, options *PolicyRestrictionsClientCheckAtManagementGroupScopeOptions) (*policy.Request, error) {\n\turlPath := \"/providers/{managementGroupsNamespace}/managementGroups/{managementGroupId}/providers/Microsoft.PolicyInsights/checkPolicyRestrictions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupsNamespace}\", url.PathEscape(\"Microsoft.Management\"))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := 
runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func NewDeploymentAtManagementGroupScope(ctx *pulumi.Context,\n\tname string, args *DeploymentAtManagementGroupScopeArgs, opts ...pulumi.ResourceOption) (*DeploymentAtManagementGroupScope, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.GroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'GroupId'\")\n\t}\n\tif args.Properties == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Properties'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190501:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190510:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190510:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190701:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190701:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-native:resources/v20191001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20191001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20200601:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20200601:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20200801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20200801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20201001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20201001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20210101:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20210101:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20210401:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20210401:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource DeploymentAtManagementGroupScope\n\terr := ctx.RegisterResource(\"azure-native:resources/v20190501:DeploymentAtManagementGroupScope\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func CreateCreateApplicationGroupRequest() (request *CreateApplicationGroupRequest) {\n\trequest = &CreateApplicationGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"oos\", \"2019-06-01\", \"CreateApplicationGroup\", \"oos\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c 
*MockNetworkSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName, nsgName string, parameters network.SecurityGroup) (*network.SecurityGroup, error) {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.NSGs[nsgName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &nsgName\n\tc.NSGs[nsgName] = parameters\n\treturn &parameters, nil\n}", "func (client *PolicyDefinitionsClient) listByManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *PolicyDefinitionsListByManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreatePutMetricAlarmRequest() (request *PutMetricAlarmRequest) {\n\trequest = &PutMetricAlarmRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2018-03-08\", \"PutMetricAlarm\", \"cms\", \"openAPI\")\n\treturn\n}", "func (c *UMemClient) 
NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (b *Batch) CreateAugroup(name string, opts map[string]interface{}, id *int) {\n\tb.call(\"nvim_create_augroup\", id, name, opts)\n}", "func CreateUpdateEndpointGroupRequest() (request *UpdateEndpointGroupRequest) {\n\trequest = &UpdateEndpointGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ga\", \"2019-11-20\", \"UpdateEndpointGroup\", \"gaplus\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateModifyDesktopsPolicyGroupRequest() (request *ModifyDesktopsPolicyGroupRequest) {\n\trequest = &ModifyDesktopsPolicyGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ecd\", \"2020-09-30\", \"ModifyDesktopsPolicyGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (v *Nvim) CreateAugroup(name string, opts map[string]interface{}) (id int, err error) {\n\terr = v.call(\"nvim_create_augroup\", &id, name, opts)\n\treturn id, err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
createOrUpdateAtManagementGroupHandleResponse handles the CreateOrUpdateAtManagementGroup response.
func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse, error) { result := PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil { return PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, runtime.NewResponseError(err, resp) } return result, nil }
[ "func handleCreateGroup(c *Context, w http.ResponseWriter, r *http.Request) {\n\tcreateGroupRequest, err := model.NewCreateGroupRequestFromReader(r.Body)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to decode request\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tgroup := model.Group{\n\t\tName: createGroupRequest.Name,\n\t\tDescription: createGroupRequest.Description,\n\t\tVersion: createGroupRequest.Version,\n\t\tImage: createGroupRequest.Image,\n\t\tMaxRolling: createGroupRequest.MaxRolling,\n\t\tAPISecurityLock: createGroupRequest.APISecurityLock,\n\t\tMattermostEnv: createGroupRequest.MattermostEnv,\n\t}\n\n\tannotations, err := model.AnnotationsFromStringSlice(createGroupRequest.Annotations)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"invalid annotations\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = c.Store.CreateGroup(&group, annotations)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to create group\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc.Supervisor.Do()\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, group.ToDTO(annotations))\n}", "func CreateCreateOrUpdateSwimmingLaneGroupResponse() (response *CreateOrUpdateSwimmingLaneGroupResponse) {\n\tresponse = &CreateOrUpdateSwimmingLaneGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (s *Server) handleGroupCreate(w http.ResponseWriter, r *http.Request) {\n\tuser := api.UserFromContext(r.Context())\n\tif !user.IsTeacher {\n\t\tError(w, r, api.Errorf(api.EUNAUTHORIZED, \"You are not a teacher\"))\n\t\treturn\n\t}\n\n\t// Unmarshal data\n\tgroup := api.Group{}\n\tif err := json.NewDecoder(r.Body).Decode(&group); err != nil {\n\t\tError(w, r, api.Errorf(api.EINVALID, \"Invalid JSON body\"))\n\t\treturn\n\t}\n\n\t// Generate share link\n\trand.Seed(time.Now().UnixNano())\n\tgroup.ShareLink 
= api.RandStringSeq(11)\n\n\t// Assign owner\n\tgroup.OwnerID = user.ID\n\n\t// Create group in the database.\n\terr := s.GroupService.CreateGroup(r.Context(), &group)\n\tif err != nil {\n\t\tError(w, r, err)\n\t\treturn\n\t}\n\n\t// Write new group content to response based on accept header.\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tif err := json.NewEncoder(w).Encode(group); err != nil {\n\t\tLogError(r, err)\n\t\treturn\n\t}\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsGetAtManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsGetAtManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsGetAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client IotHubResourceClient) CreateEventHubConsumerGroupResponder(resp *http.Response) (result EventHubConsumerGroupInfo, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateCreateMonitorGroupResponse() (response *CreateMonitorGroupResponse) {\n\tresponse = &CreateMonitorGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateMonitorGroupByResourceGroupIdResponse() (response *CreateMonitorGroupByResourceGroupIdResponse) {\n\tresponse = &CreateMonitorGroupByResourceGroupIdResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateSaveApgroupConfigResponse() (response *SaveApgroupConfigResponse) {\n\tresponse = &SaveApgroupConfigResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateModifyDesktopsPolicyGroupResponse() (response *ModifyDesktopsPolicyGroupResponse) {\n\tresponse = &ModifyDesktopsPolicyGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *WorkspaceManagedSQLServerVulnerabilityAssessmentsClient) createOrUpdateHandleResponse(resp *http.Response) (WorkspaceManagedSQLServerVulnerabilityAssessmentsClientCreateOrUpdateResponse, error) {\n\tresult := WorkspaceManagedSQLServerVulnerabilityAssessmentsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerVulnerabilityAssessment); err != nil {\n\t\treturn WorkspaceManagedSQLServerVulnerabilityAssessmentsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TagsClient) 
createOrUpdateAtScopeHandleResponse(resp *azcore.Response) (TagsResourceResponse, error) {\n\tvar val *TagsResource\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn TagsResourceResponse{}, err\n\t}\n\treturn TagsResourceResponse{RawResponse: resp.Response, TagsResource: val}, nil\n}", "func CreateUpdateEndpointGroupResponse() (response *UpdateEndpointGroupResponse) {\n\tresponse = &UpdateEndpointGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateUpdateAppInstanceGroupImageResponse() (response *UpdateAppInstanceGroupImageResponse) {\n\tresponse = &UpdateAppInstanceGroupImageResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateScalingTaskGroupResponse() (response *CreateScalingTaskGroupResponse) {\n\tresponse = &CreateScalingTaskGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (a *IAMApiService) CreateGroup(ctx context.Context, gid string, iamGroupCreate IamGroupCreate) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = http.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/acs/api/v1/groups/{gid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"gid\"+\"}\", fmt.Sprintf(\"%v\", gid), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := 
[]string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &iamGroupCreate\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v IamError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (c GroupClient) CreateOrUpdate(ctx context.Context, resourceGroupName, region string) error {\n\tif _, err := c.client.CreateOrUpdate(ctx, resourceGroupName, resources.Group{\n\t\tLocation: &region,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (client *ExemptionsClient) listForManagementGroupHandleResponse(resp *http.Response) (ExemptionsClientListForManagementGroupResponse, error) {\n\tresult := ExemptionsClientListForManagementGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.ExemptionListResult); err != nil {\n\t\treturn ExemptionsClientListForManagementGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func CreateGroupHandler(ctx *gin.Context) {\n\tum := ctx.MustGet(m.UMServices).(server.UserManager)\n\n\tvar request kube_types.UserGroup\n\tif err := ctx.ShouldBindWith(&request, binding.JSON); err != nil {\n\t\tgonic.Gonic(umerrors.ErrRequestValidationFailed().AddDetailsErr(err), ctx)\n\t\treturn\n\t}\n\n\tif errs := validation.ValidateCreateGroup(request); errs != nil {\n\t\tgonic.Gonic(umerrors.ErrRequestValidationFailed().AddDetailsErr(errs...), ctx)\n\t\treturn\n\t}\n\n\tif request.UserGroupMembers != nil {\n\t\tif errs := validation.ValidateAddMembers(*request.UserGroupMembers); errs != nil {\n\t\t\tgonic.Gonic(umerrors.ErrRequestValidationFailed().AddDetailsErr(errs...), ctx)\n\t\t\treturn\n\t\t}\n\t}\n\n\tgr, gerr := um.GetGroupByLabel(ctx.Request.Context(), request.Label)\n\tif cherr, ok := gerr.(*cherry.Err); ok {\n\t\tif gr != nil && !cherr.Equals(umerrors.ErrGroupNotExist()) {\n\t\t\tgonic.Gonic(umerrors.ErrGroupAlreadyExist(), ctx)\n\t\t\treturn\n\t\t}\n\t}\n\tif gr != nil && gerr == nil {\n\t\tgonic.Gonic(umerrors.ErrGroupAlreadyExist(), ctx)\n\t\treturn\n\t}\n\n\t_, err := um.CreateGroup(ctx.Request.Context(), request)\n\tif err != nil {\n\t\tif cherr, ok := err.(*cherry.Err); ok {\n\t\t\tgonic.Gonic(cherr, ctx)\n\t\t} else {\n\t\t\tctx.Error(err)\n\t\t\tgonic.Gonic(umerrors.ErrUnableCreateGroup(), ctx)\n\t\t}\n\t\treturn\n\t}\n\n\tresp, err := um.GetGroupByLabel(ctx.Request.Context(), request.Label)\n\tif err != nil {\n\t\tif cherr, ok := err.(*cherry.Err); ok {\n\t\t\tgonic.Gonic(cherr, ctx)\n\t\t} else {\n\t\t\tctx.Error(err)\n\t\t\tgonic.Gonic(umerrors.ErrUnableGetGroup(), ctx)\n\t\t}\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, resp)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
deleteCreateRequest creates the Delete request.
func (client *PolicyDefinitionsClient) deleteCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsDeleteOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil }
[ "func (client *TagsClient) deleteCreateRequest(ctx context.Context, tagName string, options *TagsDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/tagNames/{tagName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{tagName}\", url.PathEscape(tagName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *FactoriesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, options *FactoriesClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = 
reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (connection *Connection) CreateDeleteRequest(fnr Fnr) (*DeleteRequest, error) {\n\treturn NewDeleteRequestAdabas(connection.adabasToData, fnr), nil\n}", "func (client *KeyVaultClient) deleteKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientDeleteKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KeyVaultClient) deleteKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientDeleteKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDeleteApDeviceRequest() (request *DeleteApDeviceRequest) {\n\trequest 
= &DeleteApDeviceRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cloudesl\", \"2020-02-01\", \"DeleteApDevice\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *TriggerClient) deleteTriggerCreateRequest(ctx context.Context, triggerName string, options *TriggerBeginDeleteTriggerOptions) (*azcore.Request, error) {\n\turlPath := \"/triggers/{triggerName}\"\n\tif triggerName == \"\" {\n\t\treturn nil, errors.New(\"parameter triggerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{triggerName}\", url.PathEscape(triggerName))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDeleteDataCenterRequest() (request *DeleteDataCenterRequest) {\n\trequest = &DeleteDataCenterRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cassandra\", \"2019-01-01\", \"DeleteDataCenter\", \"Cassandra\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *DatasetClient) deleteDatasetCreateRequest(ctx context.Context, datasetName string, options *DatasetBeginDeleteDatasetOptions) (*azcore.Request, error) {\n\turlPath := \"/datasets/{datasetName}\"\n\tif datasetName == \"\" {\n\t\treturn nil, errors.New(\"parameter datasetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{datasetName}\", url.PathEscape(datasetName))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = 
reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PipelineClient) deletePipelineCreateRequest(ctx context.Context, pipelineName string, options *PipelineBeginDeletePipelineOptions) (*azcore.Request, error) {\n\turlPath := \"/pipelines/{pipelineName}\"\n\tif pipelineName == \"\" {\n\t\treturn nil, errors.New(\"parameter pipelineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{pipelineName}\", url.PathEscape(pipelineName))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KeyVaultClient) deleteSecretCreateRequest(ctx context.Context, vaultBaseURL string, secretName string, options *KeyVaultClientDeleteSecretOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/secrets/{secret-name}\"\n\tif secretName == \"\" {\n\t\treturn nil, errors.New(\"parameter secretName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{secret-name}\", url.PathEscape(secretName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDeleteApiGroupRequest() (request *DeleteApiGroupRequest) {\n\trequest = &DeleteApiGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CloudAPI\", \"2016-07-14\", \"DeleteApiGroup\", \"apigateway\", 
\"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *RoleAssignmentsClient) deleteByIDCreateRequest(ctx context.Context, roleAssignmentID string, options *RoleAssignmentsClientDeleteByIDOptions) (*policy.Request, error) {\n\turlPath := \"/{roleAssignmentId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{roleAssignmentId}\", roleAssignmentID)\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-04-01\")\n\tif options != nil && options.TenantID != nil {\n\t\treqQP.Set(\"tenantId\", *options.TenantID)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *NotebookClient) deleteNotebookCreateRequest(ctx context.Context, notebookName string, options *NotebookClientBeginDeleteNotebookOptions) (*policy.Request, error) {\n\turlPath := \"/notebooks/{notebookName}\"\n\tif notebookName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookName}\", url.PathEscape(notebookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DataFlowDebugSessionClient) deleteDataFlowDebugSessionCreateRequest(ctx context.Context, request DeleteDataFlowDebugSessionRequest, options *DataFlowDebugSessionClientDeleteDataFlowDebugSessionOptions) (*policy.Request, error) {\n\turlPath := \"/deleteDataFlowDebugSession\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, 
runtime.JoinPaths(client.endpoint, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, request); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *TagsClient) deleteAtScopeCreateRequest(ctx context.Context, scope string, options *TagsDeleteAtScopeOptions) (*azcore.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Resources/tags/default\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KeyVaultClient) deleteCertificateCreateRequest(ctx context.Context, vaultBaseURL string, certificateName string, options *KeyVaultClientDeleteCertificateOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/certificates/{certificate-name}\"\n\tif certificateName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificate-name}\", url.PathEscape(certificateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewDeleteRequest(space string) *DeleteRequest 
{\n\treq := new(DeleteRequest)\n\treq.initImpl(\"crud.delete\")\n\treq.setSpace(space)\n\treq.key = Tuple{}\n\treq.opts = DeleteOpts{}\n\treturn req\n}", "func (client *NotebookClient) deleteNotebookCreateRequest(ctx context.Context, notebookName string, options *NotebookBeginDeleteNotebookOptions) (*azcore.Request, error) {\n\turlPath := \"/notebooks/{notebookName}\"\n\tif notebookName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookName}\", url.PathEscape(notebookName))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
deleteAtManagementGroupCreateRequest creates the DeleteAtManagementGroup request.
func (client *PolicyDefinitionsClient) deleteAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsDeleteAtManagementGroupOptions) (*policy.Request, error) { urlPath := "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) if managementGroupID == "" { return nil, errors.New("parameter managementGroupID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID)) req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil }
[ "func CreateDeleteCorpGroupRequest() (request *DeleteCorpGroupRequest) {\n\trequest = &DeleteCorpGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Vcs\", \"2020-05-15\", \"DeleteCorpGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateDeleteAppInstanceGroupRequest() (request *DeleteAppInstanceGroupRequest) {\n\trequest = &DeleteAppInstanceGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"appstream-center\", \"2021-09-01\", \"DeleteAppInstanceGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateDeleteSkillGroupConfigRequest() (request *DeleteSkillGroupConfigRequest) {\n\trequest = &DeleteSkillGroupConfigRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Qualitycheck\", \"2019-01-15\", \"DeleteSkillGroupConfig\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreateDeleteVideoDnaGroupRequest() (request *DeleteVideoDnaGroupRequest) {\n\trequest = &DeleteVideoDnaGroupRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Green\", \"2018-05-09\", \"DeleteVideoDnaGroup\", \"/green/video/dna/group/delete\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDeleteApiGroupRequest() (request *DeleteApiGroupRequest) {\n\trequest = &DeleteApiGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CloudAPI\", \"2016-07-14\", \"DeleteApiGroup\", 
\"apigateway\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *UDBClient) NewDeleteUDBParamGroupRequest() *DeleteUDBParamGroupRequest {\n\treq := &DeleteUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (client *ExemptionsClient) listForManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *ExemptionsClientListForManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyExemptions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) listByManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *PolicyDefinitionsListByManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, 
errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PolicyRestrictionsClient) checkAtManagementGroupScopeCreateRequest(ctx context.Context, managementGroupID string, parameters CheckManagementGroupRestrictionsRequest, options *PolicyRestrictionsClientCheckAtManagementGroupScopeOptions) (*policy.Request, error) {\n\turlPath := \"/providers/{managementGroupsNamespace}/managementGroups/{managementGroupId}/providers/Microsoft.PolicyInsights/checkPolicyRestrictions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupsNamespace}\", url.PathEscape(\"Microsoft.Management\"))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn 
req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreateRemoveAppGroupRequest() (request *RemoveAppGroupRequest) {\n\trequest = &RemoveAppGroupRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"OpenSearch\", \"2017-12-25\", \"RemoveAppGroup\", \"/v4/openapi/app-groups/[appGroupIdentity]\", \"\", \"\")\n\trequest.Method = requests.DELETE\n\treturn\n}", "func NewDeleteaspecificPeeringGroupRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/peeringgroups/%s\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"DELETE\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func CreateDeleteCasterEpisodeGroupRequest() (request *DeleteCasterEpisodeGroupRequest) {\n\trequest = &DeleteCasterEpisodeGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"live\", \"2016-11-01\", \"DeleteCasterEpisodeGroup\", \"live\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewDeleteGroupsRequest(clientID string, groupsNames []string) DeleteGroupsRequest {\n\trequestHeader := &RequestHeader{\n\t\tAPIKey: API_Delete_Groups,\n\t\tAPIVersion: 0,\n\t\tClientID: clientID,\n\t}\n\treturn DeleteGroupsRequest{\n\t\tRequestHeader: requestHeader,\n\t\tGroupsNames: groupsNames,\n\t}\n}", "func (c *UMemClient) NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry 
for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func CreateDeleteCompliancePacksRequest() (request *DeleteCompliancePacksRequest) {\n\trequest = &DeleteCompliancePacksRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Config\", \"2020-09-07\", \"DeleteCompliancePacks\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateDeleteDataCenterRequest() (request *DeleteDataCenterRequest) {\n\trequest = &DeleteDataCenterRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cassandra\", \"2019-01-01\", \"DeleteDataCenter\", \"Cassandra\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func DeleteObjFromGroupCmd(objectName string, object api.Object, group api.Object) *Command {\n\treturn &Command{\n\t\tName: fmt.Sprintf(\"delete%s\", capitalize(objectName)),\n\t\tUsageLine: fmt.Sprintf(`%[1]sgroup delete%[2]s (--id%[2]s | --name%[2]s) (--idGroup | --nameGroup)`, objectName, capitalize(objectName)),\n\t\tLong: fmt.Sprintf(`\nDelete a %[1]s from a %[1]s group.\n\nThe flags for \"delete%[2]s\" %[1]sgroup action are:\n\nMandatory:\n\t--id%[2]s\n\t\tspecify the %[1]s id.\n\tor\n\t--name%[2]s\n\t\tspecify the %[1]s name.\n\t--idGroup\n\t\tspecify the group id.\n\tor\n\t--nameGroup\n\t\tspecify the group name.\n`, objectName, capitalize(objectName)),\n\t\tRun: func(cmd *Command, args []string) {\n\t\t\tvar idGroup, idObject int64\n\t\t\tvar nameGroup, nameObject string\n\t\t\tcmd.Flag.Usage = func() { cmd.PrintUsage() }\n\t\t\tcmd.Flag.Int64Var(&idGroup, \"idGroup\", -1, \"\")\n\t\t\tcmd.Flag.Int64Var(&idObject, fmt.Sprintf(\"id%s\", capitalize(objectName)), -1, \"\")\n\t\t\tcmd.Flag.StringVar(&nameGroup, \"nameGroup\", DEFAULT_STRING_FLAG_VALUE, \"\")\n\t\t\tcmd.Flag.StringVar(&nameObject, fmt.Sprintf(\"name%s\", capitalize(objectName)), DEFAULT_STRING_FLAG_VALUE, \"\")\n\t\t\tcmd.ParseArgs(args)\n\n\t\t\tvar err error\n\t\t\tif idObject != -1 
{\n\t\t\t\terr = cmd.Capi.GetObjectRef(objectName, idObject, object)\n\t\t\t} else if nameObject != DEFAULT_STRING_FLAG_VALUE {\n\t\t\t\terr = cmd.Capi.GetObjectRefByName(objectName, nameObject, object)\n\t\t\t} else {\n\t\t\t\tcmd.PrintUsage()\n\t\t\t\tos.Exit(EXIT_FLAG_ERROR)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tcmd.PrintResult(\"\", err)\n\t\t\t}\n\n\t\t\tvar objectGroupName = api.GetObjectGroupName(objectName)\n\t\t\tif idGroup != -1 {\n\t\t\t\terr = cmd.Capi.GetObjectRef(objectGroupName, idGroup, group)\n\t\t\t} else if nameObject != DEFAULT_STRING_FLAG_VALUE {\n\t\t\t\terr = cmd.Capi.GetObjectRefByName(objectGroupName, nameGroup, group)\n\t\t\t} else {\n\t\t\t\tcmd.PrintUsage()\n\t\t\t\tos.Exit(EXIT_FLAG_ERROR)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tcmd.PrintResult(\"\", err)\n\t\t\t}\n\t\t\tcmd.PrintResult(cmd.Capi.DeleteObjectFromGroup(objectName, object, group))\n\t\t},\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getAtManagementGroupCreateRequest creates the GetAtManagementGroup request.
func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) { urlPath := "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) if managementGroupID == "" { return nil, errors.New("parameter managementGroupID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil }
[ "func (client *ExemptionsClient) listForManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *ExemptionsClientListForManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyExemptions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) listByManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *PolicyDefinitionsListByManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top 
!= nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PolicyRestrictionsClient) checkAtManagementGroupScopeCreateRequest(ctx context.Context, managementGroupID string, parameters CheckManagementGroupRestrictionsRequest, options *PolicyRestrictionsClientCheckAtManagementGroupScopeOptions) (*policy.Request, error) {\n\turlPath := \"/providers/{managementGroupsNamespace}/managementGroups/{managementGroupId}/providers/Microsoft.PolicyInsights/checkPolicyRestrictions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupsNamespace}\", url.PathEscape(\"Microsoft.Management\"))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := 
\"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *PolicyDefinitionsClient) deleteAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsDeleteAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ContainerGroupsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IPGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IPGroupsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDescribeContainerGroupMetricRequest() (request *DescribeContainerGroupMetricRequest) {\n\trequest = &DescribeContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}", "func (c *UMemClient) NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func NewIgroupCreateRequest() *IgroupCreateRequest {\n\treturn &IgroupCreateRequest{}\n}", "func (client *LocalRulestacksClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *LocalRulestacksClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetUserGroupRequest() (request *GetUserGroupRequest) {\n\trequest = &GetUserGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"csas\", \"2023-01-20\", \"GetUserGroup\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func CreateCreateApplicationGroupRequest() (request *CreateApplicationGroupRequest) {\n\trequest = &CreateApplicationGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"oos\", \"2019-06-01\", \"CreateApplicationGroup\", \"oos\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateGetDataServiceGroupRequest() (request *GetDataServiceGroupRequest) {\n\trequest = &GetDataServiceGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dataworks-public\", \"2020-05-18\", \"GetDataServiceGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *CapacityReservationsClient) listByCapacityReservationGroupCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationsListByCapacityReservationGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDescribeScalingTaskGroupRequest() (request *DescribeScalingTaskGroupRequest) {\n\trequest = &DescribeScalingTaskGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Emr\", \"2016-04-08\", \"DescribeScalingTaskGroup\", \"emr\", \"openAPI\")\n\treturn\n}", "func CreateDescribeMultiContainerGroupMetricRequest() (request *DescribeMultiContainerGroupMetricRequest) {\n\trequest = &DescribeMultiContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeMultiContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}", "func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDescribeScalingGroupsRequest() (request *DescribeScalingGroupsRequest) {\n\trequest = &DescribeScalingGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ess\", \"2014-08-28\", \"DescribeScalingGroups\", \"ess\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getAtManagementGroupHandleResponse handles the GetAtManagementGroup response.
func (client *PolicyDefinitionsClient) getAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsGetAtManagementGroupResponse, error) { result := PolicyDefinitionsGetAtManagementGroupResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil { return PolicyDefinitionsGetAtManagementGroupResponse{}, runtime.NewResponseError(err, resp) } return result, nil }
[ "func (client *ExemptionsClient) listForManagementGroupHandleResponse(resp *http.Response) (ExemptionsClientListForManagementGroupResponse, error) {\n\tresult := ExemptionsClientListForManagementGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ExemptionListResult); err != nil {\n\t\treturn ExemptionsClientListForManagementGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyRestrictionsClient) checkAtManagementGroupScopeHandleResponse(resp *http.Response) (PolicyRestrictionsClientCheckAtManagementGroupScopeResponse, error) {\n\tresult := PolicyRestrictionsClientCheckAtManagementGroupScopeResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckRestrictionsResult); err != nil {\n\t\treturn PolicyRestrictionsClientCheckAtManagementGroupScopeResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) listByManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsListByManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsListByManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {\n\t\treturn PolicyDefinitionsListByManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (ContainerGroupsClientListByResourceGroupResponse, error) {\n\tresult := ContainerGroupsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedInstancesClientListByResourceGroupResponse, error) {\n\tresult := ManagedInstancesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyEventsClient) listQueryResultsForManagementGroupHandleResponse(resp *http.Response) (PolicyEventsClientListQueryResultsForManagementGroupResponse, error) {\n\tresult := PolicyEventsClientListQueryResultsForManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyEventsQueryResults); err != nil {\n\t\treturn PolicyEventsClientListQueryResultsForManagementGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IPGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (IPGroupsListByResourceGroupResponse, error) {\n\tresult := IPGroupsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPGroupListResult); err != nil {\n\t\treturn IPGroupsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupResponder(resp *http.Response) (result UsageDetailsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *http.Response) 
(DedicatedHostsListByHostGroupResponse, error) {\n\tresult := DedicatedHostsListByHostGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostListResult); err != nil {\n\t\treturn DedicatedHostsListByHostGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CapacityReservationsClient) listByCapacityReservationGroupHandleResponse(resp *http.Response) (CapacityReservationsListByCapacityReservationGroupResponse, error) {\n\tresult := CapacityReservationsListByCapacityReservationGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationListResult); err != nil {\n\t\treturn CapacityReservationsListByCapacityReservationGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *IPGroupsClient) updateGroupsHandleResponse(resp *http.Response) (IPGroupsUpdateGroupsResponse, error) {\n\tresult := IPGroupsUpdateGroupsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPGroup); err != nil {\n\t\treturn IPGroupsUpdateGroupsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listBySQLVMGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListBySQLVMGroupResponse, error) {\n\tresult := SQLVirtualMachinesClientListBySQLVMGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListBySQLVMGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *azcore.Response) (DedicatedHostListResultResponse, error) {\n\tvar val *DedicatedHostListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostListResultResponse{}, err\n\t}\n\treturn DedicatedHostListResultResponse{RawResponse: resp.Response, DedicatedHostListResult: val}, 
nil\n}", "func (client WorkloadNetworksClient) GetVMGroupResponder(resp *http.Response) (result WorkloadNetworkVMGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *GroupClient) listByServiceHandleResponse(resp *http.Response) (GroupListByServiceResponse, error) {\n\tresult := GroupListByServiceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupCollection); err != nil {\n\t\treturn GroupListByServiceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AvailabilityGroupListenersClient) listByGroupHandleResponse(resp *http.Response) (AvailabilityGroupListenersClientListByGroupResponse, error) {\n\tresult := AvailabilityGroupListenersClientListByGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilityGroupListenerListResult); err != nil {\n\t\treturn AvailabilityGroupListenersClientListByGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client IotHubResourceClient) GetEventHubConsumerGroupResponder(resp *http.Response) (result EventHubConsumerGroupInfo, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client LongTermRetentionBackupsClient) GetByResourceGroupResponder(resp *http.Response) (result LongTermRetentionBackup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getBuiltInCreateRequest creates the GetBuiltIn request.
func (client *PolicyDefinitionsClient) getBuiltInCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsGetBuiltInOptions) (*policy.Request, error) { urlPath := "/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil }
[ "func (client *PolicyDefinitionsClient) listBuiltInCreateRequest(ctx context.Context, options *PolicyDefinitionsListBuiltInOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Authorization/policyDefinitions\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) getBuildCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildName string, options *BuildServiceClientGetBuildOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds/{buildName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", 
url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildName}\", url.PathEscape(buildName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetOpenNLURequest() (request *GetOpenNLURequest) {\n\trequest = &GetOpenNLURequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetOpenNLU\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *BuildServiceClient) getSupportedBuildpackCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildpackName string, options *BuildServiceClientGetSupportedBuildpackOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/supportedBuildpacks/{buildpackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildpackName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildpackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildpackName}\", url.PathEscape(buildpackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ImplicitClient) getRequiredGlobalQueryCreateRequest(ctx context.Context, options *ImplicitClientGetRequiredGlobalQueryOptions) (*policy.Request, error) {\n\turlPath := \"/reqopt/global/required/query\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"required-global-query\", client.requiredGlobalQuery)\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetPatchBaselineRequest() (request *GetPatchBaselineRequest) {\n\trequest = &GetPatchBaselineRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"oos\", \"2019-06-01\", \"GetPatchBaseline\", \"oos\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *Client) BuildGetRequest(ctx context.Context, v interface{}) (*http.Request, 
error) {\n\tvar (\n\t\tname string\n\t)\n\t{\n\t\tp, ok := v.(*creatures.GetPayload)\n\t\tif !ok {\n\t\t\treturn nil, goahttp.ErrInvalidType(\"creatures\", \"get\", \"*creatures.GetPayload\", v)\n\t\t}\n\t\tname = p.Name\n\t}\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: GetCreaturesPath(name)}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"creatures\", \"get\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func CreateGetManagedRuleRequest() (request *GetManagedRuleRequest) {\n\trequest = &GetManagedRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Config\", \"2020-09-07\", \"GetManagedRule\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PipelineClient) getPipelinesByWorkspaceCreateRequest(ctx context.Context, options *PipelineGetPipelinesByWorkspaceOptions) (*azcore.Request, error) {\n\turlPath := \"/pipelines\"\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (cli *OpsGenieClient) buildGetRequest(uri string, request interface{}) goreq.Request {\n\treq := cli.buildCommonRequestProps()\n\treq.Method = \"GET\"\n\treq.ContentType = \"application/x-www-form-urlencoded; charset=UTF-8\"\n\turi = cli.OpsGenieAPIUrl() + uri\n\tif request != nil {\n\t\tv, _ := goquery.Values(request)\n\t\treq.Uri = uri + \"?\" + v.Encode()\n\t} else {\n\t\treq.Uri = uri\n\t}\n\n\tlogging.Logger().Info(\"Executing OpsGenie request to [\" + uri + \"] with parameters: \")\n\treturn req\n}", "func (client *GremlinResourcesClient) 
getGremlinDatabaseCreateRequest(ctx context.Context, resourceGroupName string, accountName string, databaseName string, options *GremlinResourcesClientGetGremlinDatabaseOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *TriggerClient) getTriggersByWorkspaceCreateRequest(ctx context.Context, options *TriggerGetTriggersByWorkspaceOptions) (*azcore.Request, error) {\n\turlPath := \"/triggers\"\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", 
\"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) getBuildResultCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildName string, buildResultName string, options *BuildServiceClientGetBuildResultOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds/{buildName}/results/{buildResultName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildName}\", url.PathEscape(buildName))\n\tif buildResultName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildResultName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildResultName}\", url.PathEscape(buildResultName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TriggerClient) getTriggerCreateRequest(ctx context.Context, triggerName string, options *TriggerGetTriggerOptions) (*azcore.Request, error) {\n\turlPath := \"/triggers/{triggerName}\"\n\tif triggerName == \"\" {\n\t\treturn nil, errors.New(\"parameter triggerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{triggerName}\", url.PathEscape(triggerName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Header.Set(\"If-None-Match\", *options.IfNoneMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AlertsClient) getCreateRequest(ctx context.Context, scope string, alertID string, options *AlertsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CostManagement/alerts/{alertId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\turlPath = strings.ReplaceAll(urlPath, \"{alertId}\", alertID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *BuildServiceClient) getBuildServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, 
buildServiceName string, options *BuildServiceClientGetBuildServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetServiceInputMappingRequest() (request *GetServiceInputMappingRequest) {\n\trequest = &GetServiceInputMappingRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"industry-brain\", \"2018-07-12\", \"GetServiceInputMapping\", \"\", \"\")\n\treturn\n}", "func (c *Client) BuildGetRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tvar (\n\t\tid string\n\t)\n\t{\n\t\tp, ok := v.(*warehouse.GetPayload)\n\t\tif !ok {\n\t\t\treturn nil, 
goahttp.ErrInvalidType(\"Warehouse\", \"Get\", \"*warehouse.GetPayload\", v)\n\t\t}\n\t\tid = p.ID\n\t}\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: GetWarehousePath(id)}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"Warehouse\", \"Get\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func CreateGetRenderResultRequest() (request *GetRenderResultRequest) {\n\trequest = &GetRenderResultRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ivpd\", \"2019-06-25\", \"GetRenderResult\", \"ivpd\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getBuiltInHandleResponse handles the GetBuiltIn response.
func (client *PolicyDefinitionsClient) getBuiltInHandleResponse(resp *http.Response) (PolicyDefinitionsGetBuiltInResponse, error) { result := PolicyDefinitionsGetBuiltInResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil { return PolicyDefinitionsGetBuiltInResponse{}, runtime.NewResponseError(err, resp) } return result, nil }
[ "func (client *PolicyDefinitionsClient) listBuiltInHandleResponse(resp *http.Response) (PolicyDefinitionsListBuiltInResponse, error) {\n\tresult := PolicyDefinitionsListBuiltInResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {\n\t\treturn PolicyDefinitionsListBuiltInResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *LiveOutputsClient) getHandleResponse(resp *http.Response) (LiveOutputsClientGetResponse, error) {\n\tresult := LiveOutputsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutput); err != nil {\n\t\treturn LiveOutputsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) getSupportedBuildpackHandleResponse(resp *http.Response) (BuildServiceClientGetSupportedBuildpackResponse, error) {\n\tresult := BuildServiceClientGetSupportedBuildpackResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SupportedBuildpackResource); err != nil {\n\t\treturn BuildServiceClientGetSupportedBuildpackResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) getHandleResponse(resp *http.Response) (OutputsGetResponse, error) {\n\tresult := OutputsGetResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) getRunOutputHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientGetRunOutputResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientGetRunOutputResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RunOutput); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientGetRunOutputResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client 
*SQLResourcesClient) getSQLUserDefinedFunctionHandleResponse(resp *http.Response) (SQLResourcesClientGetSQLUserDefinedFunctionResponse, error) {\n\tresult := SQLResourcesClientGetSQLUserDefinedFunctionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLUserDefinedFunctionGetResults); err != nil {\n\t\treturn SQLResourcesClientGetSQLUserDefinedFunctionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PipelineClient) getPipelinesByWorkspaceHandleResponse(resp *azcore.Response) (PipelineListResponseResponse, error) {\n\tvar val *PipelineListResponse\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn PipelineListResponseResponse{}, err\n\t}\n\treturn PipelineListResponseResponse{RawResponse: resp.Response, PipelineListResponse: val}, nil\n}", "func (client *PipelineClient) getPipelineHandleResponse(resp *azcore.Response) (PipelineResourceResponse, error) {\n\tvar val *PipelineResource\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn PipelineResourceResponse{}, err\n\t}\n\treturn PipelineResourceResponse{RawResponse: resp.Response, PipelineResource: val}, nil\n}", "func (client *IntegrationRuntimesClient) getStatusHandleResponse(resp *http.Response) (IntegrationRuntimesClientGetStatusResponse, error) {\n\tresult := IntegrationRuntimesClientGetStatusResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IntegrationRuntimeStatusResponse); err != nil {\n\t\treturn IntegrationRuntimesClientGetStatusResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getTriggeredWebJobHandleResponse(resp *http.Response) (WebAppsGetTriggeredWebJobResponse, error) {\n\tresult := WebAppsGetTriggeredWebJobResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TriggeredWebJob); err != nil {\n\t\treturn WebAppsGetTriggeredWebJobResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *TriggerClient) getTriggerHandleResponse(resp *azcore.Response) 
(TriggerResourceResponse, error) {\n\tvar val *TriggerResource\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn TriggerResourceResponse{}, err\n\t}\n\treturn TriggerResourceResponse{RawResponse: resp.Response, TriggerResource: val}, nil\n}", "func (client *WebAppsClient) getProcessHandleResponse(resp *http.Response) (WebAppsGetProcessResponse, error) {\n\tresult := WebAppsGetProcessResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProcessInfo); err != nil {\n\t\treturn WebAppsGetProcessResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getFunctionHandleResponse(resp *http.Response) (WebAppsGetFunctionResponse, error) {\n\tresult := WebAppsGetFunctionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FunctionEnvelope); err != nil {\n\t\treturn WebAppsGetFunctionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLResourcesClient) getSQLTriggerHandleResponse(resp *http.Response) (SQLResourcesClientGetSQLTriggerResponse, error) {\n\tresult := SQLResourcesClientGetSQLTriggerResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLTriggerGetResults); err != nil {\n\t\treturn SQLResourcesClientGetSQLTriggerResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) getBuildHandleResponse(resp *http.Response) (BuildServiceClientGetBuildResponse, error) {\n\tresult := BuildServiceClientGetBuildResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Build); err != nil {\n\t\treturn BuildServiceClientGetBuildResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ContainerAppsDiagnosticsClient) getDetectorHandleResponse(resp *http.Response) (ContainerAppsDiagnosticsClientGetDetectorResponse, error) {\n\tresult := ContainerAppsDiagnosticsClientGetDetectorResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Diagnostics); err != nil {\n\t\treturn 
ContainerAppsDiagnosticsClientGetDetectorResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *Client) getScriptHandleResponse(resp *http.Response) (ClientGetScriptResponse, error) {\n\tresult := ClientGetScriptResponse{}\n\tbody, err := runtime.Payload(resp)\n\tif err != nil {\n\t\treturn ClientGetScriptResponse{}, err\n\t}\n\ttxt := string(body)\n\tresult.Value = &txt\n\treturn result, nil\n}", "func (client *LiveOutputsClient) listHandleResponse(resp *http.Response) (LiveOutputsClientListResponse, error) {\n\tresult := LiveOutputsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutputListResult); err != nil {\n\t\treturn LiveOutputsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client JobClient) GetOutputResponder(resp *http.Response) (result String, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
listBuiltInCreateRequest creates the ListBuiltIn request.
func (client *PolicyDefinitionsClient) listBuiltInCreateRequest(ctx context.Context, options *PolicyDefinitionsListBuiltInOptions) (*policy.Request, error) { urlPath := "/providers/Microsoft.Authorization/policyDefinitions" req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") if options != nil && options.Top != nil { reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() unencodedParams := []string{req.Raw().URL.RawQuery} if options != nil && options.Filter != nil { unencodedParams = append(unencodedParams, "$filter="+*options.Filter) } req.Raw().URL.RawQuery = strings.Join(unencodedParams, "&") req.Raw().Header.Set("Accept", "application/json") return req, nil }
[ "func (client *PolicyDefinitionsClient) getBuiltInCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsGetBuiltInOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) listBuildsCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, options *BuildServiceClientListBuildsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetApplicationListRequest() (request *GetApplicationListRequest) {\n\trequest = &GetApplicationListRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"mse\", \"2019-05-31\", \"GetApplicationList\", \"mse\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListWarehousePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"Warehouse\", \"List\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (c *Client) NewListApplicationRequest(ctx context.Context, path string, payload *ListApplicationPayload) (*http.Request, error) {\n\tvar body bytes.Buffer\n\terr := c.Encoder.Encode(payload, &body, \"*/*\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to encode body: %s\", err)\n\t}\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := http.NewRequest(\"GET\", u.String(), &body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\theader := req.Header\n\theader.Set(\"Content-Type\", \"application/json\")\n\treturn req, nil\n}", "func NewListRequest(payload *step.ListPayload) *steppb.ListRequest {\n\tmessage := &steppb.ListRequest{\n\t\tId: payload.ID,\n\t}\n\treturn 
message\n}", "func CreateListProjectAPIsRequest() (request *ListProjectAPIsRequest) {\n\trequest = &ListProjectAPIsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"imm\", \"2017-09-06\", \"ListProjectAPIs\", \"imm\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListCreaturesPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"creatures\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *KustoOperationsClient) listCreateRequest(ctx context.Context, options *KustoOperationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Synapse/kustooperations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListResourcePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"resource\", \"List\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *BuildServiceClient) listSupportedBuildpacksCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, options *BuildServiceClientListSupportedBuildpacksOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/supportedBuildpacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *BuildServiceClient) listBuildServicesCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *BuildServiceClientListBuildServicesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif 
resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (rm *resourceManager) newListRequestPayload(\n\tr *resource,\n) (*svcsdk.DescribeLaunchTemplateVersionsInput, error) {\n\tres := &svcsdk.DescribeLaunchTemplateVersionsInput{}\n\n\tif r.ko.Spec.DryRun != nil {\n\t\tres.SetDryRun(*r.ko.Spec.DryRun)\n\t}\n\tif r.ko.Spec.LaunchTemplateID != nil {\n\t\tres.SetLaunchTemplateId(*r.ko.Spec.LaunchTemplateID)\n\t}\n\tif r.ko.Spec.LaunchTemplateName != nil {\n\t\tres.SetLaunchTemplateName(*r.ko.Spec.LaunchTemplateName)\n\t}\n\n\treturn res, nil\n}", "func (client *LocalRulestacksClient) listAppIDsCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListAppIDsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listAppIds\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\tif options != nil && options.AppIDVersion != nil {\n\t\treqQP.Set(\"appIdVersion\", *options.AppIDVersion)\n\t}\n\tif options != nil && options.AppPrefix != nil {\n\t\treqQP.Set(\"appPrefix\", *options.AppPrefix)\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"skip\", *options.Skip)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewListRequest() *rolespb.ListRequest {\n\tmessage := &rolespb.ListRequest{}\n\treturn message\n}", "func (c *Client) BuildListRequest(ctx context.Context, v any) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListStoragePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"storage\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *SubscriptionsClient) listLocationsCreateRequest(ctx context.Context, subscriptionID string, options *SubscriptionsListLocationsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/locations\"\n\tif subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter subscriptionID cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\tif options != nil && options.IncludeExtendedLocations != nil {\n\t\treqQP.Set(\"includeExtendedLocations\", strconv.FormatBool(*options.IncludeExtendedLocations))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *LocalRulestacksClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewListRequest() *todopb.ListRequest {\n\tmessage := &todopb.ListRequest{}\n\treturn message\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
listBuiltInHandleResponse handles the ListBuiltIn response.
func (client *PolicyDefinitionsClient) listBuiltInHandleResponse(resp *http.Response) (PolicyDefinitionsListBuiltInResponse, error) { result := PolicyDefinitionsListBuiltInResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil { return PolicyDefinitionsListBuiltInResponse{}, runtime.NewResponseError(err, resp) } return result, nil }
[ "func (client *PolicyDefinitionsClient) getBuiltInHandleResponse(resp *http.Response) (PolicyDefinitionsGetBuiltInResponse, error) {\n\tresult := PolicyDefinitionsGetBuiltInResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsGetBuiltInResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ReplicationsClient) listHandleResponse(resp *azcore.Response) (ReplicationListResultResponse, error) {\n\tvar val *ReplicationListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn ReplicationListResultResponse{}, err\n\t}\n\treturn ReplicationListResultResponse{RawResponse: resp.Response, ReplicationListResult: val}, nil\n}", "func (client *LiveOutputsClient) listHandleResponse(resp *http.Response) (LiveOutputsClientListResponse, error) {\n\tresult := LiveOutputsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutputListResult); err != nil {\n\t\treturn LiveOutputsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) listAppIDsHandleResponse(resp *http.Response) (LocalRulestacksClientListAppIDsResponse, error) {\n\tresult := LocalRulestacksClientListAppIDsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListAppIDResponse); err != nil {\n\t\treturn LocalRulestacksClientListAppIDsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TagsClient) listHandleResponse(resp *azcore.Response) (TagsListResultResponse, error) {\n\tvar val *TagsListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn TagsListResultResponse{}, err\n\t}\n\treturn TagsListResultResponse{RawResponse: resp.Response, TagsListResult: val}, nil\n}", "func (client *BuildServiceClient) listBuildsHandleResponse(resp *http.Response) (BuildServiceClientListBuildsResponse, error) {\n\tresult := BuildServiceClientListBuildsResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.BuildCollection); err != nil {\n\t\treturn BuildServiceClientListBuildsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *Client) listHandleResponse(resp *http.Response) (ClientListResponse, error) {\n\tresult := ClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResponse); err != nil {\n\t\treturn ClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listWebJobsHandleResponse(resp *http.Response) (WebAppsListWebJobsResponse, error) {\n\tresult := WebAppsListWebJobsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebJobCollection); err != nil {\n\t\treturn WebAppsListWebJobsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *JobsClient) listByAgentHandleResponse(resp *http.Response) (JobsListByAgentResponse, error) {\n\tresult := JobsListByAgentResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.JobListResult); err != nil {\n\t\treturn JobsListByAgentResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) listSupportedBuildpacksHandleResponse(resp *http.Response) (BuildServiceClientListSupportedBuildpacksResponse, error) {\n\tresult := BuildServiceClientListSupportedBuildpacksResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SupportedBuildpacksCollection); err != nil {\n\t\treturn BuildServiceClientListSupportedBuildpacksResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLResourcesClient) listSQLUserDefinedFunctionsHandleResponse(resp *http.Response) (SQLResourcesClientListSQLUserDefinedFunctionsResponse, error) {\n\tresult := SQLResourcesClientListSQLUserDefinedFunctionsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLUserDefinedFunctionListResult); err != nil {\n\t\treturn SQLResourcesClientListSQLUserDefinedFunctionsResponse{}, err\n\t}\n\treturn result, 
nil\n}", "func (client *ManagersClient) listMetricDefinitionHandleResponse(resp *http.Response) (ManagersClientListMetricDefinitionResponse, error) {\n\tresult := ManagersClientListMetricDefinitionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricDefinitionList); err != nil {\n\t\treturn ManagersClientListMetricDefinitionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listProcessesHandleResponse(resp *http.Response) (WebAppsListProcessesResponse, error) {\n\tresult := WebAppsListProcessesResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProcessInfoCollection); err != nil {\n\t\treturn WebAppsListProcessesResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) listByStreamingJobHandleResponse(resp *http.Response) (OutputsClientListByStreamingJobResponse, error) {\n\tresult := OutputsClientListByStreamingJobResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OutputListResult); err != nil {\n\t\treturn OutputsClientListByStreamingJobResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PlansClient) listWebAppsHandleResponse(resp *http.Response) (PlansClientListWebAppsResponse, error) {\n\tresult := PlansClientListWebAppsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err != nil {\n\t\treturn PlansClientListWebAppsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SolutionsReferenceDataClient) listByHomeRegionHandleResponse(resp *http.Response) (SolutionsReferenceDataClientListByHomeRegionResponse, error) {\n\tresult := SolutionsReferenceDataClientListByHomeRegionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SolutionsReferenceDataList); err != nil {\n\t\treturn SolutionsReferenceDataClientListByHomeRegionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) listByStreamingJobHandleResponse(resp *http.Response) 
(OutputsListByStreamingJobResponse, error) {\n\tresult := OutputsListByStreamingJobResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OutputListResult); err != nil {\n\t\treturn OutputsListByStreamingJobResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ApplicationClient) listOperationsHandleResponse(resp *http.Response) (ApplicationClientListOperationsResponse, error) {\n\tresult := ApplicationClientListOperationsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil {\n\t\treturn ApplicationClientListOperationsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listTriggeredWebJobsHandleResponse(resp *http.Response) (WebAppsListTriggeredWebJobsResponse, error) {\n\tresult := WebAppsListTriggeredWebJobsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TriggeredWebJobCollection); err != nil {\n\t\treturn WebAppsListTriggeredWebJobsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
listByManagementGroupCreateRequest creates the ListByManagementGroup request.
func (client *PolicyDefinitionsClient) listByManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *PolicyDefinitionsListByManagementGroupOptions) (*policy.Request, error) { urlPath := "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions" if managementGroupID == "" { return nil, errors.New("parameter managementGroupID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") if options != nil && options.Top != nil { reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() unencodedParams := []string{req.Raw().URL.RawQuery} if options != nil && options.Filter != nil { unencodedParams = append(unencodedParams, "$filter="+*options.Filter) } req.Raw().URL.RawQuery = strings.Join(unencodedParams, "&") req.Raw().Header.Set("Accept", "application/json") return req, nil }
[ "func (client *ExemptionsClient) listForManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *ExemptionsClientListForManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyExemptions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ContainerGroupsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := 
runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IPGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IPGroupsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *LocalRulestacksClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, 
req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client *HardwareComponentGroupsClient) listByDeviceCreateRequest(ctx context.Context, deviceName string, resourceGroupName string, managerName string, options *HardwareComponentGroupsClientListByDeviceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/devices/{deviceName}/hardwareComponentGroups\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", deviceName)\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", client.subscriptionID)\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", resourceGroupName)\n\turlPath = strings.ReplaceAll(urlPath, \"{managerName}\", managerName)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListByManagementGroup(ctx context.Context, managementGroupID string, expand string, filter string, skiptoken string, top *int32, apply string) (result UsageDetailsListResultPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/UsageDetailsClient.ListByManagementGroup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.udlr.Response.Response != nil {\n\t\t\t\tsc = result.udlr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: top,\n\t\t\tConstraints: []validation.Constraint{{Target: \"top\", Name: validation.Null, Rule: false,\n\t\t\t\tChain: 
[]validation.Constraint{{Target: \"top\", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},\n\t\t\t\t\t{Target: \"top\", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"consumption.UsageDetailsClient\", \"ListByManagementGroup\", err.Error())\n\t}\n\n\tresult.fn = client.listByManagementGroupNextResults\n\treq, err := client.ListByManagementGroupPreparer(ctx, managementGroupID, expand, filter, skiptoken, top, apply)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListByManagementGroupSender(req)\n\tif err != nil {\n\t\tresult.udlr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.udlr, err = client.ListByManagementGroupResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", 
url.PathEscape(hostGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client AlertsClient) ListByManagementGroupsPreparer(ctx context.Context, managementGroupID string, filter string, skiptoken string, top *int32) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"managementGroupId\": autorest.Encode(\"path\", managementGroupID),\n\t}\n\n\tconst APIVersion = \"2018-08-01-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\tif len(filter) > 0 {\n\t\tqueryParameters[\"$filter\"] = autorest.Encode(\"query\", filter)\n\t}\n\tif len(skiptoken) > 0 {\n\t\tqueryParameters[\"$skiptoken\"] = autorest.Encode(\"query\", skiptoken)\n\t}\n\tif top != nil {\n\t\tqueryParameters[\"$top\"] = autorest.Encode(\"query\", *top)\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.CostManagement/alerts\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client *SyncGroupsClient) listByDatabaseCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, options *SyncGroupsClientListByDatabaseOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupPreparer(ctx context.Context, managementGroupID string, expand string, filter string, skiptoken string, top *int32, apply string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"managementGroupId\": autorest.Encode(\"path\", managementGroupID),\n\t}\n\n\tconst APIVersion = \"2018-06-30\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\tif len(expand) > 0 {\n\t\tqueryParameters[\"$expand\"] = autorest.Encode(\"query\", expand)\n\t}\n\tif len(filter) > 0 {\n\t\tqueryParameters[\"$filter\"] = autorest.Encode(\"query\", filter)\n\t}\n\tif 
len(skiptoken) > 0 {\n\t\tqueryParameters[\"$skiptoken\"] = autorest.Encode(\"query\", skiptoken)\n\t}\n\tif top != nil {\n\t\tqueryParameters[\"$top\"] = autorest.Encode(\"query\", *top)\n\t}\n\tif len(apply) > 0 {\n\t\tqueryParameters[\"$apply\"] = autorest.Encode(\"query\", apply)\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Consumption/usageDetails\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client *SQLVirtualMachinesClient) listBySQLVMGroupCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, options *SQLVirtualMachinesClientListBySQLVMGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/sqlVirtualMachines\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListExperimentGroupsRequest() (request *ListExperimentGroupsRequest) {\n\trequest = &ListExperimentGroupsRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"PaiRecService\", \"2022-12-13\", \"ListExperimentGroups\", \"/api/v1/experimentgroups\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PolicyEventsClient) listQueryResultsForManagementGroupCreateRequest(ctx context.Context, policyEventsResource Enum1, managementGroupsNamespace Enum0, managementGroupName string, options *QueryOptions) (*policy.Request, error) {\n\turlPath := \"/providers/{managementGroupsNamespace}/managementGroups/{managementGroupName}/providers/Microsoft.PolicyInsights/policyEvents/{policyEventsResource}/queryResults\"\n\tif 
policyEventsResource == \"\" {\n\t\treturn nil, errors.New(\"parameter policyEventsResource cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyEventsResource}\", url.PathEscape(string(policyEventsResource)))\n\tif managementGroupsNamespace == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupsNamespace cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupsNamespace}\", url.PathEscape(string(managementGroupsNamespace)))\n\tif managementGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupName}\", url.PathEscape(managementGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-10-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.OrderBy != nil {\n\t\treqQP.Set(\"$orderby\", *options.OrderBy)\n\t}\n\tif options != nil && options.Select != nil {\n\t\treqQP.Set(\"$select\", *options.Select)\n\t}\n\tif options != nil && options.From != nil {\n\t\treqQP.Set(\"$from\", options.From.Format(time.RFC3339Nano))\n\t}\n\tif options != nil && options.To != nil {\n\t\treqQP.Set(\"$to\", options.To.Format(time.RFC3339Nano))\n\t}\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Apply != nil {\n\t\treqQP.Set(\"$apply\", *options.Apply)\n\t}\n\tif options != nil && options.SkipToken != nil {\n\t\treqQP.Set(\"$skiptoken\", *options.SkipToken)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *GroupClient) listByServiceCreateRequest(ctx context.Context, resourceGroupName 
string, serviceName string, options *GroupListByServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CatalogsClient) listDeviceGroupsCreateRequest(ctx context.Context, resourceGroupName string, catalogName string, listDeviceGroupsRequest ListDeviceGroupsRequest, options *CatalogsClientListDeviceGroupsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureSphere/catalogs/{catalogName}/listDeviceGroups\"\n\tif client.subscriptionID == 
\"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif catalogName == \"\" {\n\t\treturn nil, errors.New(\"parameter catalogName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{catalogName}\", url.PathEscape(catalogName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01-preview\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\tif options != nil && options.Maxpagesize != nil {\n\t\treqQP.Set(\"$maxpagesize\", strconv.FormatInt(int64(*options.Maxpagesize), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, listDeviceGroupsRequest)\n}", "func CreateListMultiAccountResourceGroupsRequest() (request *ListMultiAccountResourceGroupsRequest) {\n\trequest = &ListMultiAccountResourceGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ResourceCenter\", \"2022-12-01\", \"ListMultiAccountResourceGroups\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
listByManagementGroupHandleResponse handles the ListByManagementGroup response.
func (client *PolicyDefinitionsClient) listByManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsListByManagementGroupResponse, error) { result := PolicyDefinitionsListByManagementGroupResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil { return PolicyDefinitionsListByManagementGroupResponse{}, runtime.NewResponseError(err, resp) } return result, nil }
[ "func (client *ExemptionsClient) listForManagementGroupHandleResponse(resp *http.Response) (ExemptionsClientListForManagementGroupResponse, error) {\n\tresult := ExemptionsClientListForManagementGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ExemptionListResult); err != nil {\n\t\treturn ExemptionsClientListForManagementGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupResponder(resp *http.Response) (result UsageDetailsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *PolicyEventsClient) listQueryResultsForManagementGroupHandleResponse(resp *http.Response) (PolicyEventsClientListQueryResultsForManagementGroupResponse, error) {\n\tresult := PolicyEventsClientListQueryResultsForManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyEventsQueryResults); err != nil {\n\t\treturn PolicyEventsClientListQueryResultsForManagementGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedInstancesClientListByResourceGroupResponse, error) {\n\tresult := ManagedInstancesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listBySQLVMGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListBySQLVMGroupResponse, error) {\n\tresult := SQLVirtualMachinesClientListBySQLVMGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn 
SQLVirtualMachinesClientListBySQLVMGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *azcore.Response) (DedicatedHostListResultResponse, error) {\n\tvar val *DedicatedHostListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostListResultResponse{}, err\n\t}\n\treturn DedicatedHostListResultResponse{RawResponse: resp.Response, DedicatedHostListResult: val}, nil\n}", "func (client *GroupClient) listByServiceHandleResponse(resp *http.Response) (GroupListByServiceResponse, error) {\n\tresult := GroupListByServiceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupCollection); err != nil {\n\t\treturn GroupListByServiceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (ContainerGroupsClientListByResourceGroupResponse, error) {\n\tresult := ContainerGroupsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *http.Response) (DedicatedHostsListByHostGroupResponse, error) {\n\tresult := DedicatedHostsListByHostGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostListResult); err != nil {\n\t\treturn DedicatedHostsListByHostGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client AlertsClient) ListByManagementGroupsResponder(resp *http.Response) (result AlertListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = 
autorest.Response{Response: resp}\n\treturn\n}", "func (client *SyncGroupsClient) listByDatabaseHandleResponse(resp *http.Response) (SyncGroupsClientListByDatabaseResponse, error) {\n\tresult := SyncGroupsClientListByDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncGroupListResult); err != nil {\n\t\treturn SyncGroupsClientListByDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IPGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (IPGroupsListByResourceGroupResponse, error) {\n\tresult := IPGroupsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPGroupListResult); err != nil {\n\t\treturn IPGroupsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *HardwareComponentGroupsClient) listByDeviceHandleResponse(resp *http.Response) (HardwareComponentGroupsClientListByDeviceResponse, error) {\n\tresult := HardwareComponentGroupsClientListByDeviceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HardwareComponentGroupList); err != nil {\n\t\treturn HardwareComponentGroupsClientListByDeviceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilityGroupListenersClient) listByGroupHandleResponse(resp *http.Response) (AvailabilityGroupListenersClientListByGroupResponse, error) {\n\tresult := AvailabilityGroupListenersClientListByGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilityGroupListenerListResult); err != nil {\n\t\treturn AvailabilityGroupListenersClientListByGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client *CapacityReservationsClient) 
listByCapacityReservationGroupHandleResponse(resp *http.Response) (CapacityReservationsListByCapacityReservationGroupResponse, error) {\n\tresult := CapacityReservationsListByCapacityReservationGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationListResult); err != nil {\n\t\treturn CapacityReservationsListByCapacityReservationGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client DataControllersClient) ListInGroupResponder(resp *http.Response) (result PageOfDataControllerResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client NamespacesClient) ListByResourceGroupResponder(resp *http.Response) (result EHNamespaceListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *CatalogsClient) listDeviceGroupsHandleResponse(resp *http.Response) (CatalogsClientListDeviceGroupsResponse, error) {\n\tresult := CatalogsClientListDeviceGroupsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeviceGroupListResult); err != nil {\n\t\treturn CatalogsClientListDeviceGroupsResponse{}, err\n\t}\n\treturn result, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
startLiveReloadServer initializes a livereload to notify the browser of changes to code that does not need a recompile.
func startLiveReloadServer(tpls *template.Template, cfg *env.Config, staticAssets *static.Files) error { if cfg.IsProduction { return nil } log.Info("Initializing livereload") paths := []string{ "assets", "templates", } tmplFn := func(name string) (bool, error) { templates, err := initTemplates(cfg, staticAssets) if err != nil { return false, err } *tpls = *templates return true, nil } mappings := livereload.ReloadMapping{ ".css": nil, ".js": nil, ".tmpl": tmplFn, } _, err := livereload.ListenAndServe(livereload.DefaultPort, paths, mappings) return err }
[ "func StartReloadServer(port string) {\n\thub = newHub()\n\tgo hub.run()\n\thttp.HandleFunc(\"/reload\", func(w http.ResponseWriter, r *http.Request) {\n\t\tserveWs(hub, w, r)\n\t})\n\n\tgo StartServer(port)\n}", "func StartWebserver() {\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}", "func InitializeServer(host string) (server *network.WebServer) {\n\trand.Seed(time.Now().UTC().UnixNano())\n\t// Make sure folders exist that we want:\n\tif err := ensureBindDirs(); err != nil {\n\t\tLog.Error(\"Failed to have home working dir to put the files into at ~/Desktop/bind, err: \", err)\n\t} else {\n\t\tLog.Info(\"bind dirs ensured!\")\n\t}\n\tif os.Args[0] != \"d\" { //development mode\n\t\tgin.SetMode(gin.ReleaseMode)\n\t}\n\tr := gin.New()\n\tr.LoadHTMLGlob(\"public/tmpl/*.html\")\n\tr.StaticFS(\"/videos\", http.Dir(basePath+\"/videos\"))\n\tr.StaticFS(\"/frames\", http.Dir(basePath+\"/frames\"))\n\tr.Static(\"/public\", \"./public\")\n\tr.GET(\"/\", getIndex)\n\tr.POST(\"/g\", postIndex)\n\tr.GET(\"/g\", getIndex)\n\tr.GET(\"/about\", getAbout)\n\tr.GET(\"/jobs\", getJobs)\n\tr.GET(\"/code\", getCode)\n\tmel = melody.New() // melody middleware\n\n\t// websocket route\n\tr.GET(\"/ws\",func(ctx *gin.Context){\n\t\t// handle request with Melody\n\t\tmel.HandleRequest(ctx.Writer,ctx.Request)\n\t})\n\n\t// Melody message handler\n\tmel.HandleMessage(func(ses *melody.Session,msg []byte){\n\t\t// broadcast message to connected sockets\n\t\tmel.Broadcast(msg)\n\t})\n\n\n\tr.GET(\"/openframes\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/frames\")\n\t})\n\tr.GET(\"/openvideos\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/videos\")\n\t})\n\tr.GET(\"/openlogs\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/logs\")\n\t})\n\tr.GET(\"/toggleClipYt\", func(c *gin.Context) {\n\t\topen.Run(basePath + \"/logs\")\n\t})\n\t// go requests(mel)\n\t// go jobUpdates(mel)\n\n\treturn network.InitializeWebServer(r, host)\n}", "func StartServer() {\n\trouter := 
httprouter.New()\n\trouter.GET(\"/\", indexHandler)\n\trouter.GET(\"/download/*filepath\", downloadHandler)\n\n\t// todo HTTPS\n\t// todo parameterize port\n\tlog.Println(\"Starting webapp listener\")\n\tlog.Fatalln(http.ListenAndServe(\"0.0.0.0:8080\", logRequest(router)))\n}", "func (server *Server) runServer() {\n\tserver.G.Go(func() error {\n\t\tserver.API.log.Info(\"running server %v\", server.config.Server.ListenAddr)\n\t\treturn http.ListenAndServe(server.config.Server.ListenAddr, server.Server.Handler)\n\t})\n}", "func startServer(config *Config) {\n\n\taddr := config.serverAddr\n\tserver := Server{addr: addr}\n\tlog.Println(\"Listening on\", addr)\n\tserver.listen()\n}", "func StartServer() {\n\tif server == nil {\n\t\tGetInstance()\n\t}\n\n\tlog.Println(\"starting server on http://localhost\" + defaultPort)\n\tserver.Run(defaultPort)\n}", "func StartReceiverServer(config *ConfigStruct, db *dbhelper.DBhelper, debug bool) {\n\tdbs = db\n\tconfigs = config\n\n\t//Always listen only on /\n\thttp.HandleFunc(\"/\", webhookPage)\n\n\t//Start the server\n\tif config.Webserver.HTTPS.Enabled {\n\t\t//Start TLS server in background\n\t\tgo (func() {\n\t\t\tlog.Fatal(http.ListenAndServeTLS(config.Webserver.HTTPS.ListenAddress, config.Webserver.HTTPS.CertFile, config.Webserver.HTTPS.KeyFile, nil))\n\t\t})()\n\t\tif debug {\n\t\t\tlog.Printf(\"Started HTTPS server on address %s\\n\", config.Webserver.HTTPS.ListenAddress)\n\t\t}\n\t}\n\n\tif config.Webserver.HTTP.Enabled {\n\t\t//Start HTTP server in background\n\t\tgo (func() {\n\t\t\tlog.Fatal(http.ListenAndServe(config.Webserver.HTTP.ListenAddress, nil))\n\t\t})()\n\t\tif debug {\n\t\t\tlog.Printf(\"Started HTTP server on address %s\\n\", config.Webserver.HTTP.ListenAddress)\n\t\t}\n\t}\n\n\t//keep program running\n\tfor {\n\t\ttime.Sleep(1 * time.Hour)\n\t}\n}", "func BlobServerStart() 
{\n\thttp.Handle(\n\t\t\"/b/\",\n\t\thttp.StripPrefix(\n\t\t\t\"/b/\",\n\t\t\thttp.FileServer(http.Dir(\"bin/blobs\")),\n\t\t),\n\t)\n\thttp.Handle(\n\t\t\"/t/\",\n\t\thttp.StripPrefix(\n\t\t\t\"/t/\",\n\t\t\thttp.FileServer(http.Dir(\"bin/tags\")),\n\t\t),\n\t)\n\thttp.Handle(\n\t\t\"/c/\",\n\t\thttp.StripPrefix(\"/c/\",\n\t\t\thttp.FileServer(http.Dir(\"bin/commits\")),\n\t\t),\n\t)\n\thttp.ListenAndServe(\":8080\", nil)\n\tfmt.Println(\"Listening on :8080\")\n}", "func (g *LightningTerminal) startMainWebServer() error {\n\t// Initialize the in-memory file server from the content compiled by\n\t// the go:embed directive. Since everything's relative to the root dir,\n\t// we need to create an FS of the sub directory app/build.\n\tbuildDir, err := fs.Sub(appBuildFS, appFilesDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstaticFileServer := http.FileServer(&ClientRouteWrapper{\n\t\tassets: http.FS(buildDir),\n\t})\n\n\t// Both gRPC (web) and static file requests will come into through the\n\t// main UI HTTP server. We use this simple switching handler to send the\n\t// requests to the correct implementation.\n\thttpHandler := func(resp http.ResponseWriter, req *http.Request) {\n\t\t// If this is some kind of gRPC, gRPC Web or REST call that\n\t\t// should go to lnd or one of the daemons, pass it to the proxy\n\t\t// that handles all those calls.\n\t\tif g.rpcProxy.isHandling(resp, req) {\n\t\t\treturn\n\t\t}\n\n\t\t// If we got here, it's a static file the browser wants, or\n\t\t// something we don't know in which case the static file server\n\t\t// will answer with a 404.\n\t\tlog.Infof(\"Handling static file request: %s\", req.URL.Path)\n\n\t\t// Add 1-year cache header for static files. 
React uses content-\n\t\t// based hashes in file names, so when any file is updated, the\n\t\t// url will change causing the browser cached version to be\n\t\t// invalidated.\n\t\tvar re = regexp.MustCompile(`^/(static|fonts|icons)/.*`)\n\t\tif re.MatchString(req.URL.Path) {\n\t\t\tresp.Header().Set(\"Cache-Control\", \"max-age=31536000\")\n\t\t}\n\n\t\t// Transfer static files using gzip to save up to 70% of\n\t\t// bandwidth.\n\t\tgzipHandler := makeGzipHandler(staticFileServer.ServeHTTP)\n\t\tgzipHandler(resp, req)\n\t}\n\n\t// Create and start our HTTPS server now that will handle both gRPC web\n\t// and static file requests.\n\tg.httpServer = &http.Server{\n\t\t// To make sure that long-running calls and indefinitely opened\n\t\t// streaming connections aren't terminated by the internal\n\t\t// proxy, we need to disable all timeouts except the one for\n\t\t// reading the HTTP headers. That timeout shouldn't be removed\n\t\t// as we would otherwise be prone to the slowloris attack where\n\t\t// an attacker takes too long to send the headers and uses up\n\t\t// connections that way. 
Once the headers are read, we either\n\t\t// know it's a static resource and can deliver that very cheaply\n\t\t// or check the authentication for other calls.\n\t\tWriteTimeout: 0,\n\t\tIdleTimeout: 0,\n\t\tReadTimeout: 0,\n\t\tReadHeaderTimeout: defaultServerTimeout,\n\t\tHandler: http.HandlerFunc(httpHandler),\n\t}\n\thttpListener, err := net.Listen(\"tcp\", g.cfg.HTTPSListen)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to listen on %v: %v\",\n\t\t\tg.cfg.HTTPSListen, err)\n\t}\n\ttlsConfig, err := buildTLSConfigForHttp2(g.cfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create TLS config: %v\", err)\n\t}\n\ttlsListener := tls.NewListener(httpListener, tlsConfig)\n\n\tg.wg.Add(1)\n\tgo func() {\n\t\tdefer g.wg.Done()\n\n\t\tlog.Infof(\"Listening for http_tls on: %v\", tlsListener.Addr())\n\t\terr := g.httpServer.Serve(tlsListener)\n\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Errorf(\"http_tls server error: %v\", err)\n\t\t}\n\t}()\n\n\t// We only enable an additional HTTP only listener if the user\n\t// explicitly sets a value.\n\tif g.cfg.HTTPListen != \"\" {\n\t\tinsecureListener, err := net.Listen(\"tcp\", g.cfg.HTTPListen)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to listen on %v: %v\",\n\t\t\t\tg.cfg.HTTPListen, err)\n\t\t}\n\n\t\tg.wg.Add(1)\n\t\tgo func() {\n\t\t\tdefer g.wg.Done()\n\n\t\t\tlog.Infof(\"Listening for http on: %v\",\n\t\t\t\tinsecureListener.Addr())\n\t\t\terr := g.httpServer.Serve(insecureListener)\n\t\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\t\tlog.Errorf(\"http server error: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn nil\n}", "func startServer() {\n\thttp.HandleFunc(\"/write\", writer)\n\thttp.HandleFunc(\"/read\", reader)\n\thttp.HandleFunc(\"/load\", loader)\n\n\t// Starting Http Server at port 7000\n\tfmt.Println(\"Starting Http server\")\n\thttp.ListenAndServe(\":7000\", nil)\n}", "func (s *Server) StartServer() {\n\ts.Router = s.NewRouter()\n\n\tlog.Info(\"HTTP Server 
Ready.\", map[string]interface{}{\n\t\t\"module\": \"server\",\n\t})\n\tgo sl.StartSlack(s.Info.Version, slackKey)\n\tport := fmt.Sprintf(\":%d\", s.Info.Port)\n\thttp.ListenAndServe(port, s.Router)\n\n}", "func StartServer() {\n\tfmt.Println(\"Server is started at 8082\")\n\thttp.ListenAndServe(\":8082\", r)\n}", "func startServer() *handler.WebServer {\n\twebServer := handler.CreateServer(port(), 5)\n\n\tgo func() {\n\t\tfmt.Printf(\"Listening at %s\\n\", webServer.Server.Addr)\n\t\tif err := webServer.Server.ListenAndServe(); err != nil {\n\t\t\tfmt.Printf(\"Httpserver: ListenAndServe() error: %s\\n\", err)\n\t\t}\n\t}()\n\n\treturn webServer\n}", "func (s *Server) startDevServer() (cleanup func()) {\n\troot := gitRootDir()\n\twebClientPath := filepath.Join(root, \"client\", \"web\")\n\n\tyarn := filepath.Join(root, \"tool\", \"yarn\")\n\tnode := filepath.Join(root, \"tool\", \"node\")\n\tvite := filepath.Join(webClientPath, \"node_modules\", \".bin\", \"vite\")\n\n\tlog.Printf(\"installing JavaScript deps using %s... 
(might take ~30s)\", yarn)\n\tout, err := exec.Command(yarn, \"--non-interactive\", \"-s\", \"--cwd\", webClientPath, \"install\").CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatalf(\"error running tailscale web's yarn install: %v, %s\", err, out)\n\t}\n\tlog.Printf(\"starting JavaScript dev server...\")\n\tcmd := exec.Command(node, vite)\n\tcmd.Dir = webClientPath\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatalf(\"Starting JS dev server: %v\", err)\n\t}\n\tlog.Printf(\"JavaScript dev server running as pid %d\", cmd.Process.Pid)\n\treturn func() {\n\t\tcmd.Process.Signal(os.Interrupt)\n\t\terr := cmd.Wait()\n\t\tlog.Printf(\"JavaScript dev server exited: %v\", err)\n\t}\n}", "func StartServer() {\n\t// Initialize\n\tinitialize()\n\n\t// Centralized middleware for error handling\n\tr := middleware.NewRecovery()\n\tm := middleware.With(http.HandlerFunc(omikujiHandler), r)\n\thttp.Handle(\"/omikuji\", m)\n\tif err := http.ListenAndServe(\":8080\", nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func startWsServer(listen_addr string) {\n\t//hub = newHub()\n\tgo hub.Run()\n\n\t//http.HandleFunc(\"/\", cmdHandler)\n\thttp.HandleFunc(\"/upgrade\", func(w http.ResponseWriter, r *http.Request) {\n\t\tserveWs(hub, w, r)\n\t})\n\terr := http.ListenAndServe(listen_addr, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not listen to %s: %s\", listen_addr, err)\n\t}\n}", "func HTTPAPIServerStreamReload(c *gin.Context) {\n\terr := service.ProgramReload(c.Param(\"uuid\"))\n\tif err != nil {\n\t\tc.IndentedJSON(500, Message{Status: 0, Payload: err.Error()})\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"module\": \"http_stream\",\n\t\t\t\"stream\": c.Param(\"uuid\"),\n\t\t\t\"func\": \"HTTPAPIServerStreamReload\",\n\t\t\t\"call\": \"StreamReload\",\n\t\t}).Errorln(err.Error())\n\t\treturn\n\t}\n\tc.IndentedJSON(200, Message{Status: 1, Payload: gss.Success})\n}", "func (s *Refresh) StartBackgroundRefresh() {\n\tgo 
s.FindServerStateAdded()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateFooterView creates a footer area for the application
func CreateFooterView(g *gocui.Gui) error { viewName := "footer" if footer, err := g.SetView(viewName, maxX/6+1, maxY-maxY/4, maxX-1, maxY-1); err != nil { if err != gocui.ErrUnknownView { return err } footer.Wrap = true footer.Title = "Top HeadLines" footer.SelBgColor = gocui.ColorGreen footer.SelFgColor = gocui.ColorRed fmt.Fprintln(footer, Country, Source, Category) } views = append(views, viewName) return nil }
[ "func (wa *WebApp) LayFooterHeight() int { return 50 }", "func NewAddFooter()(*AddFooter) {\n m := &AddFooter{\n MarkContent: *NewMarkContent(),\n }\n odataTypeValue := \"#microsoft.graph.addFooter\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func buildCommandFooterBar() *tview.TextView {\n\n\tfooterBar := tview.NewTextView().\n\t\tSetDynamicColors(true).\n\t\tSetRegions(true).\n\t\tSetWrap(false)\n\n\tpageCommands := make([]string, 0)\n\tfor key, page := range clusterDetailsPageMap {\n\t\tpageCommands = append(pageCommands, fmt.Sprintf(`[bold]%c [\"%c\"][darkcyan]%s[white][\"\"]`, key, key, page.Name))\n\t}\n\tsort.Strings(pageCommands)\n\n\tfooterPageText := strings.Join(pageCommands, \" \")\n\tfooterPageText = fmt.Sprintf(`%s %c [white::b]R[darkcyan::-] Refresh-Data`, footerPageText, tcell.RuneVLine)\n\tfooterPageText = fmt.Sprintf(`%s [white::b]Tab / Mouse[darkcyan::-] Navigate`, footerPageText)\n\n\tfmt.Fprint(footerBar, footerPageText)\n\n\treturn footerBar\n}", "func (tui *TUI) drawFooter() {\n\tmin := int(tui.trackDuration.Minutes())\n\tsecs := int(tui.trackDuration.Seconds()) % 60\n\tvar title string\n\tif tui.currentTrack != nil {\n\t\ttitle = tui.currentTrack.Title\n\t}\n\ttui.footer.Clear()\n\tfmt.Fprintf(tui.footer, \"%02d:%02d / %s\", min, secs, title)\n\ttui.app.Draw()\n}", "func footer() (*template.Template, error) {\n\ttpl := `<footer class=\"pt-2 border-top\">\n <div class=\"d-flex justify-content-center\">\n <h5><small class=\"text-muted\">&copy; 2018-%d The Soteria DAG developers</small></h5>\n </div>\n</footer>`\n\n\tt := template.New(\"footer\")\n\treturn t.Parse(fmt.Sprintf(tpl, time.Now().Year()))\n}", "func drawFooter(s tcell.Screen, content string) {\n\tcontent = \"FLShell v2.0 | Image File: \" + *imagepath + \" | \" + content\n\tcolourRow(s, footerStyle, windowHeight-1)\n\tputln(s, footerStyle, content, windowHeight-1)\n}", "func (b *Bill) makeFooter() func() {\n\treturn func() 
{\n\t\tb.pdf.Ln(10)\n\t\tb.darkDrawColor()\n\t\tb.pdf.Line(8, 280, 200, 280)\n\t\tb.pdf.SetXY(8.0, 285)\n\t\tb.darkText()\n\t\tb.pdf.Cell(143, 0, b.config.Business.Name)\n\t\tb.lightText()\n\t\tb.pdf.Cell(40, 0, \"Generated: \"+time.Now().UTC().Format(\"2006-01-02 15:04:05\"))\n\t}\n}", "func Footer_(children ...HTML) HTML {\n return Footer(nil, children...)\n}", "func (p GraphvizStrategy) Footer(scribe Scribe) {\n\tfor _, line := range strings.Split(p.CustomFooter, \"\\n\") {\n\t\tscribe.WriteLine(line)\n\t}\n\tscribe.UpdateIndent(-1)\n\tscribe.WriteLine(\"}\")\n}", "func (p PlantUmlStrategy) Footer(scribe Scribe) {\n\tscribe.WriteString(true, p.CustomFooter)\n\tscribe.WriteLine(\"@enduml\")\n}", "func (r *txtRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {}", "func Footer(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"footer\", Attributes: attrs, Children: children}\n}", "func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {\n}", "func ToolbarFooter(append ...bool) vecty.Markup {\n\treturn AddClass(toolbarFooter, append...)\n}", "func (v Binary) Footer(cursor, width int, baseStyle lipgloss.Style) string {\n\treturn baseStyle.Render(fmt.Sprintf(\"%d / %d bytes (%d bytes per row)\", cursor*dataWidth(width), v.size, dataWidth(width)))\n}", "func (rb *PDFBuilder) Footer(m HeaderFooter) *PDFBuilder {\n\treturn rb.set(\"footer\", m.encode())\n}", "func Footer() string {\n\treturn \"```\\n\\n</details>\"\n}", "func (dao *blockDAO) Footer(h hash.Hash256) (*block.Footer, error) {\n\treturn dao.footer(h)\n}", "func (s Spec) FooterLocation() panel.Point {\n\treturn panel.Point{X: s.Width() / 2, Y: s.MountingHoleBottomY()}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search scrapes azlyrics.com for song lyrics and does regex magic to clean them up. Beware, your IP can AND will get blocked while running this, but it is only called in `go generate` (see midi/generate.go) so a normal user will never run this.
func Search(query string) (string, error) { v := url.Values{ "q": []string{query}, } uri := fmt.Sprintf("%s?%s", queryURI, v.Encode()) // start the scrape resp, err := http.Get(uri) if err != nil { logrus.Fatalf("requesting %s failed: %v", uri, err) } defer resp.Body.Close() doc, err := goquery.NewDocumentFromReader(resp.Body) if err != nil { logrus.Fatalf("creating document failed: %v", err) } link, ok := doc.Find("td").First().Find("a").Attr("href") if !ok { return "", fmt.Errorf("could not find top link at %s", uri) } // get the lyrics link resp, err = http.Get(link) if err != nil { return "", fmt.Errorf("request to %s failed: %v", link, err) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("reading body from %s failed: %v", link, err) } // get the lyrics from the HTML html := re.FindStringSubmatch(string(body)) if len(html) <= 0 { return "", fmt.Errorf("[%s] regex parsing failed for body: %s", query, body) } // strip html tags from decoded lyrics lyrics := reHTML.ReplaceAllString(html[0], "") return lyrics, nil }
[ "func renderLyrics(songSearchTerm string) {\n\ttoken := readToken()\n\n\tsearchResponse, err := api.Search(songSearchTerm, token)\n\tif err != nil {\n\t\tlog.Fatalf(\"renderLyrics: %v\", err)\n\t}\n\n\tfirstSearchHit := findFirstSongSearchHit(searchResponse.Response.Hits)\n\tif firstSearchHit == nil {\n\t\tlog.Printf(\"renderLyrics: Unable to find the song: %s\", songSearchTerm)\n\t\treturn\n\t}\n\n\tlyricsURL := firstSearchHit.Result[api.DocumentURL].(string)\n\tua := useragent.GetUserAgent(lyricsURL)\n\tif ua == nil {\n\t\tlog.Printf(\"renderLyrics: Unable to render the document: %s\", lyricsURL)\n\t\treturn\n\t}\n\n\tua.Open(lyricsURL)\n}", "func (e ExtractorFunc) ExtractLyrics(req request.Requester) (*lyrics.Info, error) {\n\treturn e(req)\n}", "func wikia(track track.Track) ([]string, error) {\n\turl := getLyricsURL(track)\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\te := fmt.Sprintf(\"Could not access the URL: %s\", err)\n\t\treturn []string{}, errors.New(e)\n\t}\n\n\troot, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\te := fmt.Sprintf(\"Could not parse the HTML body: %s\", err)\n\t\treturn []string{}, errors.New(e)\n\t}\n\n\tnode, ok := scrape.Find(root, scrape.ByClass(\"lyricbox\"))\n\tif ok {\n\t\tlyrics := buildLyrics(node.FirstChild)\n\t\treturn lyrics, nil\n\t}\n\n\treturn []string{}, errors.New(\"Could not fetch song lyrics\")\n}", "func analyze() {\n\tlog.Printf(\"analyzing...\")\n\n\tregexp, err := regexp.Compile(\"https?://t\\\\.co/(\\\\w|-)+\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor tweet := range tweets {\n\t\tfor _, link := range regexp.FindAllString(tweet, -1) {\n\t\t\tlinks <- link\n\t\t}\n\t}\n}", "func processYahooResponses(result string) []messageQueryBody {\n\n\tsubsl := \"<a class=\\\" ac-algo fz-l ac-21th lh-24\\\"\";\n\tlensubsl := len(subsl)\n\tsubsl2 := \"</a>\"\n\tlensubsl2 := len(subsl2)\n\tsubsl3 := \"<span class=\\\" fz-ms fw-m fc-12th wr-bw lh-17\\\">\"\n\tlensubsl3 := len(subsl3)\n\tsubsl4 := 
\"</span>\"\n\tlensubsl4 := len(subsl4)\n\tsubsl5 := \"<p class=\\\"lh-16\\\"\"\n\tlensubsl5 := len(subsl5)\n\n\tvar queryResult messageQueryBody\n\tvar queryResultArray []messageQueryBody\n\tfor i := 0; i < len(result) - lensubsl; i++ {\n\t\tmess := \"\"\n\t\tif result[i : i + lensubsl] == subsl {\n\t\t\tlength := i + lensubsl\n\t\t\tvar last int\n\t\t\tvar start int\n\n\t\t\tfor k := 1; ; k++ {\n\t\t\t\tif result[length + k: length+k+1 ] == \">\" {\n\t\t\t\t\tstart = length + k + 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor j:=1; ; j++ {\n\t\t\t\tif result[start + j: start + j + lensubsl2] == subsl2 {\n\t\t\t\t\tmess = result[start: start + j]\n\t\t\t\t\tqueryResult.Head = mess\n\t\t\t\t\tlast = start + j + lensubsl2\n\t\t\t\t\ti = last\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfound := false\n\t\t\tfor j:= 1; ; j++ {\n\t\t\t\tif result[last + j: last + j + lensubsl3] == subsl3 { // matched found for \"<span class=\\\" fz-ms fw-m fc-12th wr-bw lh-17\\\">\"\n\t\t\t\t\tfor k:= 1; ; k++ {\n\t\t\t\t\t\tif result[last + j + lensubsl3 + k: last + j + lensubsl3 + k + lensubsl4] == subsl4 { // finding index for \"</span>\"\n\t\t\t\t\t\t\tlink := result[last + j + lensubsl3 : last + j + lensubsl3 + k]\n\t\t\t\t\t\t\ti = last + j + lensubsl3 + k + lensubsl4\n\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\tlink = strings.Replace(link, \"<b>\", \"\", -1)\n\t\t\t\t\t\t\tlink = strings.Replace(link, \"</b>\", \"\", -1)\n\t\t\t\t\t\t\tif len(link) >= 7 {\n\t\t\t\t\t\t\t\tif link[0: 7] != \"http://\" && link[0: 8] != \"https://\" {\n\t\t\t\t\t\t\t\t\tlink = \"http://\" + link\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tqueryResult.Link = link\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfor k := 1; ; k++ {\n\t\t\t\t\t\tif result[i + k : i + k + lensubsl5] == subsl5 {\n\t\t\t\t\t\t\tlength = i + k + lensubsl5 + 1;\n\t\t\t\t\t\t\tfor l := 1; ; l++ {\n\t\t\t\t\t\t\t\tif result[length + l: length + l + 4] == \"</p>\" {\n\t\t\t\t\t\t\t\t\tdesc := 
result[length: length + l]\n\t\t\t\t\t\t\t\t\tqueryResult.Desc = desc;\n\t\t\t\t\t\t\t\t\ti = length + l +4;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif found {\n\t\t\t\t\tqueryResultArray = append(queryResultArray, queryResult)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn queryResultArray\n\n}", "func GetLyricsByArtist(w http.ResponseWriter, r *http.Request) {\n\tvar test foo\n\n\tif err := json.NewDecoder(r.Body).Decode(&test); err != nil {\n\t\terr := fmt.Errorf(\"error when reading request body: %w\", err)\n\t\tlog.Logger.Errorf(\"GetAllSongs failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif test.Search == \"\" {\n\t\tlog.Logger.Infof(\"GetAllSongs: request body was empty: %v\", test)\n\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Logger.Infof(\"GetLyricsByArtist: successfully read request body: %v\", test)\n\n\tsongData, err := internal.GetAllLyricsByArtist(test.Search)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"error when getting all lyrics by artist: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsByArtist failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\twordMap, err := internal.ScanWords(songData, &test.Words)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(400), 400)\n\t}\n\n\tlog.Logger.Infof(\"finished scanning words: %v\", wordMap)\n\n\tresponse := models.Response{\n\t\tSongs: songData,\n\t\tWordMap: wordMap,\n\t}\n\n\tif err := json.NewEncoder(w).Encode(response); err != nil {\n\t\terr := fmt.Errorf(\"error when encoding response: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsByArtist failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), 
http.StatusBadRequest)\n\t\treturn\n\t}\n}", "func processYahooResponses(result string) []messageQueryBody {\n\n\tsubsl := \"<a class=\\\" ac-algo fz-l ac-21th lh-24\\\"\";\n\tlensubsl := len(subsl)\n\tsubsl2 := \"</a>\"\n\tlensubsl2 := len(subsl2)\n\tsubsl3 := \"<span class=\\\" fz-ms fw-m fc-12th wr-bw lh-17\\\">\"\n\tlensubsl3 := len(subsl3)\n\tsubsl4 := \"</span>\"\n\tlensubsl4 := len(subsl4)\n\n\tvar queryResult messageQueryBody\n\tvar queryResultArray []messageQueryBody\n\tfor i := 0; i < len(result) - lensubsl; i++ {\n\t\tmess := \"\"\n\t\tif result[i : i + lensubsl] == subsl {\n\t\t\tlength := i + lensubsl\n\t\t\tvar last int\n\t\t\tvar start int\n\n\t\t\tfor k := 1; ; k++ {\n\t\t\t\tif result[length + k: length+k+1 ] == \">\" {\n\t\t\t\t\tstart = length + k + 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor j:=1; ; j++ {\n\t\t\t\tif result[start + j: start + j + lensubsl2] == subsl2 {\n\t\t\t\t\tmess = result[start: start + j]\n\t\t\t\t\tqueryResult.Head = mess\n\t\t\t\t\tlast = start + j + lensubsl2\n\t\t\t\t\ti = last\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfound := false\n\t\t\tfor j:= 1; ; j++ {\n\t\t\t\tif result[last + j: last + j + lensubsl3] == subsl3 { // matched found for \"<span class=\\\" fz-ms fw-m fc-12th wr-bw lh-17\\\">\"\n\t\t\t\t\tfor k:= 1; ; k++ {\n\t\t\t\t\t\tif result[last + j + lensubsl3 + k: last + j + lensubsl3 + k + lensubsl4] == subsl4 { // finding index for \"</span>\"\n\t\t\t\t\t\t\tlink := result[last + j + lensubsl3 : last + j + lensubsl3 + k]\n\t\t\t\t\t\t\ti = last + j + lensubsl3 + k + lensubsl4\n\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\tlink = strings.Replace(link, \"<b>\", \"\", -1)\n\t\t\t\t\t\t\tlink = strings.Replace(link, \"</b>\", \"\", -1)\n\t\t\t\t\t\t\tif link[0: 7] != \"http://\" && link[0: 4] != \"www.\" {\n\t\t\t\t\t\t\t\tlink = \"http://\" + link\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tqueryResult.Link = link\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif found 
{\n\t\t\t\t\tqueryResultArray = append(queryResultArray, queryResult)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn queryResultArray\n\n}", "func GetLyricsBySearch(w http.ResponseWriter, r *http.Request) {\n\tvar test foo\n\n\tif err := json.NewDecoder(r.Body).Decode(&test); err != nil {\n\t\terr := fmt.Errorf(\"error when reading request body: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsBySearch failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif test.Search == \"\" {\n\t\tlog.Logger.Infof(\"GetAllSongs: request body was empty: %v\", test)\n\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Logger.Infof(\"GetLyricsBySearch: successfully read request body: %v\", test)\n\n\tsongData, err := internal.GetLyricsBySearch(test.Search)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"error when getting lyrics by search: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsBySearch failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\twordMap, err := internal.ScanWords(songData, &test.Words)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(400), 400)\n\t}\n\n\tlog.Logger.Infof(\"finished scanning words... 
%v\", wordMap)\n\n\tresponse := models.Response{\n\t\tSongs: songData,\n\t\tWordMap: wordMap,\n\t}\n\n\tif err := json.NewEncoder(w).Encode(response); err != nil {\n\t\terr := fmt.Errorf(\"error when encoding response: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsBySearch failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n}", "func ProxyScrape() (results []string) {\n\tvar (\n\t\tdomain = \"https://api.proxyscrape.com/?request=getproxies&proxytype=http&timeout=10000\"\n\t\terr error\n\t\tbody []byte\n\t)\n\n\tif body, err = client.New(10 * time.Second).Read(domain); err != nil {\n\t\tlog.Printf(\"Failed to read response for source website %s\\n\")\n\t\treturn\n\t}\n\n\tfor _, proxy := range strings.Split(string(body), \"\\n\") {\n\t\tif strings.Trim(proxy, \"\\n\") != \"\" {\n\t\t\tresults = append(results, strings.Trim(proxy, \"\\n\"))\n\t\t}\n\t}\n\treturn\n}", "func processYoutubeResponses(result string) []messageQueryBody {\n\n\tsubsl := \"<a id=\\\"video-title\\\"\"\n\tsubsl2 := \"href=\\\"\"\n\tsubsl3 := \"</a>\"\n\tlensubsl3 := len(subsl3)\n\n\tvar queryResult messageQueryBody\n\tvar queryResultArray []messageQueryBody\n\tvar mid int\n\n\tfor i := 0; i < len(result) - len(subsl); i++ {\n\t\tmess := \"\"\n\t\tif result[i : i + len(subsl)] == subsl {\n\t\t\tlength := i + len(subsl)\n\t\t\tvar last int\n\t\t\tfor j:=1; ; j++ {\n\t\t\t\tif result[length + j: length + j + len(subsl2)] == subsl2 {\n\t\t\t\t\tmid = length + j + len(subsl2)\n\t\t\t\t\tfor k := 1; ; k++ {\n\t\t\t\t\t\tif result[mid + k: mid + k + 2] == \"\\\">\" {\n\t\t\t\t\t\t\tlink := result[mid: mid + k]\n\t\t\t\t\t\t\tflink := \"https://www.youtube.com\" + link\n\t\t\t\t\t\t\tqueryResult.Link = flink\n\t\t\t\t\t\t\tlast = mid + k + 2\n\t\t\t\t\t\t\ti = last\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfound := false\n\t\t\tfor j:= 1; ; j++ {\n\t\t\t\tif result[last + j: last + j 
+ lensubsl3] == subsl3 { // matched found for \"</a>\"\n\t\t\t\t\t\tmess = result[last: last + j]\n\t\t\t\t\t\ti = last + j + lensubsl3\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tqueryResult.Head = mess\n\t\t\t\t\t}\n\t\t\t\tif found {\n\t\t\t\t\tqueryResultArray = append(queryResultArray, queryResult)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn queryResultArray\n\n}", "func GetLyricsOneSong(w http.ResponseWriter, r *http.Request) {\n\tvar test foo\n\n\tif err := json.NewDecoder(r.Body).Decode(&test); err != nil {\n\t\terr := fmt.Errorf(\"error when reading request body: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsOneSong failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif test.Search == \"\" {\n\t\tlog.Logger.Infof(\"GetAllSongs: request body was empty: %v\", test)\n\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Logger.Infof(\"GetLyricsOneSong: successfully read request body: %v, %v\", test.Search, test.Words)\n\n\tsongs, err := internal.SearchSongs(test.Search)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"error when searching songs: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsOneSong failed: %w\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsingleSong, err := internal.GetOneSong(*songs)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"error when getting song: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsOneSong failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsongWithLyrics, err := internal.GetLyricsForSingleSong(*singleSong)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"error when getting lyrics for song: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsOneSong failed: %w\", err)\n\n\t\thttp.Error(w, 
http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsongData := models.Song{\n\t\tID: singleSong.ID,\n\t\tTitle: singleSong.Title,\n\t\tArtist: singleSong.Artist,\n\t\tLyrics: models.Lyrics{\n\t\t\tID: songWithLyrics.ID,\n\t\t\tLyrics: songWithLyrics.Lyrics,\n\t\t},\n\t}\n\n\twordMap, err := internal.ScanWords([]models.Song{songData}, &test.Words)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(400), 400)\n\t}\n\n\tlog.Logger.Infof(\"finished scanning words: %v\", wordMap)\n\n\tresponse := models.Response{\n\t\tSongs: []models.Song{songData},\n\t\tWordMap: wordMap,\n\t}\n\n\tif err := json.NewEncoder(w).Encode(response); err != nil {\n\t\terr := fmt.Errorf(\"error when encoding response: %w\", err)\n\t\tlog.Logger.Errorf(\"GetLyricsOneSong failed: %v\", err)\n\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n}", "func processYoutubeResponses(result string) []messageQueryBody {\n\n\tsubsl := \"<a id=\\\"video-title\\\"\"\n\tsubsl2 := \"href=\\\"\"\n\tsubsl3 := \"</a>\"\n\tlensubsl3 := len(subsl3)\n\tsubsl4 := \"<yt-formatted-string id=\\\"description-text\\\" class=\\\"style-scope ytd-video-renderer\\\">\"\n\tlensubsl4 := len(subsl4)\n\tsubsl5 := \"</yt-formatted-string>\"\n\tlensubsl5 := len(subsl5)\n\n\tvar queryResult messageQueryBody\n\tvar queryResultArray []messageQueryBody\n\tvar mid int\n\n\tfor i := 0; i < len(result) - len(subsl); i++ {\n\t\tmess := \"\"\n\t\tif result[i : i + len(subsl)] == subsl {\n\t\t\tlength := i + len(subsl)\n\t\t\tvar last int\n\t\t\tfor j:=1; ; j++ {\n\t\t\t\tif result[length + j: length + j + len(subsl2)] == subsl2 {\n\t\t\t\t\tmid = length + j + len(subsl2)\n\t\t\t\t\tfor k := 1; ; k++ {\n\t\t\t\t\t\tif result[mid + k: mid + k + 1] == \"\\\"\" {\n\t\t\t\t\t\t\tlink := result[mid: mid + k]\n\t\t\t\t\t\t\tflink := \"https://www.youtube.com\" + link\n\t\t\t\t\t\t\tqueryResult.Link = flink\n\t\t\t\t\t\t\tlast = mid + k + 
1\n\t\t\t\t\t\t\tfor l := 1; ; l++ {\n\t\t\t\t\t\t\t\tif result[last + l: last+ l +2] == \"\\\">\" {\n\t\t\t\t\t\t\t\t\tlast = last + l +2\n\t\t\t\t\t\t\t\t\ti = last + l + 2\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfound := false\n\t\t\tfor j:= 1; ; j++ {\n\t\t\t\tif result[last + j: last + j + lensubsl3] == subsl3 { // matched found for \"</a>\"\n\t\t\t\t\tmess = result[last: last + j]\n\t\t\t\t\ti = last + j + lensubsl3\n\t\t\t\t\tfound = true\n\t\t\t\t\tqueryResult.Head = mess\n\t\t\t\t\tfor k := 1; ; k++ {\n\t\t\t\t\t\tif result[i + k : i + k + lensubsl4] == subsl4 {\n\t\t\t\t\t\t\tlength = i + k + lensubsl4;\n\t\t\t\t\t\t\tfor l := 1; ; l++ {\n\t\t\t\t\t\t\t\tif result[length + l: length + l + lensubsl5] == subsl5 {\n\t\t\t\t\t\t\t\t\tdesc := result[length: length + l]\n\t\t\t\t\t\t\t\t\tqueryResult.Desc = desc;\n\t\t\t\t\t\t\t\t\ti = length + l +4;\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif found {\n\t\t\t\t\tqueryResultArray = append(queryResultArray, queryResult)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn queryResultArray\n\n}", "func GetMatches(document string) ([]string, error) {\n\tvar matches []string\n\n\t// This retrieves the phrases from the text files in string array format\n\tphrases, err := lookoutPhrases()\n\tif err != nil {\n\t\treturn matches, err\n\t}\n\n\t// This sorts the phrases list by length firstly, and reverse alphabetical secondly.\n\tsort.Sort(ByLength(phrases))\n\n\t// Then transforms everything in the document passed to lowercase\n\tdocument = strings.ToLower(document)\n\tdocument = removeShockwaveScripts(document)\n\tdocument = strings.Replace(document, \":\", \"\", -1)\n\tdocument = strings.Replace(document, \"-\", \" \", -1)\n\tdocument = strings.Replace(document, \"–\", \" \", -1)\n\tdocument = 
strings.Replace(document, \".\", \"\", -1)\n\tdocument = strings.Replace(document, \"&nbsp;\", \"\", -1)\n\tdocument = removeExtraWhitespaces(document) // This has to be done lastly since \"-\" and \"–\" are replaced with a whitespace\n\n\t// if f, err := os.Create(\"cleanedDocument.txt\"); err == nil {\n\t// \tf.WriteString(document)\n\t// \tf.Close()\n\t// }\n\n\t// For each phrase starting from longest to shortest\n\tfor _, p := range phrases {\n\t\t// From the item name removes \":\" and replaces dashes with empty spaces\n\t\tpp := strings.Replace(p, \":\", \"\", -1)\n\t\tpp = strings.Replace(pp, \"-\", \" \", -1)\n\t\tpp = strings.Replace(pp, \".\", \"\", -1)\n\t\tpp = strings.ToLower(pp)\n\n\t\t// Checks if phrase is in the document\n\t\tif matchedPhraseInText(pp, document) {\n\t\t\t// If so appends to original phrase to the array to be returned\n\t\t\tmatches = append(matches, p)\n\t\t\t// And deletes the modified phrase from the document (so as to not be matched twice or more)\n\t\t\tdocument = strings.Replace(document, pp, \" \", -1)\n\t\t}\n\t}\n\n\treturn matches, nil\n}", "func processGoogleResponses(result string) []messageQueryBody {\n\n\tsubsl := \"<h3 class=\\\"LC20lb\\\">\"\n\tlensubsl := len(subsl)\n\tsubsl2 := \"</h3>\"\n\tlensubsl2 := len(subsl2)\n\tsubsl3 := \"<cite\"\n\tlensubsl3 := len(subsl3)\n\tsubsl4 := \"</cite>\"\n\tlensubsl4 := len(subsl4)\n\tvar queryResult messageQueryBody\n\tvar queryResultArray []messageQueryBody\n\tfor i := 0; i < len(result) - lensubsl; i++ {\n\t\tmess := \"\"\n\t\tif result[i : i + lensubsl] == subsl {\n\t\t\tlength := i + lensubsl\n\t\t\tvar last int\n\t\t\tfor j:=1; ; j++ {\n\t\t\t\tif result[length + j: length + j + lensubsl2] == subsl2 {\n\t\t\t\t\tmess = result[length: length + j]\n\t\t\t\t\tqueryResult.Head = mess\n\t\t\t\t\tlast = length + j + lensubsl2\n\t\t\t\t\ti = last\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfound := false\n\t\t\tfor j:= 1; ; j++ {\n\t\t\t\tif result[last + j: last + j + 
lensubsl3] == subsl3 { // matched found for \"<cite\"\n\t\t\t\t\tfor k:= 1; ; k++ {\n\t\t\t\t\t\tif result[last + j + lensubsl3 + k: last + j + lensubsl3 + k + lensubsl4] == subsl4 { // finding index for \"</cite>\"\n\t\t\t\t\t\t\tlink := result[last + j + lensubsl3 + 15 : last + j + lensubsl3 + k]\n\t\t\t\t\t\t\ti = last + j + lensubsl3 + k + lensubsl4\n\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\tif link[0: 7] != \"http://\" && link[0: 4] != \"www.\" && link[0: 8] != \"https://\" {\n\t\t\t\t\t\t\t\tlink = \"http://\" + link\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tqueryResult.Link = link\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif found {\n\t\t\t\t\tqueryResultArray = append(queryResultArray, queryResult)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn queryResultArray\n\n}", "func (s *Server) GetLyrics(ctx context.Context, track *api.TracksInfo) (*api.LyricsInfo, error) {\n\turi, err := s.GeniusClient.GetSongURL(ctx, track.GetArtist(), track.GetName())\n\tif err != nil {\n\t\tlog.Printf(\"Error geting uri from genius.com: %s\", err)\n\t\turi = \"\"\n\t}\n\n\tlyrics, err := s.GeniusClient.GetSongLyrics(ctx, uri)\n\tif err != nil {\n\t\tlog.Printf(\"Error geting lyrics from genius.com: %s\", err)\n\t\tlyrics = \"\"\n\t}\n\n\tresult := &api.LyricsInfo{\n\t\tGeniusURI: uri,\n\t\tLyrics: lyrics,\n\t}\n\treturn result, nil\n}", "func (p *Proxy) RobotsTxt(w http.ResponseWriter, _ *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprintf(w, \"User-agent: *\\nDisallow: /\")\n}", "func ScrubTrackTitle(original string) string {\n\tvar result string\n\tresult = normalizeParens(original)\n\tfor _, re := range TrackTitleIgnoredPhrases {\n\t\tresult = re.ReplaceAllString(result, \"\")\n\t}\n\tresult = Scrub(result)\n\tresult = strings.TrimSpace(result)\n\treturn result\n}", "func sanitizeSkylinks(links []string) []string {\n\tvar result []string\n\n\tfor _, link := range links {\n\t\ttrimmed := strings.TrimPrefix(link, \"sia://\")\n\t\tresult = 
append(result, trimmed)\n\t}\n\n\treturn result\n}", "func crawl(url string) []string {\n fmt.Println(url)\n list, err := links.Extract(url)\n if err != nil {\n log.Print(err)\n }\n return list\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithProxyConfig returns a proxy config functional option
func WithProxyConfig(cfg config.Proxy) GRPCOption { return func(h *GRPCHandler) { h.proxyCfg = cfg } }
[ "func WithProxyUrl(proxyURL string) configurer {\n\treturn func(conf *config) {\n\t\tconf.proxyURL = proxyURL\n\t}\n}", "func WithProxy(proxyURL string) (Option, error) {\n\tu, err := url.Parse(proxyURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse proxy url %q: %s\", proxyURL, err)\n\t}\n\n\treturn func(c *Client) {\n\t\ttransport := http.DefaultTransport.(*http.Transport).Clone()\n\t\tif c.client.Transport != nil {\n\t\t\ttransport = c.client.Transport.(*http.Transport).Clone()\n\t\t}\n\n\t\ttransport.Proxy = http.ProxyURL(u)\n\n\t\tc.client.Transport = transport\n\t}, nil\n}", "func WithProxy(proxyURL string) (Option, error) {\n\tu, err := url.Parse(proxyURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse proxy url %q: %s\", proxyURL, err)\n\t}\n\n\treturn func(c *Client) {\n\t\tvar transport *http.Transport = LazyCreateNewTransport(c)\n\t\ttransport.Proxy = http.ProxyURL(u)\n\t\tc.client.Transport = transport\n\t}, nil\n}", "func WithProxy(p proxy.BackwardProxy) Option {\n\treturn Option{F: func(o *internal_server.Options, di *utils.Slice) {\n\t\to.Once.OnceOrPanic()\n\t\tdi.Push(fmt.Sprintf(\"WithProxy(%T)\", p))\n\n\t\tif o.Proxy != nil {\n\t\t\tpanic(fmt.Errorf(\"reassignment of Proxy is not allowed: %T -> %T\", o.Proxy, p))\n\t\t}\n\t\to.Proxy = p\n\t}}\n}", "func WithProxy() func(*engine.Spec) {\n\tenviron := map[string]string{}\n\tif value := getenv(\"no_proxy\"); value != \"\" {\n\t\tenviron[\"no_proxy\"] = value\n\t\tenviron[\"NO_PROXY\"] = value\n\t}\n\tif value := getenv(\"http_proxy\"); value != \"\" {\n\t\tenviron[\"http_proxy\"] = value\n\t\tenviron[\"HTTP_PROXY\"] = value\n\t}\n\tif value := getenv(\"https_proxy\"); value != \"\" {\n\t\tenviron[\"https_proxy\"] = value\n\t\tenviron[\"HTTPS_PROXY\"] = value\n\t}\n\treturn WithEnviron(environ)\n}", "func WithHTTPProxy(proxy string) ClientOption {\n\treturn withHTTPProxy{proxy}\n}", "func WithProxy(http, https, none string) Option {\n\treturn 
WithEnviron(\n\t\tmap[string]string{\n\t\t\t\"no_proxy\": none,\n\t\t\t\"NO_PROXY\": none,\n\t\t\t\"http_proxy\": http,\n\t\t\t\"HTTP_PROXY\": http,\n\t\t\t\"HTTPS_PROXY\": https,\n\t\t\t\"https_proxy\": https,\n\t\t},\n\t)\n}", "func Proxy(proxy ep.Endpoint) option {\n\treturn func(bl *BurrowListener) error {\n\t\tif proxy.IsSet() {\n\t\t\tbl.proxy = proxy\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func NewProxy(c *ProxyConfig) (proxy.Proxy, error) {\n\tif c == nil {\n\t\treturn nil, errors.New(\"config should be provided\")\n\t}\n\n\tif err := c.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Yeah, not a good practice at all but I guess it's fine for now.\n\tkafkaproxy.ActualDefaultRequestHandler.RequestKeyHandlers.Set(protocol.RequestAPIKeyProduce, NewProduceRequestHandler(c.MessageHandlers...))\n\n\tif c.BrokersMapping == nil {\n\t\treturn nil, errors.New(\"Brokers mapping is required\")\n\t}\n\n\tif c.Debug {\n\t\t_ = server.Server.Flags().Set(\"log-level\", \"debug\")\n\t}\n\n\tfor _, v := range c.ExtraConfig {\n\t\tf := strings.Split(v, \"=\")\n\t\t_ = server.Server.Flags().Set(f[0], f[1])\n\t}\n\n\tfor _, v := range c.BrokersMapping {\n\t\t_ = server.Server.Flags().Set(\"bootstrap-server-mapping\", v)\n\t}\n\n\tfor _, v := range c.DialAddressMapping {\n\t\t_ = server.Server.Flags().Set(\"dial-address-mapping\", v)\n\t}\n\n\treturn func(_ context.Context) error {\n\t\treturn server.Server.Execute()\n\t}, nil\n}", "func newProxyConfig(config PodConfig) interface{} {\n\tswitch config.ProxyType {\n\tcase NoopProxyType:\n\t\treturn nil\n\tcase CCProxyType:\n\t\tvar ccConfig CCProxyConfig\n\t\terr := mapstructure.Decode(config.ProxyConfig, &ccConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ccConfig\n\tdefault:\n\t\treturn nil\n\t}\n}", "func WithProxy(url string) *Client {\n\treturn std.WithProxy(url)\n}", "func NewProxyConfig(brokersMapping []string, opts ...ProxyOption) (*ProxyConfig, error) {\n\tc := &ProxyConfig{BrokersMapping: 
brokersMapping}\n\tfor _, opt := range opts {\n\t\tif err := opt(c); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn c, c.Validate()\n}", "func ConfigureProxyDialer() error {\n\t// load proxy configuration for tests\n\tvar cert tls.Certificate\n\troots := x509.NewCertPool()\n\n\tsecretsPath := \"secrets\"\n\tif _, err := os.Stat(path.Join(secretsPath, \"proxy-client.pem\")); os.IsNotExist(err) {\n\t\tsecretsPath = \"../../secrets\"\n\t}\n\tcert, err := tls.LoadX509KeyPair(path.Join(secretsPath, \"proxy-client.pem\"), path.Join(secretsPath, \"proxy-client.key\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tca, err := ioutil.ReadFile(path.Join(secretsPath, \"proxy-ca.pem\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ok := roots.AppendCertsFromPEM(ca); !ok {\n\t\treturn fmt.Errorf(\"error configuring proxy\")\n\t}\n\n\troundtrippers.PrivateEndpointDialHook = func(location string) func(context.Context, string, string) (net.Conn, error) {\n\t\treturn func(ctx context.Context, network, address string) (net.Conn, error) {\n\t\t\tproxyEnvName := \"PROXYURL_\" + strings.ToUpper(location)\n\t\t\tproxyURL := os.Getenv(proxyEnvName)\n\t\t\tif proxyURL == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"%s not set\", proxyEnvName)\n\t\t\t}\n\n\t\t\tc, err := tls.Dial(\"tcp\", proxyURL, &tls.Config{\n\t\t\t\tRootCAs: roots,\n\t\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\t\tServerName: \"proxy-server\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tr := bufio.NewReader(c)\n\n\t\t\treq, err := http.NewRequest(http.MethodConnect, \"\", nil)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treq.Host = address\n\n\t\t\terr = req.Write(c)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tresp, err := http.ReadResponse(r, req)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected status code %d\", 
resp.StatusCode)\n\t\t\t}\n\n\t\t\treturn &conn{Conn: c, r: r}, nil\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http.Request) (interface{}, error) {\n\t// Get the proxy ID. Note that this is the ID of a proxy's service instance.\n\tid := strings.TrimPrefix(req.URL.Path, \"/v1/agent/connect/proxy/\")\n\n\t// Maybe block\n\tvar queryOpts structs.QueryOptions\n\tif parseWait(resp, req, &queryOpts) {\n\t\t// parseWait returns an error itself\n\t\treturn nil, nil\n\t}\n\n\t// Parse the token - don't resolve a proxy token to a real token\n\t// that will be done with a call to verifyProxyToken later along with\n\t// other security relevant checks.\n\tvar token string\n\ts.parseTokenWithoutResolvingProxyToken(req, &token)\n\n\t// Parse hash specially since it's only this endpoint that uses it currently.\n\t// Eventually this should happen in parseWait and end up in QueryOptions but I\n\t// didn't want to make very general changes right away.\n\thash := req.URL.Query().Get(\"hash\")\n\n\treturn s.agentLocalBlockingQuery(resp, hash, &queryOpts,\n\t\tfunc(ws memdb.WatchSet) (string, interface{}, error) {\n\t\t\t// Retrieve the proxy specified\n\t\t\tproxy := s.agent.State.Proxy(id)\n\t\t\tif proxy == nil {\n\t\t\t\tresp.WriteHeader(http.StatusNotFound)\n\t\t\t\tfmt.Fprintf(resp, \"unknown proxy service ID: %s\", id)\n\t\t\t\treturn \"\", nil, nil\n\t\t\t}\n\n\t\t\t// Lookup the target service as a convenience\n\t\t\ttarget := s.agent.State.Service(proxy.Proxy.TargetServiceID)\n\t\t\tif target == nil {\n\t\t\t\t// Not found since this endpoint is only useful for agent-managed proxies so\n\t\t\t\t// service missing means the service was deregistered racily with this call.\n\t\t\t\tresp.WriteHeader(http.StatusNotFound)\n\t\t\t\tfmt.Fprintf(resp, \"unknown target service ID: %s\", proxy.Proxy.TargetServiceID)\n\t\t\t\treturn \"\", nil, nil\n\t\t\t}\n\n\t\t\t// Validate the ACL token - because this endpoint uses 
data local to a single\n\t\t\t// agent, this function is responsible for all enforcement regarding\n\t\t\t// protection of the configuration. verifyProxyToken will match the proxies\n\t\t\t// token to the correct service or in the case of being provide a real ACL\n\t\t\t// token it will ensure that the requester has ServiceWrite privileges\n\t\t\t// for this service.\n\t\t\t_, isProxyToken, err := s.agent.verifyProxyToken(token, target.Service, id)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil, err\n\t\t\t}\n\n\t\t\t// Watch the proxy for changes\n\t\t\tws.Add(proxy.WatchCh)\n\n\t\t\thash, err := hashstructure.Hash(proxy.Proxy, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil, err\n\t\t\t}\n\t\t\tcontentHash := fmt.Sprintf(\"%x\", hash)\n\n\t\t\t// Set defaults\n\t\t\tconfig, err := s.agent.applyProxyConfigDefaults(proxy.Proxy)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil, err\n\t\t\t}\n\n\t\t\t// Only merge in telemetry config from agent if the requested is\n\t\t\t// authorized with a proxy token. This prevents us leaking potentially\n\t\t\t// sensitive config like Circonus API token via a public endpoint. Proxy\n\t\t\t// tokens are only ever generated in-memory and passed via ENV to a child\n\t\t\t// proxy process so potential for abuse here seems small. This endpoint in\n\t\t\t// general is only useful for managed proxies now so it should _always_ be\n\t\t\t// true that auth is via a proxy token but inconvenient for testing if we\n\t\t\t// lock it down so strictly.\n\t\t\tif isProxyToken {\n\t\t\t\t// Add telemetry config. 
Copy the global config so we can customize the\n\t\t\t\t// prefix.\n\t\t\t\ttelemetryCfg := s.agent.config.Telemetry\n\t\t\t\ttelemetryCfg.MetricsPrefix = telemetryCfg.MetricsPrefix + \".proxy.\" + target.ID\n\n\t\t\t\t// First see if the user has specified telemetry\n\t\t\t\tif userRaw, ok := config[\"telemetry\"]; ok {\n\t\t\t\t\t// User specified domething, see if it is compatible with agent\n\t\t\t\t\t// telemetry config:\n\t\t\t\t\tvar uCfg lib.TelemetryConfig\n\t\t\t\t\tdec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{\n\t\t\t\t\t\tResult: &uCfg,\n\t\t\t\t\t\t// Make sure that if the user passes something that isn't just a\n\t\t\t\t\t\t// simple override of a valid TelemetryConfig that we fail so that we\n\t\t\t\t\t\t// don't clobber their custom config.\n\t\t\t\t\t\tErrorUnused: true,\n\t\t\t\t\t})\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tif err = dec.Decode(userRaw); err == nil {\n\t\t\t\t\t\t\t// It did decode! Merge any unspecified fields from agent config.\n\t\t\t\t\t\t\tuCfg.MergeDefaults(&telemetryCfg)\n\t\t\t\t\t\t\tconfig[\"telemetry\"] = uCfg\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// Failed to decode, just keep user's config[\"telemetry\"] verbatim\n\t\t\t\t\t// with no agent merge.\n\t\t\t\t} else {\n\t\t\t\t\t// Add agent telemetry config.\n\t\t\t\t\tconfig[\"telemetry\"] = telemetryCfg\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treply := &api.ConnectProxyConfig{\n\t\t\t\tProxyServiceID: proxy.Proxy.ProxyService.ID,\n\t\t\t\tTargetServiceID: target.ID,\n\t\t\t\tTargetServiceName: target.Service,\n\t\t\t\tContentHash: contentHash,\n\t\t\t\tExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()),\n\t\t\t\tCommand: proxy.Proxy.Command,\n\t\t\t\tConfig: config,\n\t\t\t\tUpstreams: proxy.Proxy.Upstreams.ToAPI(),\n\t\t\t}\n\t\t\treturn contentHash, reply, nil\n\t\t})\n}", "func (optr *Operator) getProxyConfig() (*httpproxy.Config, error) {\n\tproxy, err := optr.proxyLister.Get(\"cluster\")\n\n\tif apierrors.IsNotFound(err) {\n\t\treturn nil, 
nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &httpproxy.Config{\n\t\tHTTPProxy: proxy.Status.HTTPProxy,\n\t\tHTTPSProxy: proxy.Status.HTTPSProxy,\n\t\tNoProxy: proxy.Status.NoProxy,\n\t}, nil\n}", "func WithPROXYHeaderGetter(proxyHeaderGetter PROXYHeaderGetter) DialProxyOption {\n\treturn func(cfg *dialProxyConfig) {\n\t\tcfg.proxyHeaderGetter = proxyHeaderGetter\n\t}\n}", "func WithProxyAuth(auth ProxyAuthorization) opt {\n\treturn func(t *HttpTunnel) {\n\t\tt.auth = auth\n\t}\n}", "func WithProxy(ctx context.Context, addrs ...string) context.Context {\n\tif md, ok := metadata.FromContext(ctx); ok {\n\t\tmd[proxyKey] = append(md[proxyKey], addrs...)\n\t\treturn ctx\n\t}\n\treturn metadata.NewContext(ctx, metadata.MD{proxyKey: addrs})\n}", "func TestConnectProxyConfig(t testing.T) ConnectProxyConfig {\n\treturn ConnectProxyConfig{\n\t\tDestinationServiceName: \"web\",\n\t\tUpstreams: TestUpstreams(t),\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithRoleTokenConfig returns a role token config functional option
func WithRoleTokenConfig(cfg config.RoleToken) GRPCOption { return func(h *GRPCHandler) { h.roleCfg = cfg } }
[ "func (h *handler) RoleToken(w http.ResponseWriter, r *http.Request) error {\n\tdefer flushAndClose(r.Body)\n\n\tvar data model.RoleRequest\n\terr := json.NewDecoder(r.Body).Decode(&data)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttok, err := h.role(r.Context(), data.Domain, data.Role, data.ProxyForPrincipal, data.MinExpiry, data.MaxExpiry)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-type\", \"application/json; charset=utf-8\")\n\treturn json.NewEncoder(w).Encode(tok)\n}", "func WithToken(t string) OptionFunc {\n\treturn func(b *Bot) {\n\t\tb.conf.Token = t\n\t}\n}", "func AssumeRoleTokenProvider(provider func() (string, error)) SessionOption {\n\treturn func(options *session.Options) {\n\t\toptions.AssumeRoleTokenProvider = provider\n\t}\n}", "func TokenConfig(globalConfig *viper.Viper) (*token.Config, error) {\n\tsub := subconfig(globalConfig, tokenConfigKey)\n\treturn token.Initialize(sub)\n}", "func (a *authority) AuthorizeRoleToken(ctx context.Context, tok, act, res string) (Principal, error) {\n\treturn a.authorize(ctx, roleToken, tok, act, res, \"\", nil)\n}", "func Token(tokenURL string) func(*Config) {\n\treturn func(c *Config) {\n\t\tc.auth = \"token\"\n\t\tc.tokenURL = tokenURL\n\t}\n}", "func WithToken(tk string) Option {\n\treturn func(s *sess) error {\n\t\tif len(tk) == 0 {\n\t\t\treturn errors.NewErrInvalidOption(\"token\", tk)\n\t\t}\n\t\ts.token = tk\n\t\treturn nil\n\t}\n}", "func withRole(node *Role) roleOption {\n\treturn func(m *RoleMutation) {\n\t\tm.oldValue = func(context.Context) (*Role, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func (h *handler) RoleTokenProxy(w http.ResponseWriter, r *http.Request) error {\n\tdefer flushAndClose(r.Body)\n\n\trole := r.Header.Get(\"Athenz-Role\")\n\tdomain := r.Header.Get(\"Athenz-Domain\")\n\tprincipal := r.Header.Get(\"Athenz-Proxy-Principal\")\n\ttok, err := h.role(r.Context(), domain, role, principal, 0, 0)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tr.Header.Set(h.cfg.RoleAuthHeader, tok.Token)\n\th.proxy.ServeHTTP(w, r)\n\treturn nil\n}", "func (c VaultConfig) GetToken() string {\n\treturn c.Token\n}", "func TokenOption(token string) Option {\n\treturn func(opts *options) {\n\t\topts.Token = token\n\t}\n}", "func (c Config) WithToken(token string) Config {\n\tc.Token = token\n\treturn c\n}", "func WithToken(with string) wrapping.Option {\n\treturn func() interface{} {\n\t\treturn OptionFunc(func(o *options) error {\n\t\t\to.withToken = with\n\t\t\treturn nil\n\t\t})\n\t}\n}", "func WithToken(serviceBusNamespaceName string, spt *adal.ServicePrincipalToken) ManagementOption {\n\treturn func(l *Listener) error {\n\t\tif spt == nil {\n\t\t\treturn errors.New(\"cannot provide a nil token\")\n\t\t}\n\t\tns, err := servicebus.NewNamespace(servicebusinternal.NamespaceWithTokenProvider(serviceBusNamespaceName, aad.AsJWTTokenProvider(spt)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tl.namespace = ns\n\t\treturn nil\n\t}\n}", "func WithAdminToken(token string) TestContextOpt {\n\treturn func(tstContext *testContext) {\n\t\ttstContext.save.Globals.CachedToken = token\n\t}\n}", "func (a *authorizer) VerifyRoleToken(ctx context.Context, tok, act, res string) error {\n\treturn a.verify(ctx, token, tok, act, res)\n}", "func (cfg Config) GetToken() (token string) {\n\treturn cfg.Token\n}", "func newTokenConfig() TokenConfig {\n\treturn TokenConfig{\n\t\tName: \"My awesome token\",\n\t\tSymbol: \"MAT\",\n\t\tDecimals: decimals,\n\t\tOwner: owner,\n\t\tTotalSupply: 10000000 * multiplier,\n\t\tInitialAmount: 5000000 * multiplier,\n\t\tAmountPerNEO: 60 * multiplier,\n\t\tAmountPerGas: 40 * multiplier,\n\t\tMaxExchangeLimitRound: 500 * 60 * multiplier,\n\t\tSaleStart: 75500,\n\t\tLimitRoundEnd: 75500 + 10000,\n\t\tCirculationKey: []byte(\"in_circulation\"),\n\t\tLimitRoundKey: []byte(\"r1\"),\n\t\tKYCKey: []byte(\"kyc_ok\"),\n\t}\n}", "func MatchRoleToConfig(poolRole string, ec2Configs 
[]rancherEc2.AWSEC2Config) *rancherEc2.AWSEC2Config {\n\tfor _, config := range ec2Configs {\n\t\thasMatch := false\n\t\tfor _, configRole := range config.Roles {\n\t\t\tif strings.Contains(poolRole, configRole) {\n\t\t\t\thasMatch = true\n\t\t\t}\n\t\t}\n\t\tif hasMatch {\n\t\t\treturn &config\n\t\t}\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithAuthorizationd returns a authorizationd functional option
func WithAuthorizationd(a service.Authorizationd) GRPCOption { return func(h *GRPCHandler) { h.authorizationd = a } }
[ "func (c Client) WithAuthorization() PrepareDecorator {\n\treturn c.authorizer().WithAuthorization()\n}", "func Authorization(auth string) Opt {\n\treturn func(cl *Client) { cl.setHeader(\"Authorization\", auth) }\n}", "func WithAuthorizationHeader(h string) Option {\n\treturn func(s *QSvc) {\n\t\ts.authzHeader = h\n\t}\n}", "func WithAuthorization(hhandler http.Handler, auth authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler {\n\treturn withAuthorization(hhandler, auth, s, recordAuthorizationMetrics)\n}", "func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator {\n\treturn func(p Preparer) Preparer {\n\t\treturn PreparerFunc(func(r *http.Request) (*http.Request, error) {\n\t\t\tr, err := p.Prepare(r)\n\t\t\tif err != nil {\n\t\t\t\treturn r, err\n\t\t\t}\n\n\t\t\tsk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType)\n\t\t\tif err != nil {\n\t\t\t\treturn r, err\n\t\t\t}\n\t\t\treturn Prepare(r, WithHeader(headerAuthorization, sk))\n\t\t})\n\t}\n}", "func ContextWithAuthorization(ctx context.Context) context.Context {\n\treturn context.WithValue(ctx, isAuthorizedKey, true)\n}", "func authorizationFilter(host service.Host) FilterFunc {\n\treturn func(ctx context.Context, w http.ResponseWriter, r *http.Request, next Handler) {\n\t\tfxctx := &fxcontext.Context{\n\t\t\tContext: ctx,\n\t\t}\n\n\t\tif err := host.AuthClient().Authorize(fxctx); err != nil {\n\t\t\thost.Metrics().SubScope(\"http\").SubScope(\"auth\").Counter(\"fail\").Inc(1)\n\t\t\tfxctx.Logger().Error(auth.ErrAuthorization, \"error\", err)\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tfmt.Fprintf(w, \"Unauthorized access: %+v\", err)\n\t\t\treturn\n\t\t}\n\t\tnext.ServeHTTP(fxctx, w, r)\n\t}\n}", "func (c *Authorizer) WithAuthorization() autorest.PrepareDecorator {\n\treturn func(p autorest.Preparer) autorest.Preparer {\n\t\treturn autorest.PreparerFunc(func(req *http.Request) (*http.Request, error) {\n\t\t\tvar err error\n\t\t\treq, err = 
p.Prepare(req)\n\t\t\tif err == nil {\n\t\t\t\ttoken, err := c.Token()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treq, err = autorest.Prepare(req, autorest.WithHeader(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token.AccessToken)))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn req, err\n\t\t\t\t}\n\n\t\t\t\tauxTokens, err := c.AuxiliaryTokens()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn req, err\n\t\t\t\t}\n\n\t\t\t\tauxTokenList := make([]string, 0)\n\t\t\t\tfor _, a := range auxTokens {\n\t\t\t\t\tif a != nil && a.AccessToken != \"\" {\n\t\t\t\t\t\tauxTokenList = append(auxTokenList, fmt.Sprintf(\"%s %s\", a.TokenType, a.AccessToken))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn autorest.Prepare(req, autorest.WithHeader(\"x-ms-authorization-auxiliary\", strings.Join(auxTokenList, \", \")))\n\t\t\t}\n\n\t\t\treturn req, err\n\t\t})\n\t}\n}", "func (ca *policyAdapter) WithAuthorization() autorest.PrepareDecorator {\n\treturn func(p autorest.Preparer) autorest.Preparer {\n\t\treturn autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {\n\t\t\tr, err := p.Prepare(r)\n\t\t\tif err != nil {\n\t\t\t\treturn r, err\n\t\t\t}\n\t\t\t// create a dummy request\n\t\t\treq, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String())\n\t\t\tif err != nil {\n\t\t\t\treturn r, err\n\t\t\t}\n\t\t\t_, err = ca.pl.Do(req)\n\t\t\t// if the authentication failed due to invalid/missing credentials\n\t\t\t// return a wrapped error so the retry policy won't kick in.\n\t\t\ttype nonRetriable interface {\n\t\t\t\tNonRetriable()\n\t\t\t}\n\t\t\tvar nre nonRetriable\n\t\t\tif errors.As(err, &nre) {\n\t\t\t\treturn r, &tokenRefreshError{\n\t\t\t\t\tinner: err,\n\t\t\t\t}\n\t\t\t}\n\t\t\t// some other error\n\t\t\tif err != nil {\n\t\t\t\treturn r, err\n\t\t\t}\n\t\t\t// copy the authorization header to the real request\n\t\t\tconst authHeader = \"Authorization\"\n\t\t\tr.Header.Set(authHeader, req.Raw().Header.Get(authHeader))\n\t\t\treturn r, 
err\n\t\t})\n\t}\n}", "func Authorization(ctx context.Context) (string, error) {\n\treturn fromMeta(ctx, AuthKey)\n}", "func WithAuthorizer(a middleware.SwitchingAuthorizationHandler) Opts {\n\treturn func(s *Server) {\n\t\ts.authorizer = a\n\t}\n}", "func WithAuthorizationCheck(handler http.Handler, getAttribs RequestAttributeGetter, a authorizer.Authorizer) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\terr := a.Authorize(getAttribs.GetAttribs(req))\n\t\tif err == nil {\n\t\t\thandler.ServeHTTP(w, req)\n\t\t\treturn\n\t\t}\n\t\tforbidden(w, req)\n\t})\n}", "func (k Keeper) Authorization(c context.Context, req *types.QueryAuthorizationRequest) (*types.QueryAuthorizationResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"empty request\")\n\t}\n\n\tif req.MethodName == \"\" {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"empty method-name\")\n\t}\n\n\tgranter, err := sdk.AccAddressFromBech32(req.Granter)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgrantee, err := sdk.AccAddressFromBech32(req.Grantee)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx := sdk.UnwrapSDKContext(c)\n\n\tauthorization, expiration := k.GetOrRevokeAuthorization(ctx, grantee, granter, req.MethodName)\n\tif authorization == nil {\n\t\treturn nil, status.Errorf(codes.NotFound, \"no authorization found for %s type\", req.MethodName)\n\t}\n\n\tauthorizationAny, err := codectypes.NewAnyWithValue(authorization)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, err.Error())\n\t}\n\n\treturn &types.QueryAuthorizationResponse{\n\t\tAuthorization: &types.AuthorizationGrant{\n\t\t\tAuthorization: authorizationAny,\n\t\t\tExpiration: expiration,\n\t\t},\n\t}, nil\n}", "func Authorizer() security.Authorizer { return authorizer{} }", "func withRights() adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) 
{\n\t\t\t// No authentification to check\n\t\t\th.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func (o *BuiltInAuthenticationOptions) ApplyAuthorization(authorization *BuiltInAuthorizationOptions) {\n\tif o == nil || authorization == nil {\n\t\treturn\n\t}\n}", "func (ba *ExplicitBearerAuthorizer) WithAuthorization() autorest.PrepareDecorator {\n\treturn func(p autorest.Preparer) autorest.Preparer {\n\t\treturn autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {\n\t\t\tr, err := p.Prepare(r)\n\t\t\tif err == nil {\n\t\t\t\treturn autorest.Prepare(r, autorest.WithHeader(\"Authorization\", fmt.Sprintf(\"Bearer %s\", ba.token)))\n\t\t\t}\n\t\t\treturn r, err\n\t\t})\n\t}\n}", "func withAuthorizeID(id int) authorizeOption {\n\treturn func(m *AuthorizeMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Authorize\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Authorize, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, err = m.Client().Authorize.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func BearerAuth(token string) Opt {\n\treturn func(cl *Client) { cl.setHeader(\"Authorization\", \"Bearer \"+token) }\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the defender chance after accounting for modifiers
func (c *Conflict) GetModDefenderChance() int32 { // TODO: Return modifier defender chance return c.BaseChance() }
[ "func getDamage(attacker *pet, defender pet) (float64) {\n\tif doesCrit(attacker) {\n\t\treturn 2*(attacker.EffectiveATK * (100.00/(defender.EffectiveDEF + 100.00)))\n\t}\n\t\n\treturn attacker.EffectiveATK * (100.00/(defender.EffectiveDEF + 100.00))\n}", "func (g Gun)calculateDamage(bodyPart BodyArmor,attacker *Character, defender *Character)float64{\n\tcalibreIdx := getCalibreIdxFromCalibre(g.Calibre)\n\tdmgModifier := BODY_DAMAGE_MODIFIERS[bodyPart]\n\tdistance := calculateDistance(attacker.Location,defender.Location,attacker.isIndoor())\n\tdistanceModifier := math.Pow(CALIBRE_DAMAGE_FALLOFF[calibreIdx],distance/BULLET_DROPOFF_DISTANCE)\n\tbaseDamage := g.MaxDamage\n\tarmorDurability := defender.Armor[bodyPart].Durability/100\n\tbulletproofModifier := (100-defender.Armor[bodyPart].Bulletproof*(armorDurability))/100\n\tbulletAppropriateModifier := math.Abs(g.LoadedMagazine.ArmorPiercing-bulletproofModifier*100)/100\n\tbulletproofModifier = bulletproofModifier+bulletAppropriateModifier*BULLETPROOF_APPROPRIATE_MODIFIER\n\tdamage := baseDamage*dmgModifier*distanceModifier*bulletproofModifier\n\tdamage = pickRandomVariedAround(damage,DAMAGE_RANDOMNESS)\n\tif damage<0{\n\t\tdamage = 0\n\t}\n\tif LOG_MODE>=1{\n\t\tfmt.Printf(\"%s did %f damage to %s\\n\",attacker.Name,damage,defender.Name)\n\t}\n\tif LOG_MODE==DEBUG{\n\t\tfmt.Printf(\"Body part damage modifier: %f\\n\",dmgModifier)\n\t\tfmt.Printf(\"Distance: %f\\n\",distance)\n\t\tfmt.Printf(\"Distance modifier: %f\\n\",distanceModifier)\n\t\tfmt.Printf(\"Base damage: %f\\n\",baseDamage)\n\t\tfmt.Printf(\"Armor durability: %f\\n\",armorDurability)\n\t\tfmt.Printf(\"Bulletproof modifier: %f\\n\",bulletproofModifier)\n\t\tfmt.Printf(\"Bullet appropriate modifier: %f\\n\",bulletAppropriateModifier)\n\t}\n\n\treturn damage\n}", "func (m *Monster) BaseDamage() int {\n\tswitch m.id {\n\tcase Bat: // bats deal a base of 1 always\n\t\treturn 1\n\tdefault:\n\t\td := m.Info.Dmg\n\t\tif d < 1 {\n\t\t\td++\n\t\t} else 
{\n\t\t\td += rand.Intn(d)\n\t\t}\n\t\td += m.Info.Lvl\n\t\treturn d\n\t}\n}", "func chance(line *base.Line) {\n\tstr := line.Args[1]\n\tvar chance float64\n\n\tif strings.HasSuffix(str, \"%\") {\n\t\t// Handle 'chance of that is \\d+%'\n\t\tif i, err := strconv.Atoi(str[:len(str)-1]); err != nil {\n\t\t\tbot.ReplyN(line, \"'%s' didn't look like a % chance to me.\", str)\n\t\t\treturn\n\t\t} else {\n\t\t\tchance = float64(i) / 100\n\t\t}\n\t} else {\n\t\t// Assume the chance is a floating point number.\n\t\tif c, err := strconv.ParseFloat(str, 64); err != nil {\n\t\t\tbot.ReplyN(line, \"'%s' didn't look like a chance to me.\", str)\n\t\t\treturn\n\t\t} else {\n\t\t\tchance = c\n\t\t}\n\t}\n\n\t// Make sure the chance we've parsed lies in (0.0,1.0]\n\tif chance > 1.0 || chance <= 0.0 {\n\t\tbot.ReplyN(line, \"'%s' was outside possible chance ranges.\", str)\n\t\treturn\n\t}\n\n\t// Retrieve last seen ObjectId, replace with \"\"\n\tls := LastSeen(line.Args[0], \"\")\n\t// ok, we're good to update the chance.\n\tif fact := fc.GetById(ls); fact != nil {\n\t\t// Store the old chance, update with the new\n\t\told := fact.Chance\n\t\tfact.Chance = chance\n\t\t// Update the Modified field\n\t\tfact.Modify(line.Storable())\n\t\t// And store the new factoid data\n\t\tif err := fc.Update(bson.M{\"_id\": ls}, fact); err == nil {\n\t\t\tbot.ReplyN(line, \"'%s' was at %.0f%% chance, now is at %.0f%%.\",\n\t\t\t\tfact.Key, old*100, chance*100)\n\t\t} else {\n\t\t\tbot.ReplyN(line, \"I failed to replace '%s': %s\", fact.Key, err)\n\t\t}\n\t} else {\n\t\tbot.ReplyN(line, \"Whatever that was, I've already forgotten it.\")\n\t}\n}", "func (d *Decoder) Probability() int32 {\n\treturn pocketsphinx.GetProb(d.dec)\n}", "func (d *Degradater) RandomPercent() int {\n\treturn defaultSafeRander.Intn(101)\n}", "func (o *observation) Probability() float64 {\n\tif !o.particle.isConfirmed() {\n\t\treturn o.process.probability\n\t} else if o.particle.confirm() {\n\t\treturn 1.0\n\t} else 
{\n\t\treturn 0\n\t}\n}", "func GetDrawProbability(epsilon, beta float64, nPlayers uint) float64 {\n\treturn 2*stats.NormalCDF(epsilon/(math.Sqrt(float64(nPlayers))*beta)) - 1\n}", "func GetProbability(l uint) float64 {\n\treturn 1.0 / math.Pow(2.0, float64(l))\n}", "func (c *Cell) ApplyChance(total uint64) {\n\tif total < c.count {\n\t\treturn\n\t}\n\tc.chance = (float64(c.count) / float64(total)) * 100.0\n}", "func (c *Creature) DetermineHealingRate() *Attribute {\n\n\thealingRate := &Attribute{\n\t\tName: \"Healing Rate\",\n\t\tMax: 21,\n\t}\n\n\tcon := c.Statistics[\"CON\"]\n\n\tcon.UpdateStatistic()\n\ttCon := con.Total\n\n\tswitch {\n\tcase tCon < 7:\n\t\thealingRate.Base = 1\n\tcase tCon < 13:\n\t\thealingRate.Base = 2\n\tcase tCon < 19:\n\t\thealingRate.Base = 3\n\tcase tCon > 18:\n\t\thealingRate.Base = ((tCon - 18) / 6) + 3\n\t}\n\thealingRate.Total = healingRate.Base + healingRate.Value\n\treturn healingRate\n}", "func (c *Character)GetWeaponAbilityModifier(w rules.Item) (int, string) {\n\tstrMod := c.GetModifier(Str)\n\tdexMod := c.GetModifier(Dex)\n\n\tfrags := []string{}\n\tmod := 0\n\n\tif w.HasProperty(\"Finesse\") {\n\t\tif strMod > dexMod {\n\t\t\tfrags = append(frags, fmt.Sprintf(\"{finesse; str %+d}\", strMod))\n\t\t\tmod = strMod\n\t\t} else {\n\t\t\tfrags = append(frags, fmt.Sprintf(\"{finesse; dex %+d}\", dexMod))\n\t\t\tmod = dexMod\n\t\t}\n\n\t} else {\n\t\t// First, basic attr bonus (melee or ranged)\n\t\tswitch w.WeaponRange {\n\t\tcase \"Melee\":\n\t\t\tmod = strMod\n\t\t\tfrags = append(frags, fmt.Sprintf(\"{melee; str %+d}\", strMod))\n\n\t\tcase \"Ranged\":\n\t\t\tmod = dexMod\n\t\t\tfrags = append(frags, fmt.Sprintf(\"{ranged; dex %+d}\", dexMod))\n\t\t}\n\t}\n\n\treturn mod, strings.Join(frags, \" \")\n}", "func (atk *Attack) Defender() interfaces.Ship { return atk.defender }", "func (e *Explosion) MaxDamage() float64 {\n\treturn 100 + e.Strength*10\n}", "func (e Enemy) Damage() int {\n\treturn 10 * e.Level\n}", "func 
boostChance(data *BoostCheckData) (int, []string) {\n\treasons := make([]string, 0)\n\tchance := 0\n\n\t// get region with highest winrate\n\thighestWr := 0\n\tvar highestWrRegion string\n\tfor region, wr := range data.Winrates {\n\t\tif wr > highestWr {\n\t\t\thighestWr = wr\n\t\t\thighestWrRegion = region\n\t\t}\n\t}\n\n\twrOnMostPlayedServer := data.Winrates[data.MostPlayedServer]\n\twrDiff := float64(highestWr - wrOnMostPlayedServer)\n\tchance = int(math.Min(wrDiff*3, 99.0))\n\twrDifference := \"Winrate on most played region (\" + data.MostPlayedServer + \", \" + strconv.Itoa(wrOnMostPlayedServer) + \"%) was \" + strconv.FormatFloat(wrDiff, 'f', 1, 64) + \" less than the highest winrate (\" + highestWrRegion + \", \" + strconv.Itoa(highestWr) + \"%)\"\n\treasons = append(reasons, wrDifference)\n\n\treturn chance, reasons\n}", "func (c *Creature) DetermineDamageBonus() *Attribute {\n\n\tdamageBonus := &Attribute{\n\t\tName: \"Damage Bonus\",\n\t\tMax: 21,\n\t\tDice: 1,\n\t}\n\n\tstr := c.Statistics[\"STR\"]\n\tsiz := c.Statistics[\"SIZ\"]\n\n\tstr.UpdateStatistic()\n\tsiz.UpdateStatistic()\n\n\tdb := siz.Total + str.Total\n\n\tswitch {\n\tcase db < 13:\n\t\tdamageBonus.Base = -4\n\t\tdamageBonus.Text = \"-1D4\"\n\tcase db < 25:\n\t\tdamageBonus.Base = 0\n\t\tdamageBonus.Text = \"-\"\n\tcase db < 33:\n\t\tdamageBonus.Base = 4\n\t\tdamageBonus.Text = \"+1D4\"\n\tcase db < 41:\n\t\tdamageBonus.Base = 6\n\t\tdamageBonus.Text = \"+1D6\"\n\tcase db < 57:\n\t\tdamageBonus.Base = 6\n\t\tdamageBonus.Dice = 2\n\t\tdamageBonus.Text = \"+2D6\"\n\tcase db > 56:\n\t\tdamageBonus.Base = 6\n\t\tdamageBonus.Dice = ((db - 56) / 16) + 2\n\t\tdamageBonus.Text = fmt.Sprintf(\"+%dD%d\",\n\t\t\tdamageBonus.Dice,\n\t\t\tdamageBonus.Base,\n\t\t)\n\t}\n\n\treturn damageBonus\n}", "func (f *Faker) Gender() string { return gender(f.Rand) }", "func Gender() string { return gender(globalFaker.Rand) }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
QUERIES Checks if province is the location of a conflict
func (s *State) IsSiteOfConflict(id pb.ProvinceId) bool { for _, c := range s.Conflicts { for _, l := range c.Locations() { if l == id { return true } } } return false }
[ "func checkProvinceValid(citizenNo []byte) bool {\n\tprovinceCode := make([]byte, 0)\n\tprovinceCode = append(provinceCode, citizenNo[:2]...)\n\tprovinceStr := string(provinceCode)\n\n\t// 判断省份/地区是否合规\n\tif _, ok := validProvince[provinceStr]; ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *Ga4ghPatient) GetProvinceOfResidenceOk() (string, bool) {\n\tif o == nil || o.ProvinceOfResidence == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.ProvinceOfResidence, true\n}", "func (o *Ga4ghPatient) HasProvinceOfResidence() bool {\n\tif o != nil && o.ProvinceOfResidence != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (w *Worker) In(region string) bool { return region == \"\" || region == w.Zone }", "func verifyProvinceKTP(customer *models.Customer) bool {\n\tsliceProv, _ := strconv.Atoi(customer.Ktp[0:2])\n\n\tvar provPermitted = map[int]string{\n\t\t12: \"Sumatera Utara\",\n\t\t31: \"DKI Jakarta\",\n\t\t32: \"Jawa Barat\",\n\t\t35: \"Jawa Timur\",\n\t}\n\n\t_, isExist := provPermitted[sliceProv]\n\n\treturn isExist\n}", "func (db *Db) SearchCityProvince(query string) ([]*locationpb.SearchCityCountry, error) {\n\tvar cities []*locationpb.SearchCityCountry\n\t_, err := db.Client.Query(&cities, `SELECT search_result, city_id FROM \"public\".search_city_province(?)`, query)\n\treturn cities, err\n}", "func (m CrossOrderCancelReplaceRequest) HasStateOrProvinceOfIssue() bool {\n\treturn m.Has(tag.StateOrProvinceOfIssue)\n}", "func checkresult(action string, photolng float64, citylng float64) bool{\n\treturn (photolng <= citylng && action == \"West\") || (photolng >= citylng && action == \"East\")\n}", "func (c *StudentClient) QueryProvince(s *Student) *ProvinceQuery {\n\tquery := &ProvinceQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := s.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(student.Table, student.FieldID, id),\n\t\t\tsqlgraph.To(province.Table, 
province.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, student.ProvinceTable, student.ProvinceColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(s.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func (s *State) GetConflict(location pb.ProvinceId) *Conflict {\n\treturn s.Conflicts[location]\n}", "func (c *DistrictClient) QueryProvince(d *District) *ProvinceQuery {\n\tquery := &ProvinceQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := d.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(district.Table, district.FieldID, id),\n\t\t\tsqlgraph.To(province.Table, province.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, district.ProvinceTable, district.ProvinceColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(d.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func addressIn(location map[string]interface{}, country string) bool {\n\tprops, ok := location[\"props\"]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tc, ok := (props.(map[string]interface{}))[\"country\"]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif c == country {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (v *Vehicle) InCity() bool {\n if v.City == nil {\n return false\n }\n return v.City.Vehicles.Stores(v)\n}", "func (ctl *AddressAPIController) ShowProvince(c echo.Context) (err error) {\n\tmodel := models.Province{}\n\tif err := ctl.DB(nil).Select([]string{\"CH_ID\", \"CHANGWAT_E\", \"CHANGWAT_T\"}).\n\t\tWhere(\"CH_ID = ?\", c.Param(\"id\")).\n\t\tGroup(\"CH_ID\").\n\t\tFirst(&model).Error; gorm.IsRecordNotFoundError(err) {\n\t\treturn &exceptions.ErrorException{\n\t\t\tMessage: \"Not found.\",\n\t\t\tErrorKey: \"not-found\",\n\t\t\tCode: http.StatusNotFound,\n\t\t}\n\t}\n\treturn c.JSON(http.StatusOK, map[string]interface{}{\n\t\t\"data\": model,\n\t})\n}", "func (c *SubdistrictClient) QueryProvince(s *Subdistrict) *ProvinceQuery {\n\tquery := &ProvinceQuery{config: c.config}\n\tquery.path = 
func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := s.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(subdistrict.Table, subdistrict.FieldID, id),\n\t\t\tsqlgraph.To(province.Table, province.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, subdistrict.ProvinceTable, subdistrict.ProvinceColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(s.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func (m *ProvinceMutation) ProvinceName() (r string, exists bool) {\n\tv := m._Province_Name\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (l *Location) IsValid() bool {\n\tif len(l.Country) == 0 {\n\t\treturn false\n\t}\n\tif GetCountry(string(l.Country)) == l.Country {\n\t\treturn true\n\t}\n\treturn false\n}", "func CheckCountryCode (SenderCountryCode string, ReceiverCountryCode string) error {\n //I would rather have been mapping this, or use the API github.com/biter777/countries if i where to use country names.\n worlds := []string{\"AF\",\"AX\",\"AL\",\"DZ\",\"AS\",\"AD\",\"AO\",\"AI\",\"AQ\",\"AG\",\"AR\",\"AM\",\"AW\",\"AU\",\"AT\",\"AZ\",\"BS\",\"BH\",\"BD\",\"BB\",\"BY\",\"BE\",\"BZ\",\n \"BJ\",\"BM\",\"BT\",\"BO\",\"BA\",\"BW\",\"BV\",\"BR\",\"IO\",\"BN\",\"BG\",\"BF\",\"BI\",\"KH\",\"CM\",\"CA\",\"CV\",\"KY\",\"CF\",\"TD\",\"CL\",\"CN\",\"CX\",\n \"CC\",\"CO\",\"KM\",\"CG\",\"CD\",\"CK\",\"CR\",\"CI\",\"HR\",\"CU\",\"CY\",\"CZ\",\"DK\",\"DJ\",\"DM\",\"DO\",\"EC\",\"EG\",\"SV\",\"GQ\",\"ER\",\"EE\",\"ET\",\n \"FK\",\"FO\",\"FJ\",\"FI\",\"FR\",\"GF\",\"PF\",\"TF\",\"GA\",\"GM\",\"GE\",\"DE\",\"GH\",\"GI\",\"GR\",\"GL\",\"GD\",\"GP\",\"GU\",\"GT\",\"GG\",\"GN\",\"GW\",\n \"GY\",\"HT\",\"HM\",\"VA\",\"HN\",\"HK\",\"HU\",\"IS\",\"IN\",\"ID\",\"IR\",\"IQ\",\"IE\",\"IM\",\"IL\",\"IT\",\"JM\",\"JP\",\"JE\",\"JO\",\"KZ\",\"KE\",\"KI\",\n \"KR\",\"KW\",\"KG\",\"LA\",\"LV\",\"LB\",\"LS\",\"LR\",\"LY\",\"LI\",\"LT\",\"LU\",\"MO\",\"MK\",\"MG\",\"MW\",\"MY\",\"MV\",\"ML\",\"MT\",\"MH\",\"MQ\",\"MR\",\n 
\"MU\",\"YT\",\"MX\",\"FM\",\"MD\",\"MC\",\"MN\",\"ME\",\"MS\",\"MA\",\"MZ\",\"MM\",\"NA\",\"NR\",\"NP\",\"NL\",\"AN\",\"NC\",\"NZ\",\"NI\",\"NE\",\"NG\",\"NU\",\n \"NF\",\"MP\",\"NO\",\"OM\",\"PK\",\"PW\",\"PS\",\"PA\",\"PG\",\"PY\",\"PE\",\"PH\",\"PN\",\"PL\",\"PT\",\"PR\",\"QA\",\"RE\",\"RO\",\"RU\",\"RW\",\"BL\",\"SH\",\n \"KN\",\"LC\",\"MF\",\"PM\",\"VC\",\"WS\",\"SM\",\"ST\",\"SA\",\"SN\",\"RS\",\"SC\",\"SL\",\"SG\",\"SK\",\"SI\",\"SB\",\"SO\",\"ZA\",\"GS\",\"ES\",\"LK\",\"SD\",\n \"SR\",\"SJ\",\"SZ\",\"SE\",\"CH\",\"SY\",\"TW\",\"TJ\",\"TZ\",\"TH\",\"TL\",\"TG\",\"TK\",\"TO\",\"TT\",\"TN\",\"TR\",\"TM\",\"TC\",\"TV\",\"UG\",\"UA\",\"AE\",\n \"GB\",\"US\",\"UM\",\"UY\",\"UZ\",\"VU\",\"VE\",\"VN\",\"VG\",\"VI\",\"WF\",\"EH\",\"YE\",\"ZM\",\"ZW\"}\n\n i := ValidateSenderCountry(worlds, SenderCountryCode)\n k := ValidateReceiverCountry(worlds, ReceiverCountryCode)\n\n if k == true && i == true {\n return nil\n }\n\treturn errors.New(\"invalid\")\n}", "func (h *serviceHandler) GeoFind(address string, countries []string) (bool, error) {\n\tlog := logrus.WithFields(logrus.Fields{\n\t\t\"address\": address,\n\t\t\"countries\": countries,\n\t})\n\n\tcountry, err := h.geoHandler.FindCountry(address)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not get geo info. err: %v\", err)\n\t\treturn false, err\n\t}\n\n\t// check the country is in the list\n\tfor _, c := range countries {\n\t\tif c == country {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\t// not listed\n\treturn false, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if province is at war (not necessarily location of conflict)
func (s *State) IsAtWar(id pb.ProvinceId) bool { for _, c := range s.Conflicts { for _, a := range c.Attackers() { if a == id { return true } } for _, d := range c.Defenders() { if d == id { return true } } } return false }
[ "func (s *State) IsSiteOfConflict(id pb.ProvinceId) bool {\n\tfor _, c := range s.Conflicts {\n\t\tfor _, l := range c.Locations() {\n\t\t\tif l == id {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func checkProvinceValid(citizenNo []byte) bool {\n\tprovinceCode := make([]byte, 0)\n\tprovinceCode = append(provinceCode, citizenNo[:2]...)\n\tprovinceStr := string(provinceCode)\n\n\t// 判断省份/地区是否合规\n\tif _, ok := validProvince[provinceStr]; ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func (p *Player) IsInFogOfWar(c Coordinate) bool {\n\tswitch p.state {\n\tcase Alive:\n\t\t_, inFoW := p.fogOfWar[c]\n\t\treturn inFoW\n\tcase PendingRevival:\n\t\treturn true\n\tcase Dead, LeftGame:\n\t\treturn false\n\t}\n\tpanic(\"unknown player state\")\n}", "func (s *State) NewCivilWar(target pb.ProvinceId) bool { // TODO: Error return\n\tif s.IsAtWar(target) || s.IsSiteOfConflict(target) {\n\t\treturn false\n\t}\n\tc := &Conflict{\n\t\tname: \"Civil War\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\trebels: *(s.Get(target).Dissidents()),\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: []pb.ProvinceId{target},\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_CIVIL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_CIVIL_WAR),\n\t\tlocations: []pb.ProvinceId{target},\n\t\tconflict_type: pb.ConflictType_CIVIL_WAR,\n\t}\n\ts.Conflicts[target] = c\n\treturn true\n}", "func (w *Worker) In(region string) bool { return region == \"\" || region == w.Zone }", "func (s *State) NewColonialWar(target pb.ProvinceId) bool { // TODO: Error return\n\tif s.IsAtWar(target) || s.IsSiteOfConflict(target) || s.Get(target).Occupier() != pb.ProvinceId_NONE {\n\t\treturn false\n\t}\n\tc := &Conflict{\n\t\tname: \"Colonial War\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\t// Dissidents\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: 
[]pb.ProvinceId{s.Get(target).Occupier()},\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_COLONIAL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_COLONIAL_WAR),\n\t\tlocations: []pb.ProvinceId{target},\n\t\tconflict_type: pb.ConflictType_COLONIAL_WAR,\n\t}\n\ts.Conflicts[target] = c\n\treturn true\n}", "func (chile chileDeprecatedTimeZones) EasterIsland() string { return \"Pacific/Easter\" }", "func (s *State) NewConventionalWar(defenders []pb.ProvinceId, attackers []pb.ProvinceId, locations []pb.ProvinceId) bool { // TODO: Error return\n\tfor _, d := range defenders {\n\t\tif s.IsAtWar(d) || s.IsSiteOfConflict(d) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, a := range attackers {\n\t\tif s.IsAtWar(a) || s.IsSiteOfConflict(a) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, l := range locations {\n\t\tif s.IsAtWar(l) || s.IsSiteOfConflict(l) {\n\t\t\treturn false\n\t\t}\n\t}\n\t// TODO: Logic for joining wars?\n\tc := &Conflict{\n\t\tname: \"War!\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\tmembers: attackers,\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: defenders,\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_CONVENTIONAL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_CONVENTIONAL_WAR),\n\t\tlocations: locations,\n\t\tconflict_type: pb.ConflictType_CONVENTIONAL_WAR,\n\t}\n\t// For now it maps only to the first location\n\ts.Conflicts[locations[0]] = c\n\treturn true\n}", "func (o *Ga4ghPatient) HasProvinceOfResidence() bool {\n\tif o != nil && o.ProvinceOfResidence != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Ga4ghPatient) GetProvinceOfResidenceOk() (string, bool) {\n\tif o == nil || o.ProvinceOfResidence == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.ProvinceOfResidence, true\n}", "func verifyProvinceKTP(customer *models.Customer) bool {\n\tsliceProv, _ := 
strconv.Atoi(customer.Ktp[0:2])\n\n\tvar provPermitted = map[int]string{\n\t\t12: \"Sumatera Utara\",\n\t\t31: \"DKI Jakarta\",\n\t\t32: \"Jawa Barat\",\n\t\t35: \"Jawa Timur\",\n\t}\n\n\t_, isExist := provPermitted[sliceProv]\n\n\treturn isExist\n}", "func (usaTz uSATimeZones) VirginIslands() string {return \"America/Port_of_Spain\" }", "func (antar antarcticaDeprecatedTimeZones) South_Pole() string { return \"Pacific/Auckland\" }", "func (w Workspace) IsBehind(ctx context.Context) bool {\n\tnodes := GetModelContext(ctx).Nodes\n\n\tfor _, id := range w.ProjectIDs {\n\t\tnode := nodes.MustLoadProject(id)\n\t\tif node.IsBehind {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (ctl *AddressAPIController) ShowProvince(c echo.Context) (err error) {\n\tmodel := models.Province{}\n\tif err := ctl.DB(nil).Select([]string{\"CH_ID\", \"CHANGWAT_E\", \"CHANGWAT_T\"}).\n\t\tWhere(\"CH_ID = ?\", c.Param(\"id\")).\n\t\tGroup(\"CH_ID\").\n\t\tFirst(&model).Error; gorm.IsRecordNotFoundError(err) {\n\t\treturn &exceptions.ErrorException{\n\t\t\tMessage: \"Not found.\",\n\t\t\tErrorKey: \"not-found\",\n\t\t\tCode: http.StatusNotFound,\n\t\t}\n\t}\n\treturn c.JSON(http.StatusOK, map[string]interface{}{\n\t\t\"data\": model,\n\t})\n}", "func (data *Invasion) AnyCitiesLeft() bool {\n return len(data.AllCities()) != 0\n}", "func (atlan atlanticDeprecatedTimeZones) St_Helena() string { return \"Africa/Abidjan\" }", "func (m CrossOrderCancelReplaceRequest) HasStateOrProvinceOfIssue() bool {\n\treturn m.Has(tag.StateOrProvinceOfIssue)\n}", "func (ameri americaDeprecatedTimeZones) Louisville() string { return \"America/Kentucky/Louisville\" }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a conflict by location
func (s *State) GetConflict(location pb.ProvinceId) *Conflict { return s.Conflicts[location] }
[ "func (b *BranchDAG) Conflict(conflictID ConflictID) *CachedConflict {\n\treturn &CachedConflict{CachedObject: b.conflictStorage.Load(conflictID.Bytes())}\n}", "func (s *server) ResolveConflict(ctx context.Context, in *proto_job.ResultRequest) (*proto_job.ResultReply, error) {\n\tlog.Print(\"ResolveConflict\")\n\treturn s.resultService.ResolveConflict(in)\n}", "func NewConflictResolver(\n\tconfig Config, fbo *folderBranchOps) *ConflictResolver {\n\t// make a logger with an appropriate module name\n\tbranchSuffix := \"\"\n\tif fbo.branch() != data.MasterBranch {\n\t\tbranchSuffix = \" \" + string(fbo.branch())\n\t}\n\ttlfStringFull := fbo.id().String()\n\tlog := config.MakeLogger(\n\t\tfmt.Sprintf(\"CR %s%s\", tlfStringFull[:8], branchSuffix))\n\n\tcr := &ConflictResolver{\n\t\tconfig: config,\n\t\tfbo: fbo,\n\t\tprepper: folderUpdatePrepper{\n\t\t\tconfig: config,\n\t\t\tfolderBranch: fbo.folderBranch,\n\t\t\tblocks: &fbo.blocks,\n\t\t\tlog: log,\n\t\t\tvlog: config.MakeVLogger(log),\n\t\t},\n\t\tlog: traceLogger{log},\n\t\tdeferLog: traceLogger{log.CloneWithAddedDepth(1)},\n\t\tmaxRevsThreshold: crMaxRevsThresholdDefault,\n\t\tcurrInput: conflictInput{\n\t\t\tunmerged: kbfsmd.RevisionUninitialized,\n\t\t\tmerged: kbfsmd.RevisionUninitialized,\n\t\t},\n\t}\n\n\tif fbo.bType == standard && config.Mode().ConflictResolutionEnabled() {\n\t\tcr.startProcessing(libcontext.BackgroundContextWithCancellationDelayer())\n\t}\n\treturn cr\n}", "func Conflict(msg string) Error {\n\te := err{msg: msg, code: conflictCode, group: generic, kind: conflict}\n\treturn &e\n}", "func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location {\n\tsg.RLock()\n\tdefer sg.RUnlock()\n\tif sg.remain < len(sg.sources) && sg.firstLocation != nil {\n\t\t// create a new location to return\n\t\tlocation := sg.firstLocation.CloneWithFlavor(sg.flavor)\n\t\treturn &location\n\t}\n\titem := sg.meta.GetGlobalActiveDDL()\n\tif item != nil {\n\t\t// make a new copy\n\t\tlocation := 
item.FirstLocation.CloneWithFlavor(sg.flavor)\n\t\treturn &location\n\t}\n\treturn nil\n}", "func Conflict(id Identifier) Constraint {\n\treturn conflict(id)\n}", "func (service *ResultService) ResolveConflict(in *proto_job.ResultRequest) (*proto_job.ResultReply, error) {\n\tresult, err := service.accessor.GetByID(uint(in.Id))\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result.ID == 0 {\n\t\tlog.Fatal(\"Conflict not found in SetResultState\")\n\t}\n\n\tresult.State = \"RESOLVED\"\n\tresult.TaxonID = uint(in.TaxonId)\n\terr = service.accessor.Save(result)\n\n\treturn converters.ResultModelToProto(result), err\n}", "func (s *Service) Conflicts() []string {\n\treturn s.conflicts\n}", "func (svc *service) Get(ctx context.Context, r *getLocationRequest) (*locationResponse, error) {\n\tlocations, err := svc.repo.Get(r.Referrer, r.UserIDs)\n\tif err != nil {\n\t\treturn &locationResponse{nil, errors.NewErr(http.StatusInternalServerError, err.Error())}, nil\n\t}\n\treturn &locationResponse{locations, nil}, nil\n}", "func (b Bucket) GetLocation(args ...Params) (string, error) {\n\theader, query := getHeaderQuery(args)\n\tquery.Set(\"location\", \"\")\n\tvar location string\n\terr := b.Do(\"GET\", \"\", nil, &location, header, query)\n\treturn location, err\n}", "func NewGetBannersConflict() *GetBannersConflict {\n\treturn &GetBannersConflict{}\n}", "func (rc ResponseController) GetLocation(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tid := p.ByName(\"id\")\n\tfmt.Println(\"GET Request: ID:\", id)\n\n\tresp, err := getDBData(id, rc)\n\tif err != nil {\n\t w.WriteHeader(404)\n\t\tfmt.Println(\"Response: 404 Not Found\")\n\t\treturn\n\t}\n\n\tjsonOut, _ := json.Marshal(resp)\n\thttpResponse(w, jsonOut, 200)\n\tfmt.Println(\"Response:\", string(jsonOut), \" 200 OK\")\n}", "func (l *raftLog) findConflict(from uint64, ents []pb.Entry) uint64 {\n\t// TODO(xiangli): validate the index of ents\n\tfor i, ne := range ents {\n\t\tif oe := l.at(from + 
uint64(i)); oe == nil || oe.Term != ne.Term {\n\t\t\treturn from + uint64(i)\n\t\t}\n\t}\n\treturn 0\n}", "func Conflict(id, format string, a ...interface{}) error {\n\treturn &Error{\n\t\tId: id,\n\t\tCode: 409,\n\t\tDetail: fmt.Sprintf(format, a...),\n\t\tStatus: http.StatusText(409),\n\t}\n}", "func ConflictResolve(existingVal Value, existingState State, newVal Value, newState State, sysState State) ConflictResolution {\n\t// Existing gunjs impl serializes to JSON first to do lexical comparisons, so we will too\n\tif sysState < newState {\n\t\treturn ConflictResolutionTooFutureDeferred\n\t} else if newState < existingState {\n\t\treturn ConflictResolutionOlderHistorical\n\t} else if existingState < newState {\n\t\treturn ConflictResolutionNewerUpdate\n\t} else if existingVal == newVal {\n\t\treturn ConflictResolutionSameKeep\n\t} else if existingJSON, err := json.Marshal(existingVal); err != nil {\n\t\tpanic(err)\n\t} else if newJSON, err := json.Marshal(newVal); err != nil {\n\t\tpanic(err)\n\t} else if bytes.Compare(existingJSON, newJSON) < 0 {\n\t\treturn ConflictResolutionSameUpdate\n\t} else {\n\t\treturn ConflictResolutionSameKeep\n\t}\n}", "func LocationGet(l int) Location {\n\ti := Locationmap[l]\n\treturn i\n}", "func (d *dirInode) lookUpConflicting(\n\tctx context.Context,\n\tname string) (result LookUpResult, err error) {\n\tstrippedName := strings.TrimSuffix(name, ConflictingFileNameSuffix)\n\n\t// In order to a marked name to be accepted, we require the conflicting\n\t// directory to exist.\n\tvar dirResult LookUpResult\n\tdirResult, err = d.lookUpChildDir(ctx, strippedName)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"lookUpChildDir for stripped name: %v\", err)\n\t\treturn\n\t}\n\n\tif !dirResult.Exists() {\n\t\treturn\n\t}\n\n\t// The directory name exists. 
Find the conflicting file.\n\tresult, err = d.lookUpChildFile(ctx, strippedName)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"lookUpChildFile for stripped name: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}", "func Conflict(message ...interface{}) Err {\n\treturn Boomify(http.StatusConflict, message...)\n}", "func NewGetSecurityGroupConflict() *GetSecurityGroupConflict {\n\treturn &GetSecurityGroupConflict{}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ACTIONS Creates a new conventional war
func (s *State) NewConventionalWar(defenders []pb.ProvinceId, attackers []pb.ProvinceId, locations []pb.ProvinceId) bool { // TODO: Error return for _, d := range defenders { if s.IsAtWar(d) || s.IsSiteOfConflict(d) { return false } } for _, a := range attackers { if s.IsAtWar(a) || s.IsSiteOfConflict(a) { return false } } for _, l := range locations { if s.IsAtWar(l) || s.IsSiteOfConflict(l) { return false } } // TODO: Logic for joining wars? c := &Conflict{ name: "War!", // TODO length: 0, attackers: Faction{ members: attackers, progress: 0, }, defenders: Faction{ members: defenders, progress: 0, }, goal: s.Settings().GetConflictGoal(pb.ConflictType_CONVENTIONAL_WAR), base_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_CONVENTIONAL_WAR), locations: locations, conflict_type: pb.ConflictType_CONVENTIONAL_WAR, } // For now it maps only to the first location s.Conflicts[locations[0]] = c return true }
[ "func (server *Server) CreateLift(w http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\tlift := models.Lift{}\n\terr = json.Unmarshal(body, &lift)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\tlift.Prepare()\n\terr = lift.Validate()\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\t// uid, err := auth.ExtractTokenID(r)\n\t// if err != nil {\n\t// \tresponses.ERROR(w, http.StatusUnauthorized, errors.New(\"Unauthorized\"))\n\t// \treturn\n\t// }\n\t// if uid != lift.AuthorID {\n\t// \tresponses.ERROR(w, http.StatusUnauthorized, errors.New(http.StatusText(http.StatusUnauthorized)))\n\t// \treturn\n\t// }\n\tliftCreated, err := lift.SaveLift(server.DB)\n\tif err != nil {\n\t\tformattedError := formaterror.FormatError(err.Error())\n\t\tresponses.ERROR(w, http.StatusInternalServerError, formattedError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Lacation\", fmt.Sprintf(\"%s%s/%d\", r.Host, r.URL.Path, liftCreated.ID))\n\tresponses.JSON(w, http.StatusCreated, liftCreated)\n}", "func (se *DistributedShell) Create(iw router.Party, vs ui.ViewState) {\n\tse.vs = vs\n\tse.vs.ActivePage = \"Distributed Shell\"\n\tiw.Get(\"/\", func(ctx iris.Context) {\n\t\tctx.View(\"dshell.html\", se.defaultConfig())\n\t})\n\tiw.Get(\"/schemas\", func(ctx iris.Context) {\n\t\tctx.JSON(se.db.GetSchemas())\n\t})\n\tiw.Post(\"/query\", se.executeQuery)\n}", "func (s *State) NewColonialWar(target pb.ProvinceId) bool { // TODO: Error return\n\tif s.IsAtWar(target) || s.IsSiteOfConflict(target) || s.Get(target).Occupier() != pb.ProvinceId_NONE {\n\t\treturn false\n\t}\n\tc := &Conflict{\n\t\tname: \"Colonial War\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\t// Dissidents\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: 
[]pb.ProvinceId{s.Get(target).Occupier()},\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_COLONIAL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_COLONIAL_WAR),\n\t\tlocations: []pb.ProvinceId{target},\n\t\tconflict_type: pb.ConflictType_COLONIAL_WAR,\n\t}\n\ts.Conflicts[target] = c\n\treturn true\n}", "func (s *State) NewCivilWar(target pb.ProvinceId) bool { // TODO: Error return\n\tif s.IsAtWar(target) || s.IsSiteOfConflict(target) {\n\t\treturn false\n\t}\n\tc := &Conflict{\n\t\tname: \"Civil War\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\trebels: *(s.Get(target).Dissidents()),\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: []pb.ProvinceId{target},\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_CIVIL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_CIVIL_WAR),\n\t\tlocations: []pb.ProvinceId{target},\n\t\tconflict_type: pb.ConflictType_CIVIL_WAR,\n\t}\n\ts.Conflicts[target] = c\n\treturn true\n}", "func CreateServiceAction(c *gcli.Context) {\n\tcli := NewServiceClient(c)\n\t\n\treq := service.CreateRequest{}\n\tif val, success := getVal(\"name\", c); success {\n\t\treq.Name = val\n\t}\n\n\tif val, success := getVal(\"teamId\", c); success {\n\t\treq.TeamId = val\n\t}\n\n\tif val, success := getVal(\"visibility\", c); success {\n\t\treq.Visibility = service.Visibility(val)\n\t}\n\n\tif val, success := getVal(\"description\", c); success {\n\t\treq.Description = val\n\t}\n\n\tprintMessage(DEBUG,\"Create Service Request Created. Sending to Opsgenie...\")\n\n\tresp, err := cli.Create(nil, &req)\n\texitOnErr(err)\n\n\tprintMessage(DEBUG,\"Creating Service. 
RequestID: \" + resp.RequestId)\n\tprintMessage(INFO,\"RequestID: \" + resp.RequestId)\n}", "func createTeam(w http.ResponseWriter, r *http.Request) {\n\tteam := models.NewTeam(\"\")\n\tskue.Create(view, team, w, r)\n}", "func CretaeHandleCreateAction(gameStore *ClientsGameCollection) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\tfmt.Println(\"POST HANDLER\")\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tvar action actionRequestData\n\t\terr := decoder.Decode(&action)\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR parsing requiest\")\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(\"POST HANDLER ACTION\", action.Action)\n\t\tif action.Action == \"startRound\" {\n\t\t\tgo StartRound(gameStore)\n\t\t}\n\n\t\tio.WriteString(w, \"ok\")\n\t}\n}", "func CreateProject(w http.ResponseWriter, r *http.Request) {\n\t// Get incoming data, content n' stuff\n\t// Pass those data and create em'\n\t// Return new project and response\n}", "func (controller CalendarController) NewCalendarAction(w http.ResponseWriter, r *http.Request) {\n\tuserID := service.DefaultAuthService.GetUserID(r, w)\n\tnewCalendar := model.NewCalendarModel(\"Untitled Calendar\", \"#464D4B\", userID)\n\tmodel.DefaultCalendarRepository.AddCalendar(newCalendar)\n\tjsonResponse, _ := json.Marshal(newCalendar)\n\tw.Write(jsonResponse)\n}", "func CreateWorkflow(w http.ResponseWriter, r *http.Request) {\n\tvar t models.Workflow\n\n\tu := GetUserSession(r)\n\tif u == nil || !u.IsAdmin {\n\t\tw.WriteHeader(403)\n\t\tw.Write(apiError(\"you must be logged in as a system administrator to create a project\"))\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(r.Body)\n\terr := decoder.Decode(&t)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write(apiError(\"malformed json\"))\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\t// Because of how chi does routing the id is actually the project key\n\tp := models.Project{Key: chi.URLParam(r, \"id\")}\n\n\terr = Store.Projects().Get(&p)\n\tif err != 
nil {\n\t\tw.WriteHeader(404)\n\t\tw.Write(apiError(\"project with that key does not exist\"))\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\terr = Store.Workflows().New(p, &t)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write(apiError(err.Error()))\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tsendJSON(w, t)\n}", "func AddNewAction(typ int,uuid string, key string, description string){\n\n\telem, ok := ActionBuffer[typ].Description[uuid]\n if !ok {\n elem = make(map[string]string)\n\t if(ActionBuffer[typ].Description == nil){\n\t\t\tActionBuffer[typ].Description=make(map[string]map[string]string)\n\t }\n ActionBuffer[typ].Description[uuid] = elem\n }\n\tActionBuffer[typ].Description[uuid][key]=description\n}", "func create(w http.ResponseWriter, req *http.Request) {\n\tresponse := \"\"\n\tswitch req.RequestURI {\n\tcase \"/get/accounts\":\n\t\tmapD := map[string]int{\"apple\": 5, \"lettuce\": 7}\n\t\tmapB, _ := json.Marshal(mapD)\n\t\tresponse = string(mapB)\n\t\tbreak\n\tdefault:\n\t\tr, _ := json.Marshal(\"Request not found\")\n\t\tresponse = string(r)\n\t\tbreak\n\t}\n\n\tcontext := Context{Title: response}\n\trender(w, \"api\", context)\n}", "func CreateNewSchool(c echo.Context) error {\n\n\tdb, ok := c.Get(\"db\").(*gorm.DB)\n\n\tif !ok {\n\t\treturn c.NoContent(http.StatusInternalServerError)\n\t}\n\n\tvar modelview view.CreateNewSchoolModelView\n\n\tc.Bind(&modelview)\n\n\tcanteens := make([]canteen.Canteen, len(modelview.Canteens))\n\n\tfor index := range modelview.Canteens {\n\n\t\tlocation := canteen.Location{}\n\n\t\tlocation.Latitude = modelview.Canteens[index].Location.Latitude\n\n\t\tlocation.Longitude = modelview.Canteens[index].Location.Longitude\n\n\t\tcanteen, cerr := canteen.New(modelview.Canteens[index].Name, location)\n\t\tif cerr != nil {\n\n\t\t\tmodelview := customerrorview.UsingFieldErrorToErrorMessageModelView(*cerr)\n\n\t\t\treturn c.JSON(http.StatusBadRequest, modelview)\n\t\t}\n\t\tcanteens[index] = canteen\n\t}\n\n\tschool, serr := 
model.New(modelview.Acronym, modelview.Name, canteens)\n\n\tif serr != nil {\n\n\t\tmodelview := customerrorview.UsingFieldErrorToErrorMessageModelView(*serr)\n\n\t\treturn c.JSON(http.StatusBadRequest, modelview)\n\t}\n\n\tvar existingSchool model.School\n\n\t// Finds if school with same acronym already exists\n\n\terr := db.Where(map[string]interface{}{\"acronym\": modelview.Acronym}).First(&existingSchool).Error\n\n\tif err == nil {\n\n\t\tcerr := customerrormodel.FieldError{Field: \"acronym\", Model: \"school\", Explanation: \"a school with the same acronym already exists\"}\n\n\t\tmodelview := customerrorview.UsingFieldErrorToErrorMessageModelView(cerr)\n\n\t\treturn c.JSON(http.StatusBadRequest, modelview)\n\t}\n\n\t// Creates school\n\tdb.Create(&school)\n\n\tmodelviewres := view.ToGetDetailedSchoolInformationModelView(school)\n\n\treturn c.JSON(http.StatusCreated, modelviewres)\n\n}", "func New(w http.ResponseWriter, r *http.Request) {\n\n\t//Executand aplicacao web para a pagina New\n\ttemplateDaAplicacaoWeb.ExecuteTemplate(w, \"New\", nil)\n}", "func doCreate(enviro env.Project, appJson, rootDir, appName, vendorDir, constraints string) error {\n\tfmt.Printf(\"Creating initial project structure, this might take a few seconds ... 
\\n\")\n\tdescriptor, err := ParseAppDescriptor(appJson)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif appName != \"\" {\n\t\t// override the application name\n\n\t\taltJson := strings.Replace(appJson, `\"`+descriptor.Name+`\"`, `\"`+appName+`\"`, 1)\n\t\taltDescriptor, err := ParseAppDescriptor(altJson)\n\n\t\t//see if we can get away with simple replace so we don't reorder the existing json\n\t\tif err == nil && altDescriptor.Name == appName {\n\t\t\tappJson = altJson\n\t\t} else {\n\t\t\t//simple replace didn't work so we have to unmarshal & re-marshal the supplied json\n\t\t\tvar appObj map[string]interface{}\n\t\t\terr := json.Unmarshal([]byte(appJson), &appObj)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tappObj[\"name\"] = appName\n\n\t\t\tupdApp, err := json.MarshalIndent(appObj, \"\", \" \")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tappJson = string(updApp)\n\t\t}\n\n\t\tdescriptor.Name = appName\n\t} else {\n\t\tappName = descriptor.Name\n\t\trootDir = filepath.Join(rootDir, appName)\n\t}\n\n\terr = enviro.Init(rootDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = enviro.Create(false, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = fgutil.CreateFileFromString(filepath.Join(rootDir, \"flogo.json\"), appJson)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// create initial structure\n\tappDir := filepath.Join(enviro.GetSourceDir(), descriptor.Name)\n\tos.MkdirAll(appDir, os.ModePerm)\n\n\t// Validate structure\n\terr = enviro.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create the dep manager\n\tdepManager := &dep.DepManager{Env: enviro}\n\n\t// Initialize the dep manager\n\terr = depManager.Init()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create initial files\n\tdeps, err := config.ExtractAllDependencies(appJson)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcreateMainGoFile(appDir, \"\")\n\tcreateImportsGoFile(appDir, deps)\n\n\t// Add constraints\n\tif len(constraints) > 0 {\n\t\tnewConstraints 
:= []string{\"-add\"}\n\t\tnewConstraints = append(newConstraints, strings.Split(constraints, \",\")...)\n\t\terr = depManager.Ensure(newConstraints...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tensureArgs := []string{}\n\n\tif len(vendorDir) > 0 {\n\t\t// Copy vendor directory\n\t\tfgutil.CopyDir(vendorDir, enviro.GetVendorDir())\n\t\t// Do not touch vendor folder when ensuring\n\t\tensureArgs = append(ensureArgs, \"-no-vendor\")\n\t}\n\n\t// Sync up\n\terr = depManager.Ensure(ensureArgs...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t tApp) New(w http.ResponseWriter, r *http.Request, ctr, act string) *contr.App {\n\tc := &contr.App{}\n\tc.Controllers = Controllers.New(w, r, ctr, act)\n\treturn c\n}", "func (o *DesktopApp) Create() (*restapi.SliceResponse, error) {\n\tvar queryArg = make(map[string]interface{})\n\n\tqueryArg[\"ID\"] = []string{o.TemplateName}\n\tLogD.Printf(\"Generated Map for Create(): %+v\", queryArg)\n\n\tresp, err := o.client.CallSliceAPI(o.apiCreate, queryArg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !resp.Success {\n\t\treturn nil, errors.New(resp.Message)\n\t}\n\n\treturn resp, nil\n}", "func CreateDeploy(w http.ResponseWriter, r *http.Request) {\n\tdeploy := models.Deploy{}\n\terr := json.NewDecoder(r.Body).Decode(&deploy)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Todo validate requirement id\n\n\terr = models.InsertDeploy(deploy)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw.WriteHeader(200)\n\terr = json.NewEncoder(w).Encode(deploy)\n\tif err != nil {\n\t\tpanic(error(err))\n\t}\n}", "func CreateDeployAction() handler.DeployAction {\n\tcloudformation := createCloudformation()\n\treturn actions.ServerlessUI{DNS: CreateDNS(cloudformation), Bucket: CreateS3Bucket(cloudformation)}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new civil war
func (s *State) NewCivilWar(target pb.ProvinceId) bool { // TODO: Error return if s.IsAtWar(target) || s.IsSiteOfConflict(target) { return false } c := &Conflict{ name: "Civil War", // TODO length: 0, attackers: Faction{ rebels: *(s.Get(target).Dissidents()), progress: 0, }, defenders: Faction{ members: []pb.ProvinceId{target}, progress: 0, }, goal: s.Settings().GetConflictGoal(pb.ConflictType_CIVIL_WAR), base_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_CIVIL_WAR), locations: []pb.ProvinceId{target}, conflict_type: pb.ConflictType_CIVIL_WAR, } s.Conflicts[target] = c return true }
[ "func (s *State) NewColonialWar(target pb.ProvinceId) bool { // TODO: Error return\n\tif s.IsAtWar(target) || s.IsSiteOfConflict(target) || s.Get(target).Occupier() != pb.ProvinceId_NONE {\n\t\treturn false\n\t}\n\tc := &Conflict{\n\t\tname: \"Colonial War\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\t// Dissidents\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: []pb.ProvinceId{s.Get(target).Occupier()},\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_COLONIAL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_COLONIAL_WAR),\n\t\tlocations: []pb.ProvinceId{target},\n\t\tconflict_type: pb.ConflictType_COLONIAL_WAR,\n\t}\n\ts.Conflicts[target] = c\n\treturn true\n}", "func (s *State) NewConventionalWar(defenders []pb.ProvinceId, attackers []pb.ProvinceId, locations []pb.ProvinceId) bool { // TODO: Error return\n\tfor _, d := range defenders {\n\t\tif s.IsAtWar(d) || s.IsSiteOfConflict(d) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, a := range attackers {\n\t\tif s.IsAtWar(a) || s.IsSiteOfConflict(a) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, l := range locations {\n\t\tif s.IsAtWar(l) || s.IsSiteOfConflict(l) {\n\t\t\treturn false\n\t\t}\n\t}\n\t// TODO: Logic for joining wars?\n\tc := &Conflict{\n\t\tname: \"War!\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\tmembers: attackers,\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: defenders,\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_CONVENTIONAL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_CONVENTIONAL_WAR),\n\t\tlocations: locations,\n\t\tconflict_type: pb.ConflictType_CONVENTIONAL_WAR,\n\t}\n\t// For now it maps only to the first location\n\ts.Conflicts[locations[0]] = c\n\treturn true\n}", "func createWorld() {\n\tspace = chipmunk.NewSpace()\n\tspace.Gravity = vect.Vect{0, -900}\n\n\tstaticBody := 
chipmunk.NewBodyStatic()\n\tstaticLines = []*chipmunk.Shape{\n\t\tchipmunk.NewSegment(vect.Vect{0, -600}, vect.Vect{800.0, -600}, 0),\n\t\tchipmunk.NewSegment(vect.Vect{0, -600}, vect.Vect{0, 0}, 0),\n\t\tchipmunk.NewSegment(vect.Vect{800, -600}, vect.Vect{800.0, 0}, 0),\n\t}\n\tfor _, segment := range staticLines {\n\t\t// segment.SetElasticity(0.6)\n\t\tstaticBody.AddShape(segment)\n\t}\n\tspace.AddBody(staticBody)\n}", "func newCity(name string, direction []string) *City {\n\tcity := &City{name: name}\n\tcity.addRoute(direction)\n\treturn city\n}", "func (server *Server) CreateLift(w http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\tlift := models.Lift{}\n\terr = json.Unmarshal(body, &lift)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\tlift.Prepare()\n\terr = lift.Validate()\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\t// uid, err := auth.ExtractTokenID(r)\n\t// if err != nil {\n\t// \tresponses.ERROR(w, http.StatusUnauthorized, errors.New(\"Unauthorized\"))\n\t// \treturn\n\t// }\n\t// if uid != lift.AuthorID {\n\t// \tresponses.ERROR(w, http.StatusUnauthorized, errors.New(http.StatusText(http.StatusUnauthorized)))\n\t// \treturn\n\t// }\n\tliftCreated, err := lift.SaveLift(server.DB)\n\tif err != nil {\n\t\tformattedError := formaterror.FormatError(err.Error())\n\t\tresponses.ERROR(w, http.StatusInternalServerError, formattedError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Lacation\", fmt.Sprintf(\"%s%s/%d\", r.Host, r.URL.Path, liftCreated.ID))\n\tresponses.JSON(w, http.StatusCreated, liftCreated)\n}", "func createTeam(w http.ResponseWriter, r *http.Request) {\n\tteam := models.NewTeam(\"\")\n\tskue.Create(view, team, w, r)\n}", "func New(w http.ResponseWriter, r *http.Request) {\n\n\t//Executand aplicacao web para a pagina 
New\n\ttemplateDaAplicacaoWeb.ExecuteTemplate(w, \"New\", nil)\n}", "func (w *RandomWorld) CreateRandomCarnivore() *GoWorld.Being {\n\t// Create an empty being\n\tbeing := &GoWorld.Being{ID: uuid.New()}\n\tbeing.Type = \"Carnivore\"\n\n\t// Give the being the basic necessities\n\tbeing.Hunger = hungerRange.randomFloat()\n\tbeing.Thirst = thirstRange.randomFloat()\n\tbeing.WantsChild = wantsChildRange.randomFloat()\n\n\t// Shape the being\n\tbeing.LifeExpectancy = lifeExpectancyRange.randomFloat()\n\tbeing.VisionRange = visionRange.randomFloat()\n\tbeing.Speed = speedRange.randomFloat()\n\tbeing.Durability = durabilityRange.randomFloat()\n\tbeing.Stress = stressRange.randomFloat()\n\tbeing.Size = sizeRange.randomFloat()\n\tbeing.Gender = randomGender()\n\tbeing.Fertility = fertilityRange.randomFloat()\n\tbeing.MutationRate = mutationRange.randomFloat()\n\n\t// Pick a random (valid) position and check which habitat it is\n\tw.ThrowBeing(being)\n\n\treturn being\n}", "func CreateBattle(LeaderID string, BattleName string) (*Battle, error) {\n\tnewID, _ := uuid.NewUUID()\n\tid := newID.String()\n\n\tvar b = &Battle{\n\t\tBattleID: id,\n\t\tLeaderID: LeaderID,\n\t\tBattleName: BattleName,\n\t\tWarriors: make([]*Warrior, 0),\n\t\tPlans: make([]*Plan, 0),\n\t\tVotingLocked: true,\n\t\tActivePlanID: \"\",\n\t}\n\n\te := db.QueryRow(`INSERT INTO battles (id, leader_id, name) VALUES ($1, $2, $3) RETURNING id`, id, LeaderID, BattleName).Scan(&b.BattleID)\n\tif e != nil {\n\t\tlog.Println(e)\n\t\treturn nil, errors.New(\"Error Creating Battle\")\n\t}\n\n\treturn b, nil\n}", "func CreateWarrior(WarriorName string) (*Warrior, error) {\n\tnewID, _ := uuid.NewUUID()\n\tid := newID.String()\n\n\tvar WarriorID string\n\te := db.QueryRow(`INSERT INTO warriors (id, name) VALUES ($1, $2) RETURNING id`, id, WarriorName).Scan(&WarriorID)\n\tif e != nil {\n\t\tlog.Println(e)\n\t\treturn nil, errors.New(\"Unable to create new warrior\")\n\t}\n\n\treturn &Warrior{WarriorID: WarriorID, 
WarriorName: WarriorName}, nil\n}", "func makeNewGame(name string, playerNames []string) *Game {\n\tvar g = new(Game)\n\tid, err := uuid.GenUUID()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tg.ID = id\n\tg.Name = name\n\tg.Messages.Capacity = 500\n\tg.Phase = Development\n\tGames[g.ID] = g\n\tg.addMessage(fmt.Sprintf(\"Created game %s...\", g.Name))\n\tg.loadLocos()\n\tg.prepareLocos()\n\tg.initPlayers(playerNames)\n\tg.determineTurnOrder()\n\n\treturn g\n}", "func (w *RandomWorld) CreateCarnivores(quantity int) {\n\t// Initialize each being to a random one\n\tfor i := 0; i < quantity; i++ {\n\t\t// Create random being and place it into the map\n\t\tb := w.CreateRandomCarnivore()\n\t\tw.BeingList[b.ID.String()] = b\n\t}\n}", "func CreateWare(c *server.Context) error {\n\tvar (\n\t\terr error\n\t\taddReq ware.Ware\n\t\tconn orm.Connection\n\t)\n\n\tisAdmin := c.Request().Context().Value(\"user\").(jwtgo.MapClaims)[util.IsAdmin].(bool)\n\tif !isAdmin {\n\t\tlogger.Error(\"You don't have access\")\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrToken, nil)\n\t}\n\n\terr = c.JSONBody(&addReq)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInvalidParam, nil)\n\t}\n\n\terr = c.Validate(addReq)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInvalidParam, nil)\n\t}\n\n\tif len(addReq.Avatar) > 0 {\n\t\taddReq.Avatar, err = util.SavePicture(addReq.Avatar, \"ware/\")\n\t\tif err != nil {\n\t\t\tlogger.Error(err)\n\t\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInternalServerError, nil)\n\t\t}\n\t}\n\tif len(addReq.Image) > 0 {\n\t\taddReq.Image, err = util.SavePicture(addReq.Image, \"ware/\")\n\t\tif err != nil {\n\t\t\tlogger.Error(err)\n\t\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInternalServerError, nil)\n\t\t}\n\t}\n\tif len(addReq.DetailPic) > 0 {\n\t\taddReq.DetailPic, err = util.SavePicture(addReq.DetailPic, \"wareIntro/\")\n\t\tif err != 
nil {\n\t\t\tlogger.Error(err)\n\t\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInternalServerError, nil)\n\t\t}\n\t}\n\n\tconn, err = mysql.Pool.Get()\n\tdefer mysql.Pool.Release(conn)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t}\n\n\terr = ware.Service.CreateWare(conn, &addReq)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\tif (len(addReq.Avatar) > 0 && !util.DeletePicture(addReq.Avatar)) ||\n\t\t\t(len(addReq.Image) > 0 && !util.DeletePicture(addReq.Image)) ||\n\t\t\t(len(addReq.DetailPic) > 0 && !util.DeletePicture(addReq.DetailPic)) {\n\t\t\tlogger.Error(errors.New(\"create ware failed and delete it's pictures go wrong, please delete picture manually\"))\n\t\t}\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t}\n\n\tlogger.Info(\"create ware\", addReq.Name, \"success\")\n\treturn core.WriteStatusAndDataJSON(c, constants.ErrSucceed, nil)\n}", "func (se *DistributedShell) Create(iw router.Party, vs ui.ViewState) {\n\tse.vs = vs\n\tse.vs.ActivePage = \"Distributed Shell\"\n\tiw.Get(\"/\", func(ctx iris.Context) {\n\t\tctx.View(\"dshell.html\", se.defaultConfig())\n\t})\n\tiw.Get(\"/schemas\", func(ctx iris.Context) {\n\t\tctx.JSON(se.db.GetSchemas())\n\t})\n\tiw.Post(\"/query\", se.executeQuery)\n}", "func (planetDeliveryRest *PlanetDeliveryRest) Create(w http.ResponseWriter, r *http.Request) {\n\tvar planet entity.Planet\n\n\terr := json.NewDecoder(r.Body).Decode(&planet)\n\tif err != nil {\n\t\tError(w, \"Failed to decode JSON\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tplanetToInsert := *entity.NewPlanet(planet.Name, planet.Climate, planet.Terrain)\n\n\tnewPlanet, err := planetDeliveryRest.planetUsecase.Create(r.Context(), planetToInsert)\n\tif err != nil {\n\t\tError(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tJSON(w, newPlanet, http.StatusCreated)\n}", "func (s WashingtonPostScraper) CreateNewWashingtonPostScraper() 
*WashingtonPostScraper {\n\tc := colly.NewCollector()\n\t// c := colly.NewCollector(colly.Debugger(&debug.LogDebugger{}))\n\tc.UserAgent = s.UserAgent()\n\tc.IgnoreRobotsTxt = false\n\n\t// Adding this wait so AJAX can load, might need to look at https://github.com/chromedp/chromedp in the future\n\tc.Limit(&colly.LimitRule{\n\t\tDelay: 5 * time.Second,\n\t})\n\n\tscraper := WashingtonPostScraper{\n\t\tcollector: c,\n\t}\n\treturn &scraper\n}", "func CreateProject(w http.ResponseWriter, r *http.Request) {\n\t// Get incoming data, content n' stuff\n\t// Pass those data and create em'\n\t// Return new project and response\n}", "func New() *WarmerImpl {\n\treturn &WarmerImpl{}\n}", "func (c *CaptainClient) CreateFormation(name string, flightID, CPU, RAM, disk int, baseName, domain string, targetCount int, preflightPlaybook string) (Formation, error) {\n\tresult, err := c.restPOST(\"formation\", map[string]interface{}{\n\t\t\"FlightID\": flightID,\n\t\t\"Name\": name,\n\t\t\"CPU\": CPU,\n\t\t\"RAM\": RAM,\n\t\t\"Disk\": disk,\n\t\t\"BaseName\": baseName,\n\t\t\"Domain\": domain,\n\t\t\"TargetCount\": targetCount,\n\t\t\"PreflightPlaybook\": preflightPlaybook,\n\t})\n\tif err != nil {\n\t\treturn Formation{}, fmt.Errorf(\"unable to create Formation:\\n%w\", err)\n\t}\n\tvar formation Formation\n\terr = json.Unmarshal(result, &formation)\n\tif err != nil {\n\t\treturn Formation{}, fmt.Errorf(\"unable to format response as Formation:\\n%w\", err)\n\t}\n\treturn formation, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new colonial war
func (s *State) NewColonialWar(target pb.ProvinceId) bool { // TODO: Error return if s.IsAtWar(target) || s.IsSiteOfConflict(target) || s.Get(target).Occupier() != pb.ProvinceId_NONE { return false } c := &Conflict{ name: "Colonial War", // TODO length: 0, attackers: Faction{ // Dissidents progress: 0, }, defenders: Faction{ members: []pb.ProvinceId{s.Get(target).Occupier()}, progress: 0, }, goal: s.Settings().GetConflictGoal(pb.ConflictType_COLONIAL_WAR), base_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_COLONIAL_WAR), locations: []pb.ProvinceId{target}, conflict_type: pb.ConflictType_COLONIAL_WAR, } s.Conflicts[target] = c return true }
[ "func New() *WarmerImpl {\n\treturn &WarmerImpl{}\n}", "func (s *State) NewCivilWar(target pb.ProvinceId) bool { // TODO: Error return\n\tif s.IsAtWar(target) || s.IsSiteOfConflict(target) {\n\t\treturn false\n\t}\n\tc := &Conflict{\n\t\tname: \"Civil War\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\trebels: *(s.Get(target).Dissidents()),\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: []pb.ProvinceId{target},\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_CIVIL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_CIVIL_WAR),\n\t\tlocations: []pb.ProvinceId{target},\n\t\tconflict_type: pb.ConflictType_CIVIL_WAR,\n\t}\n\ts.Conflicts[target] = c\n\treturn true\n}", "func createWorld() {\n\tspace = chipmunk.NewSpace()\n\tspace.Gravity = vect.Vect{0, -900}\n\n\tstaticBody := chipmunk.NewBodyStatic()\n\tstaticLines = []*chipmunk.Shape{\n\t\tchipmunk.NewSegment(vect.Vect{0, -600}, vect.Vect{800.0, -600}, 0),\n\t\tchipmunk.NewSegment(vect.Vect{0, -600}, vect.Vect{0, 0}, 0),\n\t\tchipmunk.NewSegment(vect.Vect{800, -600}, vect.Vect{800.0, 0}, 0),\n\t}\n\tfor _, segment := range staticLines {\n\t\t// segment.SetElasticity(0.6)\n\t\tstaticBody.AddShape(segment)\n\t}\n\tspace.AddBody(staticBody)\n}", "func CreateApplication() *Alpha {\n app := &Alpha{}\n app.Request = &Request{}\n app.Response = &Response{}\n app.init()\n return app\n}", "func (s *State) NewConventionalWar(defenders []pb.ProvinceId, attackers []pb.ProvinceId, locations []pb.ProvinceId) bool { // TODO: Error return\n\tfor _, d := range defenders {\n\t\tif s.IsAtWar(d) || s.IsSiteOfConflict(d) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, a := range attackers {\n\t\tif s.IsAtWar(a) || s.IsSiteOfConflict(a) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, l := range locations {\n\t\tif s.IsAtWar(l) || s.IsSiteOfConflict(l) {\n\t\t\treturn false\n\t\t}\n\t}\n\t// TODO: Logic for joining wars?\n\tc := &Conflict{\n\t\tname: 
\"War!\", // TODO\n\t\tlength: 0,\n\t\tattackers: Faction{\n\t\t\tmembers: attackers,\n\t\t\tprogress: 0,\n\t\t},\n\t\tdefenders: Faction{\n\t\t\tmembers: defenders,\n\t\t\tprogress: 0,\n\t\t},\n\t\tgoal: s.Settings().GetConflictGoal(pb.ConflictType_CONVENTIONAL_WAR),\n\t\tbase_chance: s.Settings().GetConflictBaseChance(pb.ConflictType_CONVENTIONAL_WAR),\n\t\tlocations: locations,\n\t\tconflict_type: pb.ConflictType_CONVENTIONAL_WAR,\n\t}\n\t// For now it maps only to the first location\n\ts.Conflicts[locations[0]] = c\n\treturn true\n}", "func NewColonist() *Colonist {\n\treturn &Colonist{\n\t\tKey: xid.New().String(),\n\t\tName: generateName(),\n\t\tAge: generateAge(),\n\t\tBag: &storage.Storage{\n\t\t\tSize: 30,\n\t\t\tItems: []interface{}{},\n\t\t},\n\t\tEquipment: &Equipment{},\n\t\tDesires: createDesires(),\n\t\tNeeds: createNeeds(),\n\t\tSkills: generateSkills(),\n\t}\n}", "func newWorkspace(session *session, root string) *Workspace {\n\treturn &Workspace{\n\t\tsession: session,\n\t\trootPath: root,\n\t}\n}", "func NewThestralApp(config Config) (app *Thestral, err error) {\n\tif len(config.Downstreams) == 0 {\n\t\terr = errors.New(\"no downstream server defined\")\n\t}\n\tif err == nil && len(config.Upstreams) == 0 {\n\t\terr = errors.New(\"no upstream server defined\")\n\t}\n\n\tapp = &Thestral{\n\t\tdownstreams: make(map[string]ProxyServer),\n\t\tupstreams: make(map[string]ProxyClient),\n\t}\n\n\t// create logger\n\tif err == nil {\n\t\tapp.log, err = CreateLogger(config.Logging)\n\t\tif err != nil {\n\t\t\terr = errors.WithMessage(err, \"failed to create logger\")\n\t\t}\n\t}\n\n\t// init db\n\tif err == nil && config.DB != nil {\n\t\terr = db.InitDB(*config.DB)\n\t}\n\n\t// create downstream servers\n\tif err == nil {\n\t\tdsLogger := app.log.Named(\"downstreams\")\n\t\tfor k, v := range config.Downstreams {\n\t\t\tapp.downstreams[k], err = CreateProxyServer(dsLogger.Named(k), v)\n\t\t\tif err != nil {\n\t\t\t\terr = errors.WithMessage(\n\t\t\t\t\terr, 
\"failed to create downstream server: \"+k)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// create upstream clients\n\tif err == nil {\n\t\tfor k, v := range config.Upstreams {\n\t\t\tapp.upstreams[k], err = CreateProxyClient(v)\n\t\t\tif err != nil {\n\t\t\t\terr = errors.WithMessage(\n\t\t\t\t\terr, \"failed to create upstream client: \"+k)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tapp.upstreamNames = append(app.upstreamNames, k)\n\t\t}\n\t}\n\n\t// create rule matcher\n\tif err == nil {\n\t\tapp.ruleMatcher, err = NewRuleMatcher(config.Rules)\n\t\tif err != nil {\n\t\t\terr = errors.WithMessage(err, \"failed to create rule matcher\")\n\t\t}\n\t}\n\tif err == nil {\n\t\tfor _, ruleUpstream := range app.ruleMatcher.AllUpstreams {\n\t\t\tif _, ok := app.upstreams[ruleUpstream]; !ok {\n\t\t\t\terr = errors.Errorf(\n\t\t\t\t\t\"undefined upstream '%s' used in the rule set\",\n\t\t\t\t\truleUpstream)\n\t\t\t}\n\t\t}\n\t}\n\n\t// parse other settings\n\tif err == nil {\n\t\tif config.Misc.ConnectTimeout != \"\" {\n\t\t\tapp.connectTimeout, err = time.ParseDuration(\n\t\t\t\tconfig.Misc.ConnectTimeout)\n\t\t\tif err != nil {\n\t\t\t\terr = errors.WithStack(err)\n\t\t\t}\n\t\t\tif err == nil && app.connectTimeout <= 0 {\n\t\t\t\terr = errors.New(\"'connect_timeout' should be greater than 0\")\n\t\t\t}\n\t\t} else {\n\t\t\tapp.connectTimeout = defaultConnectTimeout\n\t\t}\n\t}\n\tif err == nil && config.Misc.EnableMonitor {\n\t\tapp.monitor.Start(config.Misc.MonitorPath)\n\t}\n\n\treturn\n}", "func New(w http.ResponseWriter, r *http.Request) {\n\n\t//Executand aplicacao web para a pagina New\n\ttemplateDaAplicacaoWeb.ExecuteTemplate(w, \"New\", nil)\n}", "func NewSpace(t *testing.T, awaitilities wait.Awaitilities, opts ...SpaceOption) *toolchainv1alpha1.Space {\n\tnamePrefix := strings.ToLower(t.Name())\n\t// Remove all invalid characters\n\tnamePrefix = notAllowedChars.ReplaceAllString(namePrefix, \"\")\n\n\t// Trim if the length exceeds 40 chars (63 is the max)\n\tif len(namePrefix) > 
40 {\n\t\tnamePrefix = namePrefix[0:40]\n\t}\n\n\tspace := &toolchainv1alpha1.Space{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: awaitilities.Host().Namespace,\n\t\t\tGenerateName: namePrefix + \"-\",\n\t\t},\n\t}\n\tfor _, apply := range opts {\n\t\tapply(space)\n\t}\n\treturn space\n}", "func MakeWorley(shaderpath string) Worley {\n\tcomputeshader, err := shader.MakeCompute(shaderpath + \"/noise/worley.comp\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t//create random seed\n\trandomdata := createRandom(1024 * 1024 * 4)\n\tnoisetexture, err := texture.MakeFromData(randomdata, 1024, 1024, gl.RGBA32F, gl.RGBA)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn Worley{\n\t\tcomputeshader: computeshader,\n\t\tnoisetexture: noisetexture,\n\n\t\twidth: 1024,\n\t\theight: 1024,\n\t\tresolution: 32,\n\t\toctaves: 1,\n\t\tradius: 40.0,\n\t\tradiusscale: 1,\n\n\t\tbrightness: 1.0,\n\t\tcontrast: 1.0,\n\t}\n}", "func New() *Yam {\n\ty := &Yam{}\n\ty.Config = NewConfig()\n\ty.Root = &Route{yam: y}\n\n\treturn y\n}", "func newAssembledOrg() *assembledOrg {\n\tao := assembledOrg{}\n\tao.resourceMap = make(resourceMap)\n\tao.org.headerYAML = headerYAML{APIVersion: apiCFTv1alpha1, KindStr: Organization.String()}\n\treturn &ao\n}", "func (v *Vizceral) NewVizceral() *Vizceral {\n\tv.Name = \"Bottle application map\"\n\tv.Renderer = \"region\"\n\tv.Layout = \"ltrTree\"\n\tv.MaxVolume = 0\n\tv.NodeMap = new(VizceralNodes)\n\tv.NodeMap.nodes = make(map[string]*VizceralNode)\n\tv.ConnectionMap = new(VizceralConnections)\n\tv.ConnectionMap.connections = make(map[string]*VizceralConnection)\n\n\tv.config.getConfig()\n\tv.createScenario()\n\tgo v.snapshotLoop()\n\treturn v\n}", "func (se *DistributedShell) Create(iw router.Party, vs ui.ViewState) {\n\tse.vs = vs\n\tse.vs.ActivePage = \"Distributed Shell\"\n\tiw.Get(\"/\", func(ctx iris.Context) {\n\t\tctx.View(\"dshell.html\", se.defaultConfig())\n\t})\n\tiw.Get(\"/schemas\", func(ctx iris.Context) 
{\n\t\tctx.JSON(se.db.GetSchemas())\n\t})\n\tiw.Post(\"/query\", se.executeQuery)\n}", "func New(width int, visualization string, zeitpunkte []time.Time) (slm sunlightmap) {\n\tslm = sunlightmap{}\n\tslm.Width = width - width%2\n\tslm.Height = slm.Width / 2\n\tslm.visualization = visualization\n\tslm.DaylightImageFilename = \"world_mine_day_solarized_720-360.png\"\n\tslm.NighttimeImageFilename = \"world_mine_night_solarized_720-360.png\"\n\tslm.zeitpunkte = zeitpunkte //[]time.Time{time.Date(2017, 10, 24, 17, 30, 0, 0, time.UTC)}\n\treturn\n}", "func New(abbrev, aranges, frame, info, line, pubnames, ranges, str []byte) (*dwarf.Data, error)", "func newCity(name string, direction []string) *City {\n\tcity := &City{name: name}\n\tcity.addRoute(direction)\n\treturn city\n}", "func NewWhale(className string, ) *Whale {\n this := Whale{}\n this.ClassName = className\n return &this\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }