query (stringlengths 8–6.75k) | document (stringlengths 9–1.89M) | negatives (listlengths 19–19) | metadata (dict)
---|---|---|---|
setFnName returns the identifier of the function that sets a concrete type of the property.
|
func (p *PropertyGenerator) setFnName(i int) string {
if len(p.Kinds) == 1 {
return setMethod
}
return fmt.Sprintf("%s%s", setMethod, p.kindCamelName(i))
}
|
[
"func setterName(typeName string) string {\n\treturn fmt.Sprintf(\"Set%s\", accessorName(typeName))\n}",
"func (m *ExtensionProperty) SetName(value *string)() {\n m.name = value\n}",
"func (m *ExtensionSchemaProperty) SetName(value *string)() {\n m.name = value\n}",
"func SetTypeName(tau TypeT, name string) int32 {\n\tcs := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cs))\n\treturn int32(C.yices_set_type_name(C.type_t(tau), cs))\n}",
"func (m *WorkbookPivotTable) SetName(value *string)() {\n m.name = value\n}",
"func (m *Win32LobAppRegistryRule) SetValueName(value *string)() {\n m.valueName = value\n}",
"func (m *IdentityProvider) SetName(value *string)() {\n m.name = value\n}",
"func (m *CertificationControl) SetName(value *string)() {\n m.name = value\n}",
"func Name(name string) omfunc {\n\treturn func(o metav1.Object) {\n\t\to.SetName(name)\n\t}\n}",
"func (m *WorkbookWorksheet) SetName(value *string)() {\n m.name = value\n}",
"func (p *PropertyGenerator) serializeFnName() string {\n\tif p.asIterator {\n\t\treturn serializeIteratorMethod\n\t}\n\treturn serializeMethod\n}",
"func (m *ItemReference) SetName(value *string)() {\n m.name = value\n}",
"func (cli *SetWrapper) SetName(name string) error {\n\treturn cli.set.SetValue(fieldSetName, name)\n}",
"func (h *Handler) SetFunc(name string, fn Callable) {\n\th.Functions[name] = fn\n}",
"func (t *Type) SetNname(n *Node)",
"func SetFunction(name string, fn interface{}) uintptr {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\t//Icallback IupSetFunction(const char *name, Icallback func);\n\n\tswitch fn.(type) {\n\tcase nil:\n\t\treturn uintptr(unsafe.Pointer(C.__IupSetFunction(c_name, unsafe.Pointer(uintptr(0)))))\n\tcase uintptr:\n\t\treturn uintptr(unsafe.Pointer(C.__IupSetFunction(c_name, unsafe.Pointer(fn.(uintptr)))))\n\tdefault:\n\t\treturn uintptr(unsafe.Pointer(C.__IupSetFunction(c_name, unsafe.Pointer(syscall.NewCallbackCDecl(fn)))))\n\t}\n}",
"func (m *DeviceManagementConfigurationPolicy) SetName(value *string)() {\n err := m.GetBackingStore().Set(\"name\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (m *UserExperienceAnalyticsDeviceScopesItemTriggerDeviceScopeActionPostRequestBody) SetActionName(value *string)() {\n err := m.GetBackingStore().Set(\"actionName\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (m *ExternalConnection) SetName(value *string)() {\n m.name = value\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
serializeFnName returns the identifier of the function that serializes the generated Go type into raw JSON.
|
func (p *PropertyGenerator) serializeFnName() string {
if p.asIterator {
return serializeIteratorMethod
}
return serializeMethod
}
|
[
"func (p *PropertyGenerator) deserializeFnName() string {\n\tif p.asIterator {\n\t\treturn fmt.Sprintf(\"%s%s\", deserializeIteratorMethod, p.Name.CamelName)\n\t}\n\treturn fmt.Sprintf(\"%s%sProperty\", deserializeMethod, p.Name.CamelName)\n}",
"func (s *Instruction) FuncName() string {\n\tif name, ok := protoNameToFuncName[s.Protobuf.TypeName]; ok {\n\t\treturn name\n\t}\n\treturn \"?\"\n}",
"func GetName(functionType uint64) string {\n\tvar s string\n\n\tfor i := uint(0); i < 8; i++ {\n\t\tt := (functionType >> (_BFF_MAX_SHIFT - _BFF_ONE_SHIFT*i)) & _BFF_MASK\n\n\t\tif t == NONE_TYPE {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := getByteFunctionNameToken(t)\n\n\t\tif len(s) != 0 {\n\t\t\ts += \"+\"\n\t\t}\n\n\t\ts += name\n\t}\n\n\tif len(s) == 0 {\n\t\ts += getByteFunctionNameToken(NONE_TYPE)\n\t}\n\n\treturn s\n}",
"func JSONTagNameFunc(fld reflect.StructField) string {\n\tname := strings.SplitN(fld.Tag.Get(\"json\"), \",\", 2)[0]\n\n\t// if there is no defined json tag or the tag is to be skipped fall back to the field name\n\tif name == \"\" || name == \"-\" {\n\t\treturn fld.Name\n\t}\n\n\treturn name\n}",
"func (Functions) ProtoName(obj interface{}) string {\n\treturn nameOptions{}.convert(nameOf(obj))\n}",
"func (f Function) GetName() string {\n\treturn f.ident.String()\n}",
"func (o AzureFunctionOutputDataSourceResponseOutput) FunctionName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureFunctionOutputDataSourceResponse) *string { return v.FunctionName }).(pulumi.StringPtrOutput)\n}",
"func (o AzureFunctionOutputDataSourceOutput) FunctionName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureFunctionOutputDataSource) *string { return v.FunctionName }).(pulumi.StringPtrOutput)\n}",
"func (p *Param) JsTypeName() string {\n\tswitch p.Type.Name {\n\tcase TypeString.Name:\n\t\treturn \"string\"\n\tcase TypeBool.Name:\n\t\treturn \"boolean\"\n\tcase TypeInt.Name, TypeInt8.Name, TypeInt16.Name, TypeInt32.Name, TypeInt64.Name,\n\t\tTypeUint.Name, TypeUint8.Name, TypeUint16.Name, TypeUint32.Name, TypeUint64.Name:\n\t\treturn \"number\"\n\tdefault:\n\t\treturn \"custom\" + p.Type.CapitalizedName()\n\t}\n}",
"func (st SignatureType) Name() string {\n\treturn string(st)\n}",
"func (o FunctionEventInvokeConfigOutput) FunctionName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *FunctionEventInvokeConfig) pulumi.StringOutput { return v.FunctionName }).(pulumi.StringOutput)\n}",
"func NameOfFunc(fn interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()\n}",
"func (f *Func) Name() string",
"func nameOfFunction(f interface{}) string {\n\tfun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())\n\ttokenized := strings.Split(fun.Name(), \".\")\n\tlast := tokenized[len(tokenized)-1]\n\tlast = strings.TrimSuffix(last, \")·fm\") // < Go 1.5\n\tlast = strings.TrimSuffix(last, \")-fm\") // Go 1.5\n\tlast = strings.TrimSuffix(last, \"·fm\") // < Go 1.5\n\tlast = strings.TrimSuffix(last, \"-fm\") // Go 1.5\n\tif last == \"func1\" { // this could mean conflicts in API docs\n\t\tval := atomic.AddInt32(&anonymousFuncCount, 1)\n\t\tlast = \"func\" + fmt.Sprintf(\"%d\", val)\n\t\tatomic.StoreInt32(&anonymousFuncCount, val)\n\t}\n\treturn last\n}",
"func GetFuncName(f interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()\n}",
"func identifier(options SerializerOptions) runtime.Identifier {\n\tresult := map[string]string{\n\t\t\"name\": \"json\",\n\t\t\"yaml\": strconv.FormatBool(options.Yaml),\n\t\t\"pretty\": strconv.FormatBool(options.Pretty),\n\t}\n\tidentifier, err := json.Marshal(result)\n\tif err != nil {\n\t\tklog.Fatalf(\"Failed marshaling identifier for json Serializer: %v\", err)\n\t}\n\treturn runtime.Identifier(identifier)\n}",
"func Name(ctx context.Context) string {\n\tf, ok := ctx.Value(stateKey).(*Func)\n\tif !ok {\n\t\treturn \"<Undefined>\"\n\t}\n\tname := runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name()\n\treturn strings.TrimRight(nameRe.FindStringSubmatch(name)[1], \")\")\n}",
"func (f *feature) Serialize() string {\n\tstream, err := json.Marshal(f)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(stream)\n}",
"func (g *jsonschema) Name() string {\n\treturn \"jsonschema\"\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
kindCamelName returns an identifier-friendly name for the kind at the specified index. It will panic if 'i' is out of range.
|
func (p *PropertyGenerator) kindCamelName(i int) string {
return p.Kinds[i].Name.CamelName
}
|
[
"func KindNamed(s string) Kind {\n\ts = strings.Title(strings.ToLower(s))\n\tfor i := 0; i < len(_Kind_index)-1; i++ {\n\t\tif _Kind_name[_Kind_index[i]:_Kind_index[i+1]] == s {\n\t\t\treturn Kind(i)\n\t\t}\n\t}\n\treturn Int\n}",
"func (p *PropertyGenerator) getFnName(i int) string {\n\tif len(p.Kinds) == 1 {\n\t\treturn getMethod\n\t}\n\treturn fmt.Sprintf(\"%s%s\", getMethod, p.kindCamelName(i))\n}",
"func (e BaseEnum) name(index int) string {\n names := [...]string {\n \"BASEENUM_UNKNOWN\",\n }\n if index < 0 || index > len(names) {\n return \"_UNKNOWN\"\n }\n return names[index]\n}",
"func classNameFromIdx(idx int) string {\n\tvar buf [8]byte\n\tstrIdx := len(buf) - 1\n\tbuf[strIdx] = byte('a' + (idx % 26))\n\tfor idx /= 26; idx > 0; idx /= 26 {\n\t\tidx--\n\t\tstrIdx--\n\t\tbuf[strIdx] = byte('a' + (idx % 26))\n\t}\n\treturn string(buf[strIdx:])\n}",
"func (k Kind) Name() string {\n\treturn strings.ToLower(k.String())\n}",
"func kindVarName(k vdl.Kind) string {\n\tif k == vdl.TypeObject {\n\t\treturn \"TypeObject\"\n\t}\n\treturn vdlutil.FirstRuneToUpper(k.String())\n}",
"func nthCounterName(n int) string { return fmt.Sprintf(\"C%02d\", n) }",
"func ControllerName(kind string) string {\n\treturn \"managed/\" + strings.ToLower(kind)\n}",
"func ControllerName(kind string) string {\n\treturn \"claimscheduling/\" + strings.ToLower(kind)\n}",
"func groupKindToCRDName(gk schema.GroupKind) string {\n\tplural := strings.ToLower(gk.Kind) + \"s\"\n\treturn fmt.Sprintf(\"%s.%s\", plural, gk.Group)\n}",
"func KindToSchemaID(kind string) string {\n\treturn strings.Replace(kind, \"-\", \"_\", -1)\n}",
"func spanKindName(k ptrace.SpanKind) string {\n\tname, ok := spanKindNames[int32(k)]\n\tif !ok {\n\t\treturn \"unknown\"\n\t}\n\treturn name\n}",
"func Name(kind uint16) string {\n\tname := MessageType_name[int32(kind)]\n\tif len(name) < 12 {\n\t\treturn name\n\t}\n\treturn name[12:]\n}",
"func KindClusterName(clusterName string) string {\n\tswitch {\n\t// With kind version < 0.6.0, the k8s context\n\t// is `[CLUSTER NAME]@kind`.\n\t// For eg: `cluster@kind`\n\t// the default name is `kind@kind`\n\tcase strings.HasSuffix(clusterName, \"@kind\"):\n\t\treturn strings.TrimSuffix(clusterName, \"@kind\")\n\n\t// With kind version >= 0.6.0, the k8s context\n\t// is `kind-[CLUSTER NAME]`.\n\t// For eg: `kind-cluster`\n\t// the default name is `kind-kind`\n\tcase strings.HasPrefix(clusterName, \"kind-\"):\n\t\treturn strings.TrimPrefix(clusterName, \"kind-\")\n\t}\n\n\treturn clusterName\n}",
"func (this ActivityStreamsImageProperty) KindIndex(idx int) int {\n\treturn this.properties[idx].KindIndex()\n}",
"func OrdinalPositionName(pos int) string {\n\tcardinal := pos + 1\n\tif cardinal < 1 {\n\t\treturn \"invalid position\"\n\t}\n\tsuffix := \"th\"\n\tswitch cardinal % 10 {\n\tcase 1:\n\t\tif cardinal%100 != 11 {\n\t\t\tsuffix = \"st\"\n\t\t}\n\tcase 2:\n\t\tif cardinal%100 != 12 {\n\t\t\tsuffix = \"nd\"\n\t\t}\n\tcase 3:\n\t\tif cardinal%100 != 13 {\n\t\t\tsuffix = \"rd\"\n\t\t}\n\t}\n\treturn strconv.Itoa(cardinal) + suffix\n}",
"func IndexKind(tokens []Token, kind Kind) Position {\n\tfor i, token := range tokens {\n\t\tif token.Kind == kind {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}",
"func DefaultNameForKind(kind string) string {\n\treturn fmt.Sprintf(\"default-%s\", kind)\n}",
"func crdName(gvk schema.GroupVersionKind) string {\n\t// make kind plural, the cheap and easy and brittle way\n\tgvk.Kind += \"s\"\n\n\treturn gvk.GroupKind().String()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
memberName returns the identifier to use for the kind at the specified index. It will panic if 'i' is out of range.
|
func (p *PropertyGenerator) memberName(i int) string {
return fmt.Sprintf("%sMember", p.Kinds[i].Name.LowerName)
}
|
[
"func (t *CompType) MemberIndex(name string) int {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\treturn int(C.H5Tget_member_index(t.id, c_name))\n}",
"func (p *PropertyGenerator) hasMemberName(i int) string {\n\tif len(p.Kinds) == 1 && p.Kinds[0].Nilable {\n\t\tpanic(\"PropertyGenerator.hasMemberName called for nilable single value\")\n\t}\n\treturn fmt.Sprintf(\"has%sMember\", p.Kinds[i].Name.CamelName)\n}",
"func (e BaseEnum) name(index int) string {\n names := [...]string {\n \"BASEENUM_UNKNOWN\",\n }\n if index < 0 || index > len(names) {\n return \"_UNKNOWN\"\n }\n return names[index]\n}",
"func (t *CompType) MemberName(mbr_idx int) string {\n\tc_name := C.H5Tget_member_name(t.id, C.uint(mbr_idx))\n\treturn C.GoString(c_name)\n}",
"func (t *Namespace) Member(i int) Id {\n\tif i < 0 || i >= len(t.Members) {\n\t\tpanic(\"cxxtypes: Member index out of range\")\n\t}\n\treturn IdByName(t.Members[i])\n}",
"func (p Path) NameAt(idx int) string {\n\treturn p.Names()[idx]\n}",
"func (i *Index) Name() string { return i.name }",
"func (z *Zzz) IdxName() int { //nolint:dupl false positive\n\treturn 3\n}",
"func (s *spec) stateName(i Index) (name string) {\n\tname = fmt.Sprintf(\"%v\", i)\n\tif s == nil {\n\t\treturn\n\t}\n\tif s.stateNames == nil {\n\t\treturn\n\t}\n\tif v, has := s.stateNames[i]; has {\n\t\tname = v\n\t}\n\treturn\n}",
"func (p *PropertyGenerator) getFnName(i int) string {\n\tif len(p.Kinds) == 1 {\n\t\treturn getMethod\n\t}\n\treturn fmt.Sprintf(\"%s%s\", getMethod, p.kindCamelName(i))\n}",
"func (p *PropertyGenerator) kindCamelName(i int) string {\n\treturn p.Kinds[i].Name.CamelName\n}",
"func (g *Group) ObjectNameByIndex(idx uint) (string, error) {\n\treturn objectNameByIndex(g.id, idx)\n}",
"func (this ActivityStreamsImageProperty) KindIndex(idx int) int {\n\treturn this.properties[idx].KindIndex()\n}",
"func (t *itype) fieldIndex(name string) int {\n\tswitch t.cat {\n\tcase aliasT, ptrT:\n\t\treturn t.val.fieldIndex(name)\n\t}\n\tfor i, field := range t.field {\n\t\tif name == field.name {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}",
"func (this ActivityStreamsActorProperty) KindIndex(idx int) int {\n\treturn this.properties[idx].KindIndex()\n}",
"func (f *Fields) Index(i int) *Field",
"func (tm *TableManager) Name(i Index) string {\n\ttm.mu.RLock()\n\tdefer tm.mu.RUnlock()\n\tif ts, ok := tm.ts[i]; ok && ts != nil {\n\t\treturn ts.Name\n\t}\n\treturn \"\"\n}",
"func (doc *Document) MemberName() (name string, err error) {\n\tid, err := doc.parseMemberID()\n\tif err != nil {\n\t\treturn\n\t}\n\tname, err = LookupMemberName(id)\n\treturn\n}",
"func (r *Resultset) NameIndex(name string) (int, error) {\n\tcolumn, ok := r.FieldNames[name]\n\tif ok {\n\t\treturn column, nil\n\t}\n\treturn 0, fmt.Errorf(\"invalid field name %s\", name)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
hasMemberName returns the identifier to use for struct members that determine whether non-nilable types have been set. Panics if called for a Kind that is nilable.
|
func (p *PropertyGenerator) hasMemberName(i int) string {
if len(p.Kinds) == 1 && p.Kinds[0].Nilable {
panic("PropertyGenerator.hasMemberName called for nilable single value")
}
return fmt.Sprintf("has%sMember", p.Kinds[i].Name.CamelName)
}
|
[
"func hasName(t Type) bool {\n\tswitch t.(type) {\n\tcase *Basic, *Named, *TypeParam:\n\t\treturn true\n\t}\n\treturn false\n}",
"func (t Token) HasName() bool {\n\t// only lc-ident-full has a name\n\tif t.Token != ItemLowerIdent {\n\t\treturn false\n\t}\n\n\tfields := strings.Split(t.Literal, \"#\")\n\tif len(fields) == 1 || (len(fields) == 2 && fields[1] == \"\") {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (locals *KnownLocalsSet) HasName(name string) bool {\n\t_, ok := locals.names[name]\n\treturn ok\n}",
"func (t *Link) HasUnknownName() (ok bool) {\n\treturn t.name != nil && t.name[0].unknown_ != nil\n\n}",
"func (o *OutputField) HasFieldName() bool {\n\tif o != nil && !IsNil(o.FieldName) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (f Unstructured) HasByName(field string) bool {\n\tif f.IsUndefined() {\n\t\treturn true\n\t}\n\t_, ok := f.fields[field]\n\treturn ok\n}",
"func (member *EnumMember) IsUnknown() bool {\n\treturn member.HasAttribute(\"Unknown\")\n}",
"func (*OpenconfigLacp_Lacp_Interfaces_Interface_Members_Member_State) IsYANGGoStruct() {}",
"func isOnUnexportedMember(s string) bool {\n\tr := regexp.MustCompile(`\\([a-zA-Z]* \\*?[a-z]+.*\\)`)\n\treturn r.MatchString(s)\n}",
"func (*OpenconfigLacp_Lacp_Interfaces_Interface_Members_Member) IsYANGGoStruct() {}",
"func (o *FormField) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *SyncStorage) IsMember(ns string, group string, member interface{}) (bool, error) {\n\tretVal, err := s.getDbBackend(ns).SIsMember(getNsPrefix(ns)+group, member)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn retVal, err\n}",
"func (funcs *NamedFuncs) HasNameLike(nameSubstring string) bool {\n\tfor _, nf := range *funcs {\n\t\tif strings.Contains(nf.Name, nameSubstring) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (p *FunctionalPropertyGenerator) unknownMemberDef() jen.Code {\n\treturn jen.Id(unknownMemberName).Interface()\n}",
"func (m *Measurement) HasField(name string) bool {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\t_, hasField := m.fieldNames[name]\n\treturn hasField\n}",
"func Has(obj interface{}, fieldName string) (bool, error) {\n\tobjValue, err := getReflectValue(obj)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tstructType := objValue.Type()\n\t_, found := structType.FieldByName(fieldName)\n\treturn found, nil\n}",
"func (*UseCase_UseCase_UseCase_Visibility) IsYANGGoStruct() {}",
"func IsNameOnly(ref Named) bool {\n\tif _, ok := ref.(NamedTagged); ok {\n\t\treturn false\n\t}\n\tif _, ok := ref.(Canonical); ok {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (*OpenconfigLacp_Lacp_Interfaces_Interface_Members_Member_State_Counters) IsYANGGoStruct() {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
clearMethodName returns the identifier to use for methods that clear all values from the property.
|
func (p *PropertyGenerator) clearMethodName() string {
if p.asIterator {
return iteratorClearMethod
}
return clearMethod
}
|
[
"func (nmd *NetMethodDispatch) Clear() {\n\tnmd.sync.Lock()\n\tdefer nmd.sync.Unlock()\n\tnmd.m = make(map[interface{}]NetMethodFun)\n}",
"func (v *Cache_ClearAfter_Args) MethodName() string {\n\treturn \"clearAfter\"\n}",
"func ClearName(key string) string {\n\tif strings.Contains(key, \"(\") {\n\t\tkey = strings.Split(key, \"(\")[0]\n\t}\n\n\tkey = strings.Replace(key, \"-\", \" \", -1)\n\tkey = strings.Replace(key, \"_\", \" \", -1)\n\tkey = strings.Replace(key, \"/\", \" \", -1)\n\tkey = strings.Replace(key, \"\\\\\", \" \", -1)\n\tkey = strings.Replace(key, \"'\", \" \", -1)\n\tkey = strings.Replace(key, \".\", \" \", -1)\n\n\tkey = strings.TrimPrefix(key, \"Registry \")\n\tkey = strings.TrimPrefix(key, \"Sponsoring \")\n\n\tkey = strings.TrimSpace(key)\n\tkey = strings.ToLower(key)\n\n\treturn key\n}",
"func ClearTypeName(tau TypeT) int32 {\n\treturn int32(C.yices_clear_type_name(C.type_t(tau)))\n}",
"func ClearNames(p *ir.Program) error {\n\tif err := CanonicalizeOperands(p); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, operand := range p.Operands {\n\t\toperand.Identifier = \"\"\n\t}\n\n\treturn nil\n}",
"func (aauo *APIAuditUpdateOne) ClearHTTPMethod() *APIAuditUpdateOne {\n\taauo.mutation.ClearHTTPMethod()\n\treturn aauo\n}",
"func (password *Password) Clear() {\n\tpassword.Value = \"\"\n}",
"func (m *CardMutation) ClearName() {\n\tm.name = nil\n\tm.clearedFields[card.FieldName] = struct{}{}\n}",
"func (r *PackageAggRow) ClearName() { r.Data.Name = nil }",
"func (_m *ORM) Clear(chainID *big.Int, key string) error {\n\tret := _m.Called(chainID, key)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*big.Int, string) error); ok {\n\t\tr0 = rf(chainID, key)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (x *fastReflection_ValueOp) Clear(fd protoreflect.FieldDescriptor) {\n\tswitch fd.FullName() {\n\tcase \"tendermint.crypto.ValueOp.key\":\n\t\tx.Key = nil\n\tcase \"tendermint.crypto.ValueOp.proof\":\n\t\tx.Proof = nil\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: tendermint.crypto.ValueOp\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message tendermint.crypto.ValueOp does not contain field %s\", fd.FullName()))\n\t}\n}",
"func (self *PhysicsP2) Clear() {\n self.Object.Call(\"clear\")\n}",
"func (aau *APIAuditUpdate) ClearHTTPMethod() *APIAuditUpdate {\n\taau.mutation.ClearHTTPMethod()\n\treturn aau\n}",
"func (sp *ScalarProperties) Clear() {\n\t*sp = ScalarProperties{}\n}",
"func (oiuo *OrderInfoUpdateOne) ClearPayMethod() *OrderInfoUpdateOne {\n\toiuo.mutation.ClearPayMethod()\n\treturn oiuo\n}",
"func (v *CoinIdentifier) Reset() {\n\tv.Identifier = \"\"\n}",
"func Clear(name string) FieldClearer {\n\treturn FieldClearer{Name: name}\n}",
"func (m *DrugMutation) ResetProperty() {\n\tm._Property = nil\n}",
"func (m *RightToTreatmentTypeMutation) ResetTypeName() {\n\tm._TypeName = nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
commonMethods returns methods common to every property.
|
func (p *PropertyGenerator) commonMethods() []*codegen.Method {
return []*codegen.Method{
codegen.NewCommentedValueMethod(
p.packageName(),
nameMethod,
p.StructName(),
/*params=*/ nil,
[]jen.Code{jen.String()},
[]jen.Code{
jen.Return(
jen.Lit(p.PropertyName()),
),
},
jen.Commentf("%s returns the name of this property: %q.", nameMethod, p.PropertyName()),
),
}
}
|
[
"func (o *CommonObjectProperties) GetCommonPropertyList() []string {\n\treturn []string{\n\t\t\"type\",\n\t\t\"spec_version\",\n\t\t\"id\",\n\t\t\"created_by_ref\",\n\t\t\"created\",\n\t\t\"modified\",\n\t\t\"revoked\",\n\t\t\"labels\",\n\t\t\"confidence\",\n\t\t\"lang\",\n\t\t\"external_references\",\n\t\t\"object_marking_refs\",\n\t\t\"granular_markings\",\n\t}\n}",
"func (md ImageMixinModel) Methods() image_mixin.MethodsCollection {\n\treturn image_mixin.MethodsCollection{\n\t\tMethodsCollection: md.Model.Methods(),\n\t}\n}",
"func GetMethods(v interface{}) (r []string) {\n\tvalue := reflect.ValueOf(v)\n\ttyp := value.Type()\n\tfor i := 0; i < value.NumMethod(); i++ {\n\t\tr = append(r, typ.Method(i).Name)\n\t}\n\treturn\n}",
"func (md CountryModel) Methods() country.MethodsCollection {\n\treturn country.MethodsCollection{\n\t\tMethodsCollection: md.Model.Methods(),\n\t}\n}",
"func (md CountryStateModel) Methods() country_state.MethodsCollection {\n\treturn country_state.MethodsCollection{\n\t\tMethodsCollection: md.Model.Methods(),\n\t}\n}",
"func CommonAccessor(obj interface{}) (metav1.Common, error) {\n\tswitch t := obj.(type) {\n\tcase List:\n\t\treturn t, nil\n\tcase ListMetaAccessor:\n\t\tif m := t.GetListMeta(); m != nil {\n\t\t\treturn m, nil\n\t\t}\n\t\treturn nil, errNotCommon\n\tcase metav1.ListMetaAccessor:\n\t\tif m := t.GetListMeta(); m != nil {\n\t\t\treturn m, nil\n\t\t}\n\t\treturn nil, errNotCommon\n\tcase metav1.Object:\n\t\treturn t, nil\n\tcase metav1.ObjectMetaAccessor:\n\t\tif m := t.GetObjectMeta(); m != nil {\n\t\t\treturn m, nil\n\t\t}\n\t\treturn nil, errNotCommon\n\tdefault:\n\t\treturn nil, errNotCommon\n\t}\n}",
"func getAllProps(opts RandomOptions, d2files d2file.D2Files) Props {\n\tprops := Props{}\n\t\n\tuniqueItemProps := getAllUniqueProps(d2files)\n\tprops = append(props, uniqueItemProps...)\n\t\n\tsetProps := getAllSetProps(d2files)\n\tprops = append(props, setProps...)\n\t\n\tsetItemProps := getAllSetItemsProps(d2files)\n\tprops = append(props, setItemProps...)\n\t\n\truneWordProps := getAllRWProps(d2files)\n\tprops = append(props, runeWordProps...)\n\t\n\tgemProps := getAllGemsProps(d2files)\n\tprops = append(props, gemProps...)\n\n\tfor i := range props {\n\t\t// Set all props Min to the Max value\n\t\tif opts.PerfectProps {\n\t\t\tprops[i].Min = props[i].Max\n\t\t}\n\t\t// sets skill = oskill\n\t\tif opts.UseOSkills {\n\t\t\tif props[i].Name == \"skill\" {\n\t\t\t\tprops[i].Name = \"oskill\"\n\t\t\t}\n\t\t}\n\t}\n\n\treturn props\n}",
"func (o *OfflineWebCartPaymentGateway) Methods() []domain.Method {\n\treturn []domain.Method{\n\t\t{\n\t\t\tTitle: \"cash on delivery\",\n\t\t\tCode: \"offlinepayment_cashondelivery\",\n\t\t},\n\t\t{\n\t\t\tTitle: \"cash in advance\",\n\t\t\tCode: \"offlinepayment_cashinadvance\",\n\t\t},\n\t}\n}",
"func (md UserModel) Methods() user.MethodsCollection {\n\treturn user.MethodsCollection{\n\t\tMethodsCollection: md.Model.Methods(),\n\t}\n}",
"func (obj *properties) All() []Property {\n\treturn obj.list\n}",
"func (s *ServiceDescriptor) GetMethods() []*MethodDescriptor { return s.Methods }",
"func (o *RuleMatch) GetMethods() []string {\n\tif o == nil || o.Methods == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.Methods\n}",
"func Methods() []SingleSignOn {\n\treturn ssoMethods\n}",
"func (m *Methods) All() []*Method {\n\treturn m.list\n}",
"func (m *RPCModule) methods() []*types.VMMember {\n\tm.modFuncs = []*types.VMMember{\n\t\t{\n\t\t\tName: \"connect\",\n\t\t\tValue: m.connect,\n\t\t\tDescription: \"connect to a RPC server\",\n\t\t},\n\t\t{\n\t\t\tName: \"local\",\n\t\t\tValue: m.ConnectLocal,\n\t\t\tDescription: \"connect to the local RPC server\",\n\t\t},\n\t}\n\n\treturn m.modFuncs\n}",
"func (r *REST) ConnectMethods() []string {\n\treturn proxyMethods\n}",
"func (s *Service) TwowayMethods() []*Method {\n\tmethods := make([]*Method, 0, len(s.Methods))\n\tfor _, method := range s.Methods {\n\t\tif !method.Oneway {\n\t\t\tmethods = append(methods, method)\n\t\t}\n\t}\n\treturn methods\n}",
"func (p *FunctionalPropertyGenerator) multiTypeFuncs() []*codegen.Method {\n\tvar methods []*codegen.Method\n\t// HasAny Method\n\tisLine := make([]jen.Code, 0, len(p.kinds)+1)\n\tfor i := range p.kinds {\n\t\tor := jen.Empty()\n\t\tif i < len(p.kinds)-1 {\n\t\t\tor = jen.Op(\"||\")\n\t\t}\n\t\tisLine = append(isLine, jen.Id(codegen.This()).Dot(p.isMethodName(i)).Call().Add(or))\n\t}\n\tif !p.hasURIKind() {\n\t\tisLine[len(isLine)-1] = jen.Add(isLine[len(isLine)-1], jen.Op(\"||\"))\n\t\tisLine = append(isLine, jen.Id(codegen.This()).Dot(iriMember).Op(\"!=\").Nil())\n\t}\n\thasAnyComment := fmt.Sprintf(\n\t\t\"%s returns true if any of the different values is set.\", hasAnyMethod,\n\t)\n\tif p.hasNaturalLanguageMap {\n\t\thasAnyComment = fmt.Sprintf(\n\t\t\t\"%s returns true if any of the values are set, except for the natural language map. When true, the specific has, getter, and setter methods may be used to determine what kind of value there is to access and set this property. To determine if the property was set as a natural language map, use the %s method instead.\",\n\t\t\thasAnyMethod,\n\t\t\tisLanguageMapMethod,\n\t\t)\n\t}\n\tmethods = append(methods, codegen.NewCommentedValueMethod(\n\t\tp.GetPrivatePackage().Path(),\n\t\thasAnyMethod,\n\t\tp.StructName(),\n\t\t/*params=*/ nil,\n\t\t[]jen.Code{jen.Bool()},\n\t\t[]jen.Code{jen.Return(join(isLine))},\n\t\thasAnyComment,\n\t))\n\t// Clear Method\n\tclearComment := fmt.Sprintf(\n\t\t\"%s ensures no value of this property is set. Calling %s or any of the 'Is' methods afterwards will return false.\", p.clearMethodName(), hasAnyMethod,\n\t)\n\tclearLine := p.multiTypeClearNonLanguageMapMembers()\n\tif p.hasNaturalLanguageMap {\n\t\tclearComment = fmt.Sprintf(\n\t\t\t\"%s ensures no value and no language map for this property is set. Calling %s or any of the 'Is' methods afterwards will return false.\",\n\t\t\tp.clearMethodName(),\n\t\t\thasAnyMethod,\n\t\t)\n\t\tclearLine = append(clearLine, jen.Id(codegen.This()).Dot(langMapMember).Op(\"=\").Nil())\n\t}\n\tmethods = append(methods, codegen.NewCommentedPointerMethod(\n\t\tp.GetPrivatePackage().Path(),\n\t\tp.clearMethodName(),\n\t\tp.StructName(),\n\t\t/*params=*/ nil,\n\t\t/*ret=*/ nil,\n\t\tclearLine,\n\t\tclearComment,\n\t))\n\t// Is Method\n\tfor i, kind := range p.kinds {\n\t\tisComment := fmt.Sprintf(\n\t\t\t\"%s returns true if this property has a type of %q. When true, use the %s and %s methods to access and set this property.\",\n\t\t\tp.isMethodName(i),\n\t\t\tkind.Name.LowerName,\n\t\t\tp.getFnName(i),\n\t\t\tp.setFnName(i),\n\t\t)\n\t\tif p.hasNaturalLanguageMap {\n\t\t\tisComment = fmt.Sprintf(\n\t\t\t\t\"%s. 
To determine if the property was set as a natural language map, use the %s method instead.\",\n\t\t\t\tisComment,\n\t\t\t\tisLanguageMapMethod,\n\t\t\t)\n\t\t}\n\t\tif kind.Nilable {\n\t\t\tmethods = append(methods, codegen.NewCommentedValueMethod(\n\t\t\t\tp.GetPrivatePackage().Path(),\n\t\t\t\tp.isMethodName(i),\n\t\t\t\tp.StructName(),\n\t\t\t\t/*params=*/ nil,\n\t\t\t\t[]jen.Code{jen.Bool()},\n\t\t\t\t[]jen.Code{jen.Return(jen.Id(codegen.This()).Dot(p.memberName(i)).Op(\"!=\").Nil())},\n\t\t\t\tisComment,\n\t\t\t))\n\t\t} else {\n\t\t\tmethods = append(methods, codegen.NewCommentedValueMethod(\n\t\t\t\tp.GetPrivatePackage().Path(),\n\t\t\t\tp.isMethodName(i),\n\t\t\t\tp.StructName(),\n\t\t\t\t/*params=*/ nil,\n\t\t\t\t[]jen.Code{jen.Bool()},\n\t\t\t\t[]jen.Code{jen.Return(jen.Id(codegen.This()).Dot(p.hasMemberName(i)))},\n\t\t\t\tisComment,\n\t\t\t))\n\t\t}\n\t}\n\tmethods = append(methods, codegen.NewCommentedValueMethod(\n\t\tp.GetPrivatePackage().Path(),\n\t\tisIRIMethod,\n\t\tp.StructName(),\n\t\t/*params=*/ nil,\n\t\t[]jen.Code{jen.Bool()},\n\t\t[]jen.Code{jen.Return(p.thisIRI().Op(\"!=\").Nil())},\n\t\tfmt.Sprintf(\n\t\t\t\"%s returns true if this property is an IRI. When true, use %s and %s to access and set this property\",\n\t\t\tisIRIMethod,\n\t\t\tgetIRIMethod,\n\t\t\tsetIRIMethod,\n\t\t)))\n\t// Set Method\n\tfor i, kind := range p.kinds {\n\t\tsetComment := fmt.Sprintf(\"%s sets the value of this property. Calling %s afterwards returns true.\", p.setFnName(i), p.isMethodName(i))\n\t\tif p.hasNaturalLanguageMap {\n\t\t\tsetComment = fmt.Sprintf(\n\t\t\t\t\"%s sets the value of this property and clears the natural language map. Calling %s afterwards will return true. Calling %s afterwards returns false.\",\n\t\t\t\tp.setFnName(i),\n\t\t\t\tp.isMethodName(i),\n\t\t\t\tisLanguageMapMethod,\n\t\t\t)\n\t\t}\n\t\tif kind.Nilable {\n\t\t\tmethods = append(methods, codegen.NewCommentedPointerMethod(\n\t\t\t\tp.GetPrivatePackage().Path(),\n\t\t\t\tp.setFnName(i),\n\t\t\t\tp.StructName(),\n\t\t\t\t[]jen.Code{jen.Id(\"v\").Add(kind.ConcreteKind)},\n\t\t\t\t/*ret=*/ nil,\n\t\t\t\t[]jen.Code{\n\t\t\t\t\tjen.Id(codegen.This()).Dot(p.clearMethodName()).Call(),\n\t\t\t\t\tjen.Id(codegen.This()).Dot(p.memberName(i)).Op(\"=\").Id(\"v\"),\n\t\t\t\t},\n\t\t\t\tsetComment,\n\t\t\t))\n\t\t} else {\n\t\t\tmethods = append(methods, codegen.NewCommentedPointerMethod(\n\t\t\t\tp.GetPrivatePackage().Path(),\n\t\t\t\tp.setFnName(i),\n\t\t\t\tp.StructName(),\n\t\t\t\t[]jen.Code{jen.Id(\"v\").Add(kind.ConcreteKind)},\n\t\t\t\t/*ret=*/ nil,\n\t\t\t\t[]jen.Code{\n\t\t\t\t\tjen.Id(codegen.This()).Dot(p.clearMethodName()).Call(),\n\t\t\t\t\tjen.Id(codegen.This()).Dot(p.memberName(i)).Op(\"=\").Id(\"v\"),\n\t\t\t\t\tjen.Id(codegen.This()).Dot(p.hasMemberName(i)).Op(\"=\").True(),\n\t\t\t\t},\n\t\t\t\tsetComment,\n\t\t\t))\n\t\t}\n\t}\n\tmethods = append(methods, codegen.NewCommentedPointerMethod(\n\t\tp.GetPrivatePackage().Path(),\n\t\tsetIRIMethod,\n\t\tp.StructName(),\n\t\t[]jen.Code{jen.Id(\"v\").Op(\"*\").Qual(\"net/url\", \"URL\")},\n\t\t/*ret=*/ nil,\n\t\t[]jen.Code{\n\t\t\tjen.Id(codegen.This()).Dot(p.clearMethodName()).Call(),\n\t\t\tp.thisIRISetFn(),\n\t\t},\n\t\tfmt.Sprintf(\"%s sets the value of this property. Calling %s afterwards returns true.\", setIRIMethod, isIRIMethod),\n\t))\n\t// Get Method\n\tfor i, kind := range p.kinds {\n\t\tgetComment := fmt.Sprintf(\"%s returns the value of this property. 
When %s returns false, %s will return an arbitrary value.\", p.getFnName(i), p.isMethodName(i), p.getFnName(i))\n\t\tmethods = append(methods, codegen.NewCommentedValueMethod(\n\t\t\tp.GetPrivatePackage().Path(),\n\t\t\tp.getFnName(i),\n\t\t\tp.StructName(),\n\t\t\t/*params=*/ nil,\n\t\t\t[]jen.Code{jen.Add(kind.ConcreteKind)},\n\t\t\t[]jen.Code{jen.Return(jen.Id(codegen.This()).Dot(p.memberName(i)))},\n\t\t\tgetComment,\n\t\t))\n\t}\n\tmethods = append(methods, codegen.NewCommentedValueMethod(\n\t\tp.GetPrivatePackage().Path(),\n\t\tgetIRIMethod,\n\t\tp.StructName(),\n\t\t/*params=*/ nil,\n\t\t[]jen.Code{jen.Op(\"*\").Qual(\"net/url\", \"URL\")},\n\t\t[]jen.Code{jen.Return(p.thisIRI())},\n\t\tfmt.Sprintf(\"%s returns the IRI of this property. When %s returns false, %s will return an arbitrary value.\", getIRIMethod, isIRIMethod, getIRIMethod),\n\t))\n\t// LessThan Method\n\tlessCode := jen.Empty().Add(\n\t\tjen.Id(\"idx1\").Op(\":=\").Id(codegen.This()).Dot(kindIndexMethod).Call().Line(),\n\t\tjen.Id(\"idx2\").Op(\":=\").Id(\"o\").Dot(kindIndexMethod).Call().Line(),\n\t\tjen.If(jen.Id(\"idx1\").Op(\"<\").Id(\"idx2\")).Block(\n\t\t\tjen.Return(jen.True()),\n\t\t).Else().If(jen.Id(\"idx1\").Op(\">\").Id(\"idx2\")).Block(\n\t\t\tjen.Return(jen.False()),\n\t\t))\n\tfor i, kind := range p.kinds {\n\t\tlessCode.Add(\n\t\t\tjen.Else().If(\n\t\t\t\tjen.Id(codegen.This()).Dot(p.isMethodName(i)).Call(),\n\t\t\t).Block(\n\t\t\t\tjen.Return(kind.lessFnCode(jen.Id(codegen.This()).Dot(p.getFnName(i)).Call(), jen.Id(\"o\").Dot(p.getFnName(i)).Call()))))\n\t}\n\tif !p.hasURIKind() {\n\t\tlessCode.Add(\n\t\t\tjen.Else().If(\n\t\t\t\tjen.Id(codegen.This()).Dot(isIRIMethod).Call(),\n\t\t\t).Block(\n\t\t\t\tjen.Return(\n\t\t\t\t\tjen.Id(codegen.This()).Dot(iriMember).Dot(\"String\").Call().Op(\"<\").Id(\"o\").Dot(getIRIMethod).Call().Dot(\"String\").Call(),\n\t\t\t\t),\n\t\t\t))\n\t}\n\tmethods = append(methods, codegen.NewCommentedValueMethod(\n\t\tp.GetPrivatePackage().Path(),\n\t\tcompareLessMethod,\n\t\tp.StructName(),\n\t\t[]jen.Code{jen.Id(\"o\").Qual(p.GetPublicPackage().Path(), p.InterfaceName())},\n\t\t[]jen.Code{jen.Bool()},\n\t\t[]jen.Code{\n\t\t\tlessCode,\n\t\t\tjen.Return(jen.False()),\n\t\t},\n\t\tfmt.Sprintf(\"%s compares two instances of this property with an arbitrary but stable comparison. Applications should not use this because it is only meant to help alternative implementations to go-fed to be able to normalize nonfunctional properties.\", compareLessMethod),\n\t))\n\treturn methods\n}",
"func mergeMethods(methods, embeddedMethods methodsList) (methodsList, error) {\n\tif methods == nil || embeddedMethods == nil {\n\t\treturn methods, nil\n\t}\n\n\tresult := make(methodsList, len(methods)+len(embeddedMethods))\n\tfor name, signature := range embeddedMethods {\n\t\tresult[name] = signature\n\t}\n\n\tfor name, signature := range methods {\n\t\tresult[name] = signature\n\t}\n\n\treturn result, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
isMethodName returns the identifier to use for methods that determine if a property holds a specific Kind of value.
|
func (p *PropertyGenerator) isMethodName(i int) string {
return fmt.Sprintf("%s%s", isMethod, p.kindCamelName(i))
}
|
[
"func MethodName(ctx context.Context) (string, bool) {\n\tname, ok := ctx.Value(MethodNameKey).(string)\n\treturn name, ok\n}",
"func (p *PropertyGenerator) hasMemberName(i int) string {\n\tif len(p.Kinds) == 1 && p.Kinds[0].Nilable {\n\t\tpanic(\"PropertyGenerator.hasMemberName called for nilable single value\")\n\t}\n\treturn fmt.Sprintf(\"has%sMember\", p.Kinds[i].Name.CamelName)\n}",
"func (me TSearchQualificationTypesSortProperty) IsName() bool { return me.String() == \"Name\" }",
"func (p *FunctionalPropertyGenerator) nameMethod() *codegen.Method {\n\tnameImpl := jen.If(\n\t\tjen.Len(jen.Id(codegen.This()).Dot(aliasMember)).Op(\">\").Lit(0),\n\t).Block(\n\t\tjen.Return(\n\t\t\tjen.Id(codegen.This()).Dot(aliasMember).Op(\"+\").Lit(\":\").Op(\"+\").Lit(p.PropertyName()),\n\t\t),\n\t).Else().Block(\n\t\tjen.Return(\n\t\t\tjen.Lit(p.PropertyName()),\n\t\t),\n\t)\n\tif p.hasNaturalLanguageMap {\n\t\tnameImpl = jen.If(\n\t\t\tjen.Id(codegen.This()).Dot(isLanguageMapMethod).Call(),\n\t\t).Block(\n\t\t\tjen.Return(\n\t\t\t\tjen.Lit(p.PropertyName() + \"Map\"),\n\t\t\t),\n\t\t).Else().Block(\n\t\t\tjen.Return(\n\t\t\t\tjen.Lit(p.PropertyName()),\n\t\t\t),\n\t\t)\n\t}\n\treturn codegen.NewCommentedValueMethod(\n\t\tp.GetPrivatePackage().Path(),\n\t\tnameMethod,\n\t\tp.StructName(),\n\t\t/*params=*/ nil,\n\t\t[]jen.Code{jen.String()},\n\t\t[]jen.Code{\n\t\t\tnameImpl,\n\t\t},\n\t\tfmt.Sprintf(\"%s returns the name of this property: %q.\", nameMethod, p.PropertyName()),\n\t)\n}",
"func (ExprType) HasMethod(fn string) bool { return boolResult }",
"func IsMethodExists(value interface{}, name string) bool {\n\t_, exists := reflect.TypeOf(value).MethodByName(name)\n\treturn exists\n}",
"func (method *Method) GetName() string { return method.Name }",
"func (p *PropertyGenerator) getFnName(i int) string {\n\tif len(p.Kinds) == 1 {\n\t\treturn getMethod\n\t}\n\treturn fmt.Sprintf(\"%s%s\", getMethod, p.kindCamelName(i))\n}",
"func IsIdentifier(name string) bool {}",
"func runtimeMethodName(v interface{}) string {\n\t// https://github.com/diamondburned/arikawa/issues/146\n\n\tptr := reflect.ValueOf(v).Pointer()\n\n\tfuncPC := runtime.FuncForPC(ptr)\n\tif funcPC == nil {\n\t\tpanic(\"given method is not a function\")\n\t}\n\n\tfuncName := funcPC.Name()\n\n\t// Do weird string parsing because Go wants us to.\n\tnameParts := strings.Split(funcName, \".\")\n\tmName := nameParts[len(nameParts)-1]\n\tnameParts = strings.Split(mName, \"-\")\n\tif len(nameParts) > 1 { // extract the string before -fm if possible\n\t\tmName = nameParts[len(nameParts)-2]\n\t}\n\n\treturn mName\n}",
"func (p *PropertyGenerator) setFnName(i int) string {\n\tif len(p.Kinds) == 1 {\n\t\treturn setMethod\n\t}\n\treturn fmt.Sprintf(\"%s%s\", setMethod, p.kindCamelName(i))\n}",
"func (v Value) MethodByName(name string) Value {\n\tif v.typ == nil {\n\t\t//panic(&ValueError{\"reflect.Value.MethodByName\", Invalid})\n\t}\n\tif v.flag&flagMethod != 0 {\n\t\treturn Value{}\n\t}\n\tm, ok := v.typ.MethodByName(name)\n\tif !ok {\n\t\treturn Value{}\n\t}\n\treturn v.Method(m.Index)\n}",
"func (p *PropertyGenerator) clearMethodName() string {\n\tif p.asIterator {\n\t\treturn iteratorClearMethod\n\t}\n\treturn clearMethod\n}",
"func (n *Node) IsMethod() bool {\n\treturn n.Type.Recv() != nil\n}",
"func funcName(v *ast.FuncDecl) (string, bool) {\n\tname := v.Name.Name\n\tif recv := fieldListType(v.Recv); recv != \"\" {\n\t\tname = recv + \".\" + v.Name.Name\n\t\tif !*privRecv && !ast.IsExported(strings.TrimPrefix(recv, \"(*\")) {\n\t\t\t// an exported method un an unexported receiver, skip\n\t\t\treturn name, false\n\t\t}\n\t}\n\treturn name, true\n}",
"func (m *methodDesc) HandlerName() string {\n\treturn fmt.Sprintf(\"%s_%d\", m.Name, m.Num)\n}",
"func MethodHasSuffix(v string) predicate.Job {\n\treturn predicate.Job(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldMethod), v))\n\t})\n}",
"func IsIdentifier(name string) bool {\n\treturn token.IsIdentifier(name)\n}",
"func (ctx *Context) Is(method string) bool {\r\n\treturn ctx.Method() == method\r\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewObserver creates an Observer.
|
func NewObserver() *Observer {
return &Observer{
close: make(chan interface{}),
events: make(chan Event),
}
}
|
[
"func NewObserver() *Observer {\n\treturn &Observer{listeners: make(map[string]func(json.RawMessage))}\n}",
"func New() *Observer {\n\treturn &Observer{\n\t\tcallbacks: make(map[string][]func(interface{})),\n\t}\n}",
"func NewObserver(channel chan Observation, blocking bool, filter FilterFn) *Observer {\n\treturn &Observer{\n\t\tchannel: channel,\n\t\tblocking: blocking,\n\t\tfilter: filter,\n\t\tid: atomic.AddUint64(&nextObserverID, 1),\n\t}\n}",
"func newObserver() observerSubComponent {\n\treturn observerSubComponent{subscribed: make(chan struct{})}\n}",
"func newObserver() *observerManager {\n\treturn &observerManager{eventRegistryMap:make(map[reflect.Type]eventInterface)}\n}",
"func NewObserver(duration time.Duration, repeatModulus int) Observable {\n\treturn &observer{\n\t\tround: 0,\n\t\tstarted: time.Now(),\n\t\tmistakes: mistakes.Mistakes{},\n\t\tduration: duration,\n\t\trepeatModulus: repeatModulus,\n\t}\n}",
"func NewObserver(m Measurer, r reducer.Reducer) *Observer {\n\to := &Observer{\n\t\tMeasurer: m,\n\t\tReducer: r,\n\t}\n\to.StartStopper = startstopper.NewGo(startstopper.RunnerFunc(o.run))\n\n\treturn o\n}",
"func NewObserver(cfg ElectionConfig, scope tally.Scope, role string, newLeaderCallback func(string) error) (Observer, error) {\n\tlog.WithFields(log.Fields{\"role\": role}).Debug(\"Creating new observer of election\")\n\tclient, err := zookeeper.New(cfg.ZKServers, &store.Config{ConnectionTimeout: zkConnErrRetry})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tobs := observer{\n\t\trole: role,\n\t\tmetrics: newObserverMetrics(scope, role),\n\t\tcallback: newLeaderCallback,\n\t\tfollower: leadership.NewFollower(client, leaderZkPath(cfg.Root, role)),\n\t\tstopChan: make(chan struct{}),\n\t}\n\treturn &obs, nil\n}",
"func New(log logger, client restclient, notif event.Notifier, factory ControllerFactory, excluded []string, namespace string) *Observer {\n\treturn &Observer{\n\t\tnotifier: notif,\n\t\tdiscovery: discovery.NewDiscoveryClientForConfigOrDie(client.GetRestConfig()),\n\t\tcpool: dynamic.NewForConfigOrDie(client.GetRestConfig()),\n\t\tctrls: make(controllerCollection),\n\t\tfactory: factory,\n\t\tlogger: log,\n\t\texcludedkind: excluded,\n\t\tnamespace: namespace,\n\t}\n}",
"func newObserver(config *Config, telemetrySettings component.TelemetrySettings) (component.Extension, error) {\n\tclient, err := k8sconfig.MakeClient(config.APIConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trestClient := client.CoreV1().RESTClient()\n\n\tvar podListerWatcher cache.ListerWatcher\n\tif config.ObservePods {\n\t\tvar podSelector fields.Selector\n\t\tif config.Node == \"\" {\n\t\t\tpodSelector = fields.Everything()\n\t\t} else {\n\t\t\tpodSelector = fields.OneTermEqualSelector(\"spec.nodeName\", config.Node)\n\t\t}\n\t\ttelemetrySettings.Logger.Debug(\"observing pods\")\n\t\tpodListerWatcher = cache.NewListWatchFromClient(restClient, \"pods\", v1.NamespaceAll, podSelector)\n\t}\n\n\tvar nodeListerWatcher cache.ListerWatcher\n\tif config.ObserveNodes {\n\t\tvar nodeSelector fields.Selector\n\t\tif config.Node == \"\" {\n\t\t\tnodeSelector = fields.Everything()\n\t\t} else {\n\t\t\tnodeSelector = fields.OneTermEqualSelector(\"metadata.name\", config.Node)\n\t\t}\n\t\ttelemetrySettings.Logger.Debug(\"observing nodes\")\n\t\tnodeListerWatcher = cache.NewListWatchFromClient(restClient, \"nodes\", v1.NamespaceAll, nodeSelector)\n\t}\n\n\tobs := &k8sObserver{\n\t\ttelemetry: telemetrySettings,\n\t\tpodListerWatcher: podListerWatcher,\n\t\tnodeListerWatcher: nodeListerWatcher,\n\t\tstop: make(chan struct{}),\n\t\tconfig: config,\n\t}\n\n\treturn obs, nil\n}",
"func newObserver(logger *zap.Logger, config *Config) (component.Extension, error) {\n\treturn &dockerObserver{logger: logger, config: config}, nil\n}",
"func newObserverReplicator(nodeID proto.NodeID, startHeight int32, c *Chain) *observerReplicator {\n\treturn &observerReplicator{\n\t\tnodeID: nodeID,\n\t\theight: startHeight,\n\t\ttriggerCh: make(chan struct{}, 1),\n\t\tstopCh: make(chan struct{}, 1),\n\t\tc: c,\n\t}\n}",
"func NewMockObserver(ctrl *gomock.Controller) *MockObserver {\n\tmock := &MockObserver{ctrl: ctrl}\n\tmock.recorder = &MockObserverMockRecorder{mock}\n\treturn mock\n}",
"func New(done <-chan bool) *Notifier {\n\tnotifier := Notifier{\n\t\tnotificationMessages: make(chan string),\n\t\tobservers: make(map[chan *model.Notification]bool),\n\t\tdone: done,\n\t}\n\n\tgo notifier.dispatch()\n\n\treturn ¬ifier\n}",
"func NewObserver(options ...Option) (Observer, error) {\n\tcfg := &config{\n\t\tusages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},\n\t\tlog: logr.Discard(),\n\t}\n\tfor _, o := range sortedOptions(options) {\n\t\tif err := o.apply(cfg); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\to := &observer{\n\t\tupdateError: prometheus.NewGauge(prometheus.GaugeOpts{\n\t\t\tNamespace: cfg.namespace,\n\t\t\tSubsystem: cfg.subsystem,\n\t\t\tName: updateErrorName,\n\t\t\tHelp: \"Indicates if there was an error updating the TLS configuration.\",\n\t\t}),\n\t\tverifyError: prometheus.NewGauge(prometheus.GaugeOpts{\n\t\t\tNamespace: cfg.namespace,\n\t\t\tSubsystem: cfg.subsystem,\n\t\t\tName: verifyErrorName,\n\t\t\tHelp: \"Indicates if there was an error verifying the TLS configuration's certificates and expirations.\",\n\t\t}),\n\t\texpiration: prometheus.NewGauge(prometheus.GaugeOpts{\n\t\t\tNamespace: cfg.namespace,\n\t\t\tSubsystem: cfg.subsystem,\n\t\t\tName: expirationName,\n\t\t\tHelp: \"Earliest expiration time of the TLS configuration's certificates in seconds since the Unix epoch.\",\n\t\t}),\n\t\tusages: cfg.usages,\n\t\tlog: cfg.log,\n\t}\n\treturn o, nil\n}",
"func NewObservable(labels map[string]string) reconciler.Observable {\n\treturn reconciler.Observable{\n\t\tType: Type,\n\t\tObj: Observable{\n\t\t\tLabels: labels,\n\t\t},\n\t}\n}",
"func (n *StatusChangeNotifier) AddObserver(obs *Observer) {\n\tfor _, evt := range obs.Events {\n\t\texists := false\n\t\tnilIdx := -1\n\t\tfor i, observer := range n.observers[evt] {\n\t\t\tif observer == nil {\n\t\t\t\tnilIdx = i\n\t\t\t} else if obs.Id == observer.Id {\n\t\t\t\texists = true\n\t\t\t}\n\t\t}\n\t\tif !exists {\n\t\t\tobs.Conn.SetCloseHandler(n.CloseHandler(obs))\n\t\t\tif nilIdx >= 0 {\n\t\t\t\tn.observers[evt][nilIdx] = obs\n\t\t\t} else {\n\t\t\t\tn.observers[evt] = append(n.observers[evt], obs)\n\t\t\t}\n\t\t\tlog.Println(\"added observer:\", obs)\n\t\t}\n\t}\n}",
"func NewObserverForResource(conf *rest.Config, res *v1alpha1.Resource, filters []string) (*Observer, error) {\n\tns, err := common.GetWatchNamespace()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgvr := schema.GroupVersionResource{\n\t\tGroup: res.Group,\n\t\tVersion: res.Version,\n\t\tResource: res.Name,\n\t}\n\tresourceClient := dynamic.NewForConfigOrDie(conf).Resource(gvr)\n\n\treturn &Observer{\n\t\tclient: resourceClient,\n\t\tnamespace: ns,\n\t\tresource: res,\n\t\tfilters: filters,\n\t\tK8RestConfig: conf,\n\t}, nil\n}",
"func (s *StockGrabber) AddObserver(o interfaces.Observer) {\n\ts.Observers = append(s.Observers, o)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddTracepoint adds a tracepoint to watch for.
|
func (o *Observer) AddTracepoint(name string) EventSource {
source := atomic.AddUint32(&o.nextEventSource, 1)
o.tracepoints = append(o.tracepoints, tracepointData{
source: EventSource(source),
tp: newTracepoint(name),
})
return EventSource(source)
}
|
[
"func (monitor *EventMonitor) RegisterTracepoint(\n\tname string,\n\tfn TraceEventDecoderFn,\n\toptions ...RegisterEventOption,\n) (uint64, error) {\n\topts := newRegisterEventOptions()\n\topts.processOptions(options...)\n\n\tmonitor.lock.Lock()\n\tdefer monitor.lock.Unlock()\n\n\treturn monitor.newRegisteredTraceEvent(name, fn, opts, EventTypeTracepoint)\n}",
"func (a *Adapter) AppendTracePoint(ctx context.Context, point string) context.Context {\n\n\tpath := ctx.Value(internal.TraceKey)\n\tif path == nil {\n\t\treturn context.WithValue(ctx, internal.TraceKey, point)\n\t}\n\n\treturn context.WithValue(ctx, internal.TraceKey, path.(string)+\">\"+point)\n}",
"func (p *Probe) attachTracepoint() error {\n\t// Parse section\n\ttraceGroup := strings.SplitN(p.Section, \"/\", 3)\n\tif len(traceGroup) != 3 {\n\t\treturn errors.Wrapf(ErrSectionFormat, \"expected SEC(\\\"tracepoint/[category]/[name]\\\") got %s\", p.Section)\n\t}\n\tcategory := traceGroup[1]\n\tname := traceGroup[2]\n\n\t// Get the ID of the tracepoint to activate\n\ttracepointID, err := GetTracepointID(category, name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"couldn's activate tracepoint %s\", p.Section)\n\t}\n\n\t// Hook the eBPF program to the tracepoint\n\tp.perfEventFD, err = perfEventOpenTracepoint(tracepointID, p.program.FD())\n\treturn errors.Wrapf(err, \"couldn't enable tracepoint %s\", p.Section)\n}",
"func (tracer *Tracer) Add(ctx context.Context, start *wire.OutPoint) {\r\n\tnewNode := traceNode{\r\n\t\toutpoint: *start,\r\n\t}\r\n\ttracer.traces = append(tracer.traces, &newNode)\r\n}",
"func (t *TracerImpl) AddTrace(trace *Trace) {\n\tlogTraceDone(t.logger, trace)\n\n\tif trace.Start == 0 {\n\t\ttrace.Start = t.timeProvider.NowUnixNano()\n\t}\n\n\tt.traces = append(t.traces, trace)\n}",
"func (c *StepLookbackAccumulator) AddPoint(dp ts.Datapoint) {\n\tif dp.TimestampNanos.Before(c.earliestLookback) {\n\t\t// this datapoint is too far in the past, it can be dropped.\n\t\treturn\n\t}\n\n\tc.datapoints = append(c.datapoints, xts.Datapoint{\n\t\tTimestamp: dp.TimestampNanos,\n\t\tValue: dp.Value,\n\t})\n}",
"func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) {\n\tif group == \"\" || name == \"\" {\n\t\treturn nil, fmt.Errorf(\"group and name cannot be empty: %w\", errInvalidInput)\n\t}\n\tif prog == nil {\n\t\treturn nil, fmt.Errorf(\"prog cannot be nil: %w\", errInvalidInput)\n\t}\n\tif !isValidTraceID(group) || !isValidTraceID(name) {\n\t\treturn nil, fmt.Errorf(\"group and name '%s/%s' must be alphanumeric or underscore: %w\", group, name, errInvalidInput)\n\t}\n\tif prog.Type() != ebpf.TracePoint {\n\t\treturn nil, fmt.Errorf(\"eBPF program type %s is not a Tracepoint: %w\", prog.Type(), errInvalidInput)\n\t}\n\n\ttid, err := getTraceEventID(group, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfd, err := openTracepointPerfEvent(tid, perfAllThreads)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cookie uint64\n\tif opts != nil {\n\t\tcookie = opts.Cookie\n\t}\n\n\tpe := &perfEvent{\n\t\ttyp: tracepointEvent,\n\t\tgroup: group,\n\t\tname: name,\n\t\ttracefsID: tid,\n\t\tcookie: cookie,\n\t\tfd: fd,\n\t}\n\n\tlnk, err := attachPerfEvent(pe, prog)\n\tif err != nil {\n\t\tpe.Close()\n\t\treturn nil, err\n\t}\n\n\treturn lnk, nil\n}",
"func (v *VM) AddBreakPoint(n int) {\n\tctx := v.Context()\n\tctx.breakPoints = append(ctx.breakPoints, n)\n}",
"func (coll *Collection) EnableTracepoint(secName string) error {\n\t// Check if section exists\n\tprog, ok := coll.Programs[secName]\n\tif !ok {\n\t\treturn errors.Wrapf(\n\t\t\terrors.New(\"section not found\"),\n\t\t\t\"couldn't enable tracepoint %s\",\n\t\t\tsecName,\n\t\t)\n\t}\n\tif prog.ProgramSpec.Type == TracePoint {\n\t\treturn prog.EnableTracepoint()\n\t}\n\treturn errors.Wrapf(\n\t\terrors.New(\"not a tracepoint\"),\n\t\t\"couldn't enable program %s\",\n\t\tsecName,\n\t)\n}",
"func AddTracing() EventRecordOption {\n\treturn envOption(\"ADD_TRACING\", \"true\")\n}",
"func (bp *BatchPoints) AddPoint(measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time) error {\n\tbp.mtx.Lock()\n\tdefer bp.mtx.Unlock()\n\tpt, err := influx_client.NewPoint(measurement, tags, fields, ts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbp.bp.AddPoint(pt)\n\treturn nil\n}",
"func (s *timingService) RecordTimingPoint(ctx context.Context, in *pb.TimingSystemRequest) (*pb.TimingSystemResponse, error) {\n\tlog.Printf(\"Id: %v, Type: %v, TimePoint: %v\", in.Id, in.Type, in.TimePoint)\n\n\thandleTheRequest(in, s.serverHub)\n\n\treturn &pb.TimingSystemResponse{ResultStatus: true}, nil\n}",
"func ProfileTracepoint(subsystem, event string, pid, cpu int, opts ...int) (BPFProfiler, error) {\n\tconfig, err := GetTracepointConfig(subsystem, event)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\teventAttr := &unix.PerfEventAttr{\n\t\tType: PERF_TYPE_TRACEPOINT,\n\t\tConfig: config,\n\t\tSize: uint32(unsafe.Sizeof(unix.PerfEventAttr{})),\n\t\tBits: unix.PerfBitDisabled | unix.PerfBitExcludeHv,\n\t\tRead_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,\n\t\tSample_type: PERF_SAMPLE_IDENTIFIER,\n\t}\n\tvar eventOps int\n\tif len(opts) > 0 {\n\t\teventOps = opts[0]\n\t}\n\tfd, err := unix.PerfEventOpen(\n\t\teventAttr,\n\t\tpid,\n\t\tcpu,\n\t\t-1,\n\t\teventOps,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &profiler{\n\t\tfd: fd,\n\t}, nil\n}",
"func (d *Distribution) AddPoint(timestamp time.Time, value float64) {\n\td.Values = append(d.Values, MetricValue{Timestamp: timestamp, Value: value})\n}",
"func AddStacktrace(lvl zapcore.LevelEnabler) Option {\n\treturn optionFunc(func(log *Logger) {\n\t\tlog.addStack = lvl\n\t})\n}",
"func AddNewTracer(name string) *Tracer {\n\tsrc := NewTracer(name)\n\tif err := gwr.AddGenericDataSource(src); err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn src\n}",
"func (s *MetricsService) AddPoint(id int, value int, timestamp string) (*Point, *Response, error) {\n\tu := fmt.Sprintf(\"api/v1/metrics/%d/points\", id)\n\tv := new(metricPointAPIResponse)\n\n\tp := struct {\n\t\tValue int `json:\"value\"`\n\t\tTimestamp string `json:\"timestamp\"`\n\t}{\n\t\tValue: value,\n\t\tTimestamp: timestamp,\n\t}\n\n\tresp, err := s.client.Call(\"POST\", u, p, v)\n\treturn v.Data, resp, err\n}",
"func AddTraceID(s opentracing.Span, id string) {\n\tif id != \"\" {\n\t\ts.SetTag(\"traceID\", id)\n\t}\n}",
"func (ts *Timeseries) AddNewPoint(v float64, x interface{}) error {\n\tts.Lock()\n\tdefer ts.Unlock() // unlocks at the end\n\n\tswitch T := x.(type) {\n\tcase int64:\n\t\tts.XY[T] = v\n\tcase time.Time:\n\t\tts.XY[T.UnixNano()] = v\n\tcase int:\n\t\tts.XY[int64(T)] = v\n\tdefault:\n\t\treturn fmt.Errorf(\"Adding point not possible\")\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Open finishes initializing the observer. From then on, events can be received with ReadEvent().
|
func (o *Observer) Open() error {
var err error
defer func() {
if err != nil {
o.Close()
}
}()
for _, data := range o.tracepoints {
tp := data.tp
source := data.source
if err = tp.open(); err != nil {
return err
}
o.wg.Add(1)
go func(tp *tracepoint, source EventSource) {
// TODO(damien): should we hide the implementation details into the
// tracepoint object and have it provide a channel?
for {
var nFds int
nFds, err = tp.perf.poll(-1)
if err != nil {
break
}
if nFds == 0 {
break
}
tp.perf.read(func(msg *perfEventSample, cpu int) {
event := &TracepointEvent{
baseEvent: baseEvent{
source: source,
},
tp: tp,
data: msg.DataCopy(),
}
o.events <- event
}, nil)
}
o.wg.Done()
}(tp, source)
}
return nil
}
|
[
"func (p *Session) OnOpen() {\n\tlevel.Debug(p.logger).Log(\"msg\", \"Session open\")\n\tp.open = true\n}",
"func (w *BaseWebsocketClient) OnOpen() {}",
"func newObserver() observerSubComponent {\n\treturn observerSubComponent{subscribed: make(chan struct{})}\n}",
"func (s *Streamer) init() error {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.fsNotify = watcher // we closed it during Stop() process\n\n\ts.changedFileNames = make(chan string, 1000) // we closed it during Stop() process\n\n\treturn nil\n}",
"func (jobs *Jobs) Open() {\n\tjobs.ch = channels.NewInfiniteChannel()\n}",
"func initialise(h *HTTP) {\n\th.events = make(chan error)\n\th.signals = make(chan os.Signal, 1)\n}",
"func (w *pollWorker) observe(ballot *Ballot) {\n\t// This synchronous. This is to ensure that listen() receives the ballot\n\t// before this function return to the ballotMaster.\n\tw.ballot = ballot\n\tw.listench <- ballot\n}",
"func (h *RpcServerHandler) OnOpen(session getty.Session) error {\n\tvar err error\n\th.rwlock.RLock()\n\tif h.maxSessionNum <= len(h.sessionMap) {\n\t\terr = errTooManySessions\n\t}\n\th.rwlock.RUnlock()\n\tif err != nil {\n\t\treturn perrors.WithStack(err)\n\t}\n\n\tlog.Infof(\"got session:%s\", session.Stat())\n\th.rwlock.Lock()\n\th.sessionMap[session] = &rpcSession{session: session}\n\th.rwlock.Unlock()\n\treturn nil\n}",
"func NewObserver() *Observer {\n\treturn &Observer{\n\t\tclose: make(chan interface{}),\n\t\tevents: make(chan Event),\n\t}\n}",
"func (s *BasevhdlListener) EnterFile_open_information(ctx *File_open_informationContext) {}",
"func onFileOpen(filename string) {\n\t//\n}",
"func (br *Broker) Open(ctx context.Context) error {\n\twg := sync.WaitGroup{}\n\twg.Add(br.worker)\n\tbr.wg.Add(br.worker)\n\tfor i := 0; i < br.worker; i++ {\n\t\tgo func() {\n\t\t\twg.Done()\n\t\t\tdefer br.wg.Done()\n\t\t\tfor h := range br.ch {\n\t\t\t\t_ = h()\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tbr.opened = true\n\treturn nil\n}",
"func NewObserver() *Observer {\n\treturn &Observer{listeners: make(map[string]func(json.RawMessage))}\n}",
"func (o *Observer) ReadEvent() (Event, error) {\n\tselect {\n\tcase <-o.close:\n\t\treturn nil, nil\n\tcase event := <-o.events:\n\t\treturn event, nil\n\t}\n}",
"func (e *Emitter) Init() {\n\te.handlers = make(map[string]EventHandlerFunc)\n\te.event = make(chan XpcEvent)\n\n\t// event handler\n\tgo func() {\n\t\tfor {\n\t\t\tev := <-e.event\n\n\t\t\tif fn, ok := e.handlers[ALL]; ok {\n\t\t\t\tif fn(ev) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trawEvent, _ := json.MarshalIndent(ev, \"\", \"\t\")\n\t\t\t\tfmt.Printf(\"%s\", string(rawEvent))\n\t\t\t}\n\t\t}\n\n\t\tclose(e.event) // TOFIX: this causes new \"emits\" to panic.\n\t}()\n}",
"func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {\n\tretc := make(chan v3.GetResponse)\n\tgo e.observe(ctx, retc)\n\treturn retc\n}",
"func (v *App) Open() {\n\tv.initialize()\n\n\tv.opened = true\n}",
"func init() {\n\tevents.OnEventManagerReady(func() error {\n\t\t// The main NetEventChannel is a fanout (separate queues for each subscriber) and is non-persistent\n\t\tvar ok bool\n\t\tvar err error\n\t\tok, netEventChannel, err = events.MakeEventChannel(netEventChannelName, true, false)\n\t\tif err != nil || !ok {\n\t\t\tif err != nil {\n\t\t\t\tlog.MaestroErrorf(\"NetworkManager: Can't create network event channel. Not good. --> %s\\n\", err.Error())\n\t\t\t} else {\n\t\t\t\tlog.MaestroErrorf(\"NetworkManager: Can't create network event channel. Not good. Unknown Error\\n\")\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n}",
"func (o *ServiceObsListener) Start() error {\n\t_, err := o.nc.Subscribe(o.opts.Topic, o.observationHandler)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not subscribe to observation topic for %s (%s): %s\", o.opts.ServiceName, o.opts.Topic, err)\n\t}\n\terr = o.nc.Flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobservationsGauge.Inc()\n\tlog.Printf(\"Started observing stats on %s for %s\", o.opts.Topic, o.opts.ServiceName)\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ReadEvent returns one event. This call blocks until an event is received.
|
func (o *Observer) ReadEvent() (Event, error) {
select {
case <-o.close:
return nil, nil
case event := <-o.events:
return event, nil
}
}
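
A hedged consumption loop built only from the select above: ReadEvent returns (nil, nil) once the close channel fires, so a nil event can serve as the stop condition (handle is a placeholder for caller logic):

for {
	ev, err := o.ReadEvent()
	if err != nil {
		return err
	}
	if ev == nil { // observer was closed
		return nil
	}
	handle(ev)
}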
|
[
"func (r *Replayer) ReadEvent() ([]byte, error) {\n\treturn nil, errors.New(\"implement me\")\n}",
"func (s *socket) readEvent() (ev *event, identity string, err error) {\n\tvar arr [][]byte\n\tfor {\n\t\tarr, err = s.sock.RecvMessageBytes(0)\n\t\tif err != nil {\n\t\t\tif IsIntr(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, \"\", errors.Wrap(err, \"event read (IO err)\")\n\t\t}\n\t\tbreak\n\t}\n\n\tev = new(event)\n\tif err := ev.UnmarshalBinary(bytes.NewReader(arr[len(arr)-1])); err != nil {\n\t\treturn nil, \"\", errors.Wrap(err, \"event read\")\n\t}\n\n\tif len(arr) > 1 {\n\t\treturn ev, string(arr[0]), nil\n\t}\n\n\treturn ev, \"\", nil\n}",
"func ReadSingleEvent(conn *EventStoreConnection, streamID string, eventNumber int32, resolveLinkTos bool, requireMaster bool) (protobuf.ReadEventCompleted, error) {\n\treadEventsData := &protobuf.ReadEvent{\n\t\tEventStreamId: proto.String(streamID),\n\t\tEventNumber: proto.Int32(eventNumber),\n\t\tResolveLinkTos: proto.Bool(resolveLinkTos),\n\t\tRequireMaster: proto.Bool(requireMaster),\n\t}\n\tdata, err := proto.Marshal(readEventsData)\n\tif err != nil {\n\t\tlog.Fatal(\"marshaling error: \", err)\n\t}\n\n\tpkg, err := newPackage(readEvent, data, newPackageCorrelationID().Bytes(), conn.Config.Login, conn.Config.Password)\n\tif err != nil {\n\t\tlog.Printf(\"[error] failed to create new read event package\")\n\t}\n\n\tresultPackage, err := performOperation(conn, pkg, readEventCompleted)\n\tif err != nil {\n\t\treturn protobuf.ReadEventCompleted{}, err\n\t}\n\tmessage := &protobuf.ReadEventCompleted{}\n\tproto.Unmarshal(resultPackage.Data, message)\n\n\tif *message.Result == protobuf.ReadEventCompleted_AccessDenied ||\n\t\t*message.Result == protobuf.ReadEventCompleted_Error {\n\t\treturn *message, errors.New(message.Result.String())\n\t}\n\n\tif *message.Result == protobuf.ReadEventCompleted_Success {\n\t\tmessage.Event.Event.EventId = DecodeNetUUID(message.Event.Event.EventId)\n\t}\n\n\treturn *message, nil\n}",
"func (s *Stream) Read() (*EventNode, error) {\n\ts.mtx.Lock()\n\tdefer s.mtx.Unlock()\n\tif s.i == len(s.ec.CurrentResults()) {\n\t\tif s.ec.HasNextPage() {\n\t\t\tif err := s.ec.NextPage(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ts.i = 0\n\t\t}\n\t\tif s.i == len(s.ec.CurrentResults()) {\n\t\t\treturn nil, io.EOF\n\t\t}\n\t}\n\tevent := s.ec.CurrentResults()[s.i]\n\ts.i++\n\treturn event, nil\n}",
"func ReadEvent(r *MlpdReader) (*Event, error) {\n\tvar data EventData\n\tvar err error\n\n\tbs, err := r.data.Peek(4)\n\tif len(bs) == 0 || err != nil {\n\t\treturn nil, &EventEOFError{}\n\t}\n\tev := &Event{\n\t\teventType: r.readByte(),\n\t\ttimeDiff: r.readULEB128(),\n\t}\n\ttp := ev.Type()\n\tswitch tp {\n\tcase TypeAlloc:\n\t\tdata, err = ReadEventAlloc(r, ev)\n\tcase TypeGC:\n\t\tdata, err = ReadEventGC(r, ev)\n\tcase TypeMetadata:\n\t\tdata, err = ReadEventMetadata(r, ev)\n\tcase TypeMethod:\n\t\tdata, err = ReadEventMethod(r, ev)\n\tcase TypeException:\n\t\tdata, err = ReadEventException(r, ev)\n\tcase TypeRuntime:\n\t\tdata, err = ReadEventRuntime(r, ev)\n\tcase TypeMonitor:\n\t\tdata, err = ReadEventMonitor(r, ev)\n\tcase TypeHeap:\n\t\tdata, err = ReadEventHeap(r, ev)\n\tcase TypeSample:\n\t\tdata, err = ReadEventSample(r, ev)\n\tcase TypeCoverage:\n\t\tdata, err = ReadEventCoverage(r, ev)\n\tcase TypeMeta:\n\t\tdata, err = ReadEventMeta(r, ev)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unsupported event type %v\", tp)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tev.data = data\n\treturn ev, nil\n}",
"func EventRead(buf []byte, v *Event) int {\n\tb := 0\n\n\tb += 32 // padding\n\n\treturn b\n}",
"func (p *protocol) Read() (transports.Event, error) {\n\tmessage, err := p.readMsg()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif p.isClient {\n\t\tswitch transportEvent := message.(type) {\n\t\tcase transports.AckEvent:\n\t\t\tlog.Debugf(\"[T %s < %s] Received acknowledgement for nonce %x with sequence %d\", p.conn.LocalAddr().String(), p.conn.RemoteAddr().String(), *transportEvent.Nonce(), transportEvent.Sequence())\n\t\t\treturn transportEvent, nil\n\t\tcase *protocolPONG:\n\t\t\tlog.Debugf(\"[T %s < %s] Received pong\", p.conn.LocalAddr().String(), p.conn.RemoteAddr().String())\n\t\t\treturn transports.NewPongEvent(p.conn.Context()), nil\n\t\t}\n\t} else {\n\t\tswitch transportEvent := message.(type) {\n\t\tcase *protocolPING:\n\t\t\tlog.Debugf(\"[R %s < %s] Received ping\", p.conn.LocalAddr().String(), p.conn.RemoteAddr().String())\n\t\t\treturn transports.NewPingEvent(p.conn.Context()), nil\n\t\tcase transports.EventsEvent:\n\t\t\tlog.Debugf(\"[R %s < %s] Received payload with nonce %x and %d events\", p.conn.LocalAddr().String(), p.conn.RemoteAddr().String(), *transportEvent.Nonce(), transportEvent.Count())\n\t\t\treturn transportEvent, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"unknown protocol message %T\", message)\n}",
"func (c *Log) ReadSingle(module, version string) (eventlog.Event, error) {\n\treturn eventlog.Event{}, errors.New(\"TODO: implement\")\n}",
"func GetEvent(name string) (Event, bool) {\n\treturn std.GetEvent(name)\n}",
"func (er *EventReader[T]) Read() (T, error) {\n\t// https://html.spec.whatwg.org/multipage/server-sent-events.html\n\tfor er.scanner.Scan() { // Scan while no error\n\t\tline := er.scanner.Text() // Get the line & interpret the event stream:\n\n\t\tif line == \"\" || line[0] == ':' { // If the line is blank or is a comment, skip it\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.Contains(line, \":\") { // If the line contains a U+003A COLON character (:), process the field\n\t\t\ttokens := strings.SplitN(line, \":\", 2)\n\t\t\ttokens[0], tokens[1] = strings.TrimSpace(tokens[0]), strings.TrimSpace(tokens[1])\n\t\t\tvar data T\n\t\t\tswitch tokens[0] {\n\t\t\tcase \"data\": // return the deserialized JSON object\n\t\t\t\tif tokens[1] == \"[DONE]\" { // If data is [DONE], end of stream was reached\n\t\t\t\t\treturn data, io.EOF\n\t\t\t\t}\n\t\t\t\terr := json.Unmarshal([]byte(tokens[1]), &data)\n\t\t\t\treturn data, err\n\t\t\tdefault: // Any other event type is an unexpected\n\t\t\t\treturn data, errors.New(\"Unexpected event type: \" + tokens[0])\n\t\t\t}\n\t\t\t// Unreachable\n\t\t}\n\t}\n\treturn *new(T), er.scanner.Err()\n}",
"func(this*Window)ReadEvent()(*Event,error){\nif this.ch!=nil{\nreturn nil,ErrChannelAlreadyOpened\n}\nf,err:=this.File(\"event\")\nif err!=nil{\nreturn nil,err\n}\nreturn readEvent(f)\n}",
"func (d *Device) readEvent() ([]byte, error) {\n\tif d.file == nil {\n\t\treturn nil, errors.New(\"device has not been initialized\")\n\t}\n\n\tbuf := make([]byte, uhidEventSize)\n\tn, err := d.file.Read(buf)\n\tif err != nil {\n\t\treturn buf, err\n\t}\n\tif n != uhidEventSize {\n\t\treturn buf, fmt.Errorf(\"unexpected number of bytes of UHID event; got %d, want %d\", n, uhidEventSize)\n\t}\n\treturn buf, nil\n}",
"func (c *Channel) Await(ctx context.Context, eventID string) (Event, error) {\n\n\t// start subscription before the read so we won't miss the notification\n\tsub := c.NewEventStateSubscription(eventID)\n\tdefer sub.Close()\n\n\te, err := c.Get(eventID)\n\tif err != nil && err != ErrNotFound {\n\t\treturn Event{}, err\n\t}\n\tif err == nil {\n\t\t// event already exists, no need to wait.\n\t\treturn e, nil\n\t}\n\n\t_, err = sub.Next(ctx)\n\tif err != nil {\n\t\treturn Event{}, err\n\t}\n\n\te, err = c.Get(eventID)\n\tif err != nil {\n\t\treturn Event{}, fmt.Errorf(\"retry get after await: %v\", err)\n\t}\n\n\treturn e, nil\n}",
"func (s *EventSub) Read(ctx context.Context, sink chan<- *Event) error {\n\t// First read into the past.\n\tif err := s.readPast(ctx, sink); err != nil {\n\t\treturn errors.WithMessage(err, \"reading logs\")\n\t}\n\t// Then wait for new events.\n\tif err := s.readFuture(ctx, sink); err != nil {\n\t\treturn errors.WithMessage(err, \"reading logs\")\n\t}\n\treturn nil\n}",
"func readEvent(eventNode confl.Node) (*Event, error) {\n\tif eventNode.Type() != confl.MapType {\n\t\treturn nil, errors.New(\"Invalid event\")\n\t}\n\n\tevent := &Event{}\n\n\tfor _, pair := range confl.KVPairs(eventNode) {\n\t\tswitch pair.Key.Value() {\n\t\tcase \"Source\":\n\t\t\tif !confl.IsText(pair.Value) {\n\t\t\t\treturn nil, errors.New(\"Invalid event source\")\n\t\t\t}\n\n\t\t\tevent.Source = pair.Value.Value()\n\t\tcase \"Target\":\n\t\t\tif !confl.IsText(pair.Value) {\n\t\t\t\treturn nil, errors.New(\"Invalid event source\")\n\t\t\t}\n\n\t\t\tevent.Target = pair.Value.Value()\n\n\t\tcase \"Meta\":\n\t\t\tmeta, metaErr := readEventMeta(pair.Value)\n\t\t\tif metaErr != nil {\n\t\t\t\treturn nil, metaErr\n\t\t\t}\n\n\t\t\tevent.Meta = meta\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"Invalid key\")\n\t\t}\n\t}\n\n\treturn event, nil\n}",
"func (c *channel) RecvEvent() (*event, error) {\n\tselect {\n\tcase ev := <-c.queue:\n\t\treturn ev, nil\n\tcase err := <-c.errorCh:\n\t\treturn nil, err\n\tcase <-c.stopCh:\n\t\treturn nil, errors.New(\"RecvEvent: cancelled\")\n\t}\n}",
"func (h *Handler) Read() (*Uevent, error) {\n\tuEv := &Uevent{}\n\n\t// Read header first.\n\theader, err := h.bufioReader.ReadString(paramDelim)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Fill uevent header.\n\tuEv.Header = header\n\n\texitLoop := false\n\n\t// Read every parameter as \"key=value\".\n\tfor !exitLoop {\n\t\tkeyValue, err := h.bufioReader.ReadString(paramDelim)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tidx := strings.Index(keyValue, \"=\")\n\t\tif idx < 1 {\n\t\t\treturn nil, grpcStatus.Errorf(codes.InvalidArgument, \"Could not decode uevent: Wrong format %q\", keyValue)\n\t\t}\n\n\t\t// The key is the first parameter, and the value is the rest\n\t\t// without the \"=\" sign, and without the last character since\n\t\t// it is the delimiter.\n\t\tkey, val := keyValue[:idx], keyValue[idx+1:len(keyValue)-1]\n\n\t\tswitch key {\n\t\tcase uEventAction:\n\t\t\tuEv.Action = val\n\t\tcase uEventDevPath:\n\t\t\tuEv.DevPath = val\n\t\tcase uEventSubSystem:\n\t\t\tuEv.SubSystem = val\n\t\tcase uEventDevName:\n\t\t\tuEv.DevName = val\n\t\tcase uEventInterface:\n\t\t\t// In case of network interfaces, DevName will be empty since a device node\n\t\t\t// is not created. Instead store the \"INTERFACE\" field as devName\n\t\t\tuEv.DevName = val\n\t\tcase uEventSeqNum:\n\t\t\tuEv.SeqNum = val\n\n\t\t\t// \"SEQNUM\" signals the uevent is complete.\n\t\t\texitLoop = true\n\t\t}\n\t}\n\n\treturn uEv, nil\n}",
"func (h *Impl) GetEvent() <-chan sdk.Event {\n\treturn h.events\n}",
"func (sc *SnippetClient) EventWaitAndGet(ctx context.Context, callbackID, eventName string, timeout time.Duration) (*EventWaitAndGetResult, error) {\n\t// Read the response with a slightly extended timeout. `eventWaitAndGet` won't respond until the event is posted in the snippet cache,\n\t// or the timeout is reached. In the timeout case, we need to set the TCP read deadline a little later so we'll get the response before the conn times out.\n\tres, err := sc.RPC(ctx, timeout+time.Second, \"eventWaitAndGet\", callbackID, eventName, int(timeout.Milliseconds()))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get eventWaitAndGet response\")\n\t}\n\t// Sample response: {\"callback_id\":\"1-1\", \"name\":\"eventName\", \"creation_time\":\"1642817334319\", \"data\":{'key': 'value'}}\n\tvar result EventWaitAndGetResult\n\tif err := json.Unmarshal(res.Result, &result); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to read result map from json response\")\n\t}\n\treturn &result, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetBitmarkFullProvenance returns the provenance for a bitmark.
|
func (bc *BitmarkdRPCClient) GetBitmarkFullProvenance(bitmarkID string) (json.RawMessage, error) {
args := FullProvenanceArguments{Id: bitmarkID}
var reply json.RawMessage
err := bc.call("Bitmark.FullProvenance", &args, &reply)
return reply, err
}
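
An illustrative call site; the method name and argument struct come from the function body, while the client construction and the shape of the decoded payload are assumptions:

reply, err := bc.GetBitmarkFullProvenance("bitmark-id")
if err != nil {
	return err
}
var provenance []map[string]interface{} // payload shape is an assumption
if err := json.Unmarshal(reply, &provenance); err != nil {
	return err
}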
|
[
"func (_e *MockProvisioningStore_Expecter) GetProvenance(ctx interface{}, o interface{}, org interface{}) *MockProvisioningStore_GetProvenance_Call {\n\treturn &MockProvisioningStore_GetProvenance_Call{Call: _e.mock.On(\"GetProvenance\", ctx, o, org)}\n}",
"func (B *block) GetProof() uint64{\n\treturn B.proof\n}",
"func CommitSetProvenance(tx *pachsql.Tx, id string) (_ []*pfs.Commit, retErr error) {\n\tq := `\n WITH RECURSIVE prov(from_id, to_id) AS (\n SELECT from_id, to_id\n FROM pfs.commit_provenance JOIN pfs.commits ON int_id = from_id\n WHERE commit_set_id = $1\n UNION ALL\n SELECT cp.from_id, cp.to_id\n FROM prov p, pfs.commit_provenance cp\n WHERE cp.from_id = p.to_id\n )\n SELECT DISTINCT commit_id\n FROM pfs.commits, prov\n WHERE int_id = prov.to_id AND commit_set_id != $1;`\n\trows, err := tx.Queryx(q, id)\n\tif err != nil {\n\t\treturn nil, errors.EnsureStack(err)\n\t}\n\tdefer errors.Close(&retErr, rows, \"close rows\")\n\tcs := make([]*pfs.Commit, 0)\n\tfor rows.Next() {\n\t\tvar commit string\n\t\tif err := rows.Scan(&commit); err != nil {\n\t\t\treturn nil, errors.EnsureStack(err)\n\t\t}\n\t\tcs = append(cs, ParseCommit(commit))\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, errors.EnsureStack(err)\n\t}\n\treturn cs, nil\n}",
"func (jm JSONMeta) Provenance() Provenance {\n\treturn jm.provenance\n}",
"func (db *PoetDb) GetProofMessage(proofRef []byte) ([]byte, error) {\n\treturn db.store.Get(proofRef)\n}",
"func (p *Proof) ProofBytes() []byte {\n\tbuff := new(bytes.Buffer)\n\n\t// The solution we serialise depends on the size of the cuckoo graph. The\n\t// cycle is always of length 42, but each vertex takes up more bits on\n\t// larger graphs, nonceLengthBits is this number of bits.\n\tnonceLengthBits := uint(p.EdgeBits)\n\n\t// Make a slice just large enough to fit all of the POW bits.\n\tbitvecLengthBits := nonceLengthBits * uint(ProofSize)\n\tbitvec := make([]uint8, (bitvecLengthBits+7)/8)\n\n\tfor n, nonce := range p.Nonces {\n\t\t// Pack this nonce into the bit stream.\n\t\tfor bit := uint(0); bit < nonceLengthBits; bit++ {\n\t\t\t// If this bit is set, then write it to the correct position in the\n\t\t\t// stream.\n\t\t\tif nonce&(1<<bit) != 0 {\n\t\t\t\toffsetBits := uint(n)*nonceLengthBits + bit\n\t\t\t\tbitvec[offsetBits/8] |= 1 << (offsetBits % 8)\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, err := buff.Write(bitvec); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\treturn buff.Bytes()\n}",
"func (o *ProvenanceOptionsEntity) GetProvenanceOptions() ProvenanceOptionsDTO {\n\tif o == nil || o.ProvenanceOptions == nil {\n\t\tvar ret ProvenanceOptionsDTO\n\t\treturn ret\n\t}\n\treturn *o.ProvenanceOptions\n}",
"func (jm JSONMeta) ProvenanceString() string {\n\tif jm.Key() == \"\" {\n\t\treturn jm.provenance.ShallowString()\n\t}\n\tcp := jm.Provenance().ShallowString()\n\tif cp == \"\" {\n\t\treturn jm.Key()\n\t}\n\n\treturn fmt.Sprintf(\"%s.%s\", cp, jm.Key())\n}",
"func (s *TrackerSource) chartHasProvenanceFile(chartURL *url.URL) (bool, error) {\n\tvar data []byte\n\n\tswitch chartURL.Scheme {\n\tcase \"http\", \"https\":\n\t\treq, _ := http.NewRequest(\"GET\", chartURL.String()+\".prov\", nil)\n\t\treq = req.WithContext(s.i.Svc.Ctx)\n\t\tif s.i.Repository.AuthUser != \"\" || s.i.Repository.AuthPass != \"\" {\n\t\t\treq.SetBasicAuth(s.i.Repository.AuthUser, s.i.Repository.AuthPass)\n\t\t}\n\t\tresp, err := s.i.Svc.Hc.Do(req)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn false, nil\n\t\t}\n\t\tdata, err = io.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"error reading provenance file: %w\", err)\n\t\t}\n\tcase \"oci\":\n\t\tvar err error\n\t\t_, data, err = s.i.Svc.Op.PullLayer(\n\t\t\ts.i.Svc.Ctx,\n\t\t\tstrings.TrimPrefix(chartURL.String(), hub.RepositoryOCIPrefix),\n\t\t\tChartProvenanceLayerMediaType,\n\t\t\ts.i.Repository.AuthUser,\n\t\t\ts.i.Repository.AuthPass,\n\t\t)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, oci.ErrLayerNotFound) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn false, fmt.Errorf(\"error pulling provenance layer: %w\", err)\n\t\t}\n\tdefault:\n\t\treturn false, nil\n\t}\n\n\tif !bytes.Contains(data, []byte(\"PGP SIGNATURE\")) {\n\t\treturn false, errors.New(\"invalid provenance file\")\n\t}\n\n\treturn true, nil\n}",
"func (m *MailTips) GetMailboxFull()(*bool) {\n val, err := m.GetBackingStore().Get(\"mailboxFull\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func (w *Worker) chartVersionHasProvenanceFile(u string) (bool, error) {\n\tresp, err := w.hg.Get(u + \".prov\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}",
"func GetMalfeasanceBlob(db sql.Executor, nodeID []byte) ([]byte, error) {\n\tvar (\n\t\tproof []byte\n\t\terr error\n\t)\n\trows, err := db.Exec(\"select proof from identities where pubkey = ?1;\",\n\t\tfunc(stmt *sql.Statement) {\n\t\t\tstmt.BindBytes(1, nodeID)\n\t\t}, func(stmt *sql.Statement) bool {\n\t\t\tproof = make([]byte, stmt.ColumnLen(0))\n\t\t\tstmt.ColumnBytes(0, proof[:])\n\t\t\treturn true\n\t\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"proof blob %v: %w\", nodeID, err)\n\t}\n\tif rows == 0 {\n\t\treturn nil, sql.ErrNotFound\n\t}\n\treturn proof, nil\n}",
"func (c *Client) GetProof(r *onet.Roster, id skipchain.SkipBlockID, key []byte) (*GetProofResponse, error) {\n\treply := &GetProofResponse{}\n\terr := c.SendProtobuf(r.List[0], &GetProof{\n\t\tVersion: CurrentVersion,\n\t\tID: id,\n\t\tKey: key,\n\t}, reply)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn reply, nil\n}",
"func GetBytesProposal(prop *peer.Proposal) ([]byte, error) {\n\tpropBytes, err := proto.Marshal(prop)\n\treturn propBytes, err\n}",
"func CommitSetSubvenance(tx *pachsql.Tx, id string) (_ []*pfs.Commit, retErr error) {\n\tq := `\n WITH RECURSIVE subv(from_id, to_id) AS (\n SELECT from_id, to_id\n FROM pfs.commit_provenance JOIN pfs.commits ON int_id = to_id\n WHERE commit_set_id = $1\n UNION ALL\n SELECT cp.from_id, cp.to_id\n FROM subv s, pfs.commit_provenance cp\n WHERE cp.to_id = s.from_id\n )\n SELECT DISTINCT commit_id\n FROM pfs.commits, subv\n WHERE int_id = subv.from_id AND commit_set_id != $1;`\n\trows, err := tx.Queryx(q, id)\n\tif err != nil {\n\t\treturn nil, errors.EnsureStack(err)\n\t}\n\tdefer errors.Close(&retErr, rows, \"close rows\")\n\tcs := make([]*pfs.Commit, 0)\n\tfor rows.Next() {\n\t\tvar commit string\n\t\tif err := rows.Scan(&commit); err != nil {\n\t\t\treturn nil, errors.EnsureStack(err)\n\t\t}\n\t\tcs = append(cs, ParseCommit(commit))\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, errors.EnsureStack(err)\n\t}\n\treturn cs, nil\n}",
"func UnmarshalProvenance(b []byte) (Provenance, error) {\n\tvar provenance Provenance\n\tif err := json.Unmarshal(b, &provenance); err != nil {\n\t\treturn provenance, err\n\t}\n\treturn provenance, nil\n}",
"func (p *PacketCryptAnn) GetMerkleProof() []byte {\n\treturn p.Header[PcAnnHeaderLen : PcAnnHeaderLen+PcAnnMerkleProofLen]\n}",
"func (_e *MockProvisioningStore_Expecter) SetProvenance(ctx interface{}, o interface{}, org interface{}, p interface{}) *MockProvisioningStore_SetProvenance_Call {\n\treturn &MockProvisioningStore_SetProvenance_Call{Call: _e.mock.On(\"SetProvenance\", ctx, o, org, p)}\n}",
"func GetProof(header SessionHeader, evidenceType EvidenceType, index int64, evidenceStore *CacheStorage) Proof {\n\t// retrieve the GOBEvidence\n\tevidence, err := GetEvidence(header, evidenceType, sdk.ZeroInt(), evidenceStore)\n\tif err != nil {\n\t\treturn nil\n\t}\n\t// check for out of bounds\n\tif evidence.NumOfProofs-1 < index || index < 0 {\n\t\treturn nil\n\t}\n\t// return the propoer proof\n\treturn evidence.Proofs[index]\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewDataTierPhaseStatisticsBuilder provides a builder for the DataTierPhaseStatistics struct.
|
func NewDataTierPhaseStatisticsBuilder() *DataTierPhaseStatisticsBuilder {
r := DataTierPhaseStatisticsBuilder{
&DataTierPhaseStatistics{},
}
return &r
}
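
A short sketch of the intended use, assuming the builder exposes fluent setters elsewhere in the package (none are shown here):

b := NewDataTierPhaseStatisticsBuilder()
// hypothetical fluent setters, e.g. b.NodeCount(3), would go here
stats := b.Build()
_ = stats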
|
[
"func NewDataTierPhaseStatistics() *DataTierPhaseStatistics {\n\tr := &DataTierPhaseStatistics{}\n\n\treturn r\n}",
"func (rb *DataTierPhaseStatisticsBuilder) Build() DataTierPhaseStatistics {\n\treturn *rb.v\n}",
"func NewPhase(name string, message string) *Phase {\n\treturn &Phase{\n\t\tName: name,\n\t\tMessage: message,\n\t}\n}",
"func NewTeamSummary()(*TeamSummary) {\n m := &TeamSummary{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}",
"func NewCmdPhase(out io.Writer) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"phase\",\n\t\tShort: \"Invoke subsets of kubeadm functions separately for a manual install.\",\n\t\tLong: cmdutil.MacroCommandLongDescription,\n\t}\n\n\tcmd.AddCommand(NewCmdAddon())\n\tcmd.AddCommand(NewCmdBootstrapToken())\n\tcmd.AddCommand(NewCmdCerts())\n\tcmd.AddCommand(NewCmdControlplane())\n\tcmd.AddCommand(NewCmdEtcd())\n\tcmd.AddCommand(NewCmdKubelet())\n\tcmd.AddCommand(NewCmdKubeConfig(out))\n\tcmd.AddCommand(NewCmdMarkMaster())\n\tcmd.AddCommand(NewCmdPreFlight())\n\tcmd.AddCommand(NewCmdSelfhosting())\n\tcmd.AddCommand(NewCmdUploadConfig())\n\n\treturn cmd\n}",
"func NewTestPhase(t *testing.T, callback TestCallback, streamer *eventbus.GossipStreamer, aChan chan message.Message) *TestPhase {\n\treturn &TestPhase{\n\t\treq: require.New(t),\n\t\tcallback: callback,\n\t\tstreamer: streamer,\n\t\taChan: aChan,\n\t}\n}",
"func newHospitalMutation(c config, op Op, opts ...hospitalOption) *HospitalMutation {\n\tm := &HospitalMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeHospital,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func NewStatistics(votes *Vote) Statistics {\n\n\ttr := make(map[string][]string)\n\tfor u, v := range votes.Votes {\n\t\ttr[v] = append(tr[v], u)\n\t}\n\n\ttotal := len(votes.Votes)\n\n\tlog.WithFields(log.Fields{\n\t\t\"total\": total,\n\t}).Info(\"Statistics struct generated\")\n\n\treturn Statistics{\n\t\tTotal: total,\n\t\tTransformed: tr,\n\t}\n\n}",
"func NewFieldStatisticsBuilder() *FieldStatisticsBuilder {\n\tr := FieldStatisticsBuilder{\n\t\t&FieldStatistics{},\n\t}\n\n\treturn &r\n}",
"func New(prefix string, logger log.Logger, lvs ...string) *Influxstatsd {\n\tif len(lvs)%2 != 0 {\n\t\tpanic(\"odd number of LabelValues; programmer error!\")\n\t}\n\treturn &Influxstatsd{\n\t\tprefix: prefix,\n\t\trates: ratemap.New(),\n\t\tcounters: lv.NewSpace(),\n\t\tgauges: map[string]*gaugeNode{}, // https://github.com/go-kit/kit/pull/588\n\t\ttimings: lv.NewSpace(),\n\t\thistograms: lv.NewSpace(),\n\t\tlogger: logger,\n\t\tlvs: lvs,\n\t}\n}",
"func NewTDigest() *TDigest {\n\t// TODO(ajwerner): add configuration.\n\tw := &TDigest{\n\t\ttickInterval: time.Second,\n\t}\n\t// TODO(ajwerner): fix buf where the last one has only 5 buckets\n\tw.mu.levels = []level{\n\t\t{\n\t\t\t// (0-1)-(1-2)s\n\t\t\tperiod: 1,\n\t\t\tdigestRingBuf: makeDigests(1, size),\n\t\t},\n\t\t{\n\t\t\t// (0-2)-(2-4), (0-2)-(4-6), (0-2)-(6-8), (0-2)-(8-10)s\n\t\t\tperiod: 2,\n\t\t\tdigestRingBuf: makeDigests(4, size),\n\t\t},\n\t\t{\n\t\t\tperiod: 10,\n\t\t\tdigestRingBuf: makeDigests(5, size),\n\t\t},\n\t\t{\n\t\t\tperiod: 60,\n\t\t\tdigestRingBuf: makeDigests(3, size),\n\t\t},\n\t\t{\n\t\t\tperiod: 120,\n\t\t\tdigestRingBuf: makeDigests(10, size),\n\t\t},\n\t}\n\tw.mu.open = tdigest.NewConcurrent(tdigest.Compression(size), tdigest.BufferFactor(10))\n\tw.mu.spare = tdigest.New(tdigest.Compression(size), tdigest.BufferFactor(2))\n\treturn w\n}",
"func (in *TestPhase) DeepCopy() *TestPhase {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TestPhase)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func NewDataTiersBuilder() *DataTiersBuilder {\n\tr := DataTiersBuilder{\n\t\t&DataTiers{},\n\t}\n\n\treturn &r\n}",
"func NewStatistics(loggingPeriod time.Duration) *Statistics {\n\tsw := Statistics{\n\t\tstatistics: make(chan uint8, statisticsChannelSize),\n\t\tcounter: 0,\n\t\tstart: time.Now(),\n\t\tloggingPeriod: loggingPeriod,\n\t}\n\tgo sw.run()\n\treturn &sw\n}",
"func NewTeamworkSoftwareUpdateHealth()(*TeamworkSoftwareUpdateHealth) {\n m := &TeamworkSoftwareUpdateHealth{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func newAreaStatistics() *AreaStatistics {\n\tvar areaStatistics AreaStatistics\n\tareaStatistics.HealthCenters = make(map[string]*HealthCenter)\n\treturn &areaStatistics\n}",
"func newTotalAggregation(window time.Duration) *totalAggregation {\n\treturn &totalAggregation{\n\t\twindow: window,\n\t\tperPodAggregations: make(map[string]*perPodAggregation),\n\t\tactivatorsContained: make(map[string]struct{}),\n\t}\n}",
"func NewActionStats() ActionStats {\n stats := ActionStats{}\n stats.stats = make(map[string]*actionData)\n return stats\n}",
"func newHamt(level uint8) hamt {\n\treturn hamt{level: level}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build finalizes the chain and returns the DataTierPhaseStatistics struct.
|
func (rb *DataTierPhaseStatisticsBuilder) Build() DataTierPhaseStatistics {
return *rb.v
}
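
Design note: Build dereferences the builder's internal pointer, so the caller receives a value copy of DataTierPhaseStatistics (a shallow copy, so any slice or map fields would still share backing storage). A minimal illustration:

b := NewDataTierPhaseStatisticsBuilder()
first := b.Build()
second := b.Build()
// first and second are separate shallow copies of the same underlying struct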
|
[
"func NewDataTierPhaseStatisticsBuilder() *DataTierPhaseStatisticsBuilder {\n\tr := DataTierPhaseStatisticsBuilder{\n\t\t&DataTierPhaseStatistics{},\n\t}\n\n\treturn &r\n}",
"func (h *AuditLogHandler) FinalizeChain() {\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tsendServiceLogs()\n}",
"func (apd allProviderDetails) finalize(totalQoSes totalQoSes) {\n\tfor providerID, providerDetails := range apd {\n\t\tvar (\n\t\t\ttotalDownloaded = totalQoSes[providerID].DownloadMbps\n\t\t\ttotalUploaded = totalQoSes[providerID].UploadMbps\n\t\t)\n\n\t\tif providerDetails.totalSessions != 0 {\n\t\t\tproviderDetails.AverageQoS.DownloadMbps, _ = totalDownloaded.Quo(\n\t\t\t\ttotalDownloaded,\n\t\t\t\tbig.NewFloat(float64(providerDetails.totalSessions)),\n\t\t\t).Float32()\n\t\t\tproviderDetails.AverageQoS.UploadMbps, _ = totalUploaded.Quo(\n\t\t\t\ttotalUploaded,\n\t\t\t\tbig.NewFloat(float64(providerDetails.totalSessions)),\n\t\t\t).Float32()\n\n\t\t\taveragePrice, _ := providerDetails.totalPrice.Quo(\n\t\t\t\tproviderDetails.totalPrice,\n\t\t\t\tbig.NewFloat(float64(providerDetails.totalSessions)),\n\t\t\t).Float64()\n\t\t\tproviderDetails.AveragePrice = fmt.Sprintf(\"%.10f\", averagePrice)\n\t\t}\n\n\t\tearned, _ := providerDetails.earned.Quo( // convert to ZCN\n\t\t\tproviderDetails.earned,\n\t\t\tbig.NewFloat(float64(billion)),\n\t\t).Float64()\n\t\tproviderDetails.Earned = fmt.Sprintf(\"%.10f\", earned)\n\t}\n}",
"func (b *DownloadBuilder) Phase(phase velerov1api.DownloadPhase) *DownloadBuilder {\n\tb.object.Status.Phase = phase\n\treturn b\n}",
"func NewDataTierPhaseStatistics() *DataTierPhaseStatistics {\n\tr := &DataTierPhaseStatistics{}\n\n\treturn r\n}",
"func (g *GDesc) finalize(lastTs, activeGCStartTime int64, trigger *Event) {\n\tif trigger != nil {\n\t\tg.EndTime = trigger.Ts\n\t}\n\tfinalStat := g.snapshotStat(lastTs, activeGCStartTime)\n\n\tg.GExecutionStat = finalStat\n\tfor _, s := range g.activeRegions {\n\t\ts.End = trigger\n\t\ts.GExecutionStat = finalStat.sub(s.GExecutionStat)\n\t\tg.Regions = append(g.Regions, s)\n\t}\n\t*(g.gdesc) = gdesc{}\n}",
"func (rb *ShardsStatsBuilder) Build() ShardsStats {\n\treturn *rb.v\n}",
"func (c *ChiMergeFilter) Build() {\n\tfor _, attr := range c.Attributes {\n\t\ttab := chiMerge(c.Instances, attr, c.Significance, c.MinRows, c.MaxRows)\n\t\tc.Tables[attr] = tab\n\t\tc._Trained = true\n\t}\n}",
"func (st *State) Analysis() (_ analysisTmpl) { return }",
"func (d *d05) stats(wg *sync.WaitGroup, block chan *d05) {\n\tfor {\n\t\tselect {\n\t\tcase partialSolution := <-block:\n\t\t\td.employeeCount += partialSolution.employeeCount\n\n\t\t\tfor k := range partialSolution.areas {\n\t\t\t\td.areas[k] = partialSolution.areas[k]\n\t\t\t}\n\n\t\t\tfor k := range partialSolution.employeesByArea {\n\t\t\t\td.employeesByArea[k] = append(d.employeesByArea[k], partialSolution.employeesByArea[k]...)\n\t\t\t}\n\n\t\t\tfor k, v := range partialSolution.salaryBySurname {\n\t\t\t\td.salaryBySurname.merge(k, v)\n\t\t\t}\n\n\t\t\tfor k, v := range partialSolution.salaryByArea {\n\t\t\t\td.salaryByArea.merge(k, v)\n\t\t\t}\n\n\t\t\td.salaries.merge(&partialSolution.salaries)\n\t\t\twg.Done()\n\t\t}\n\t}\n}",
"func (ste *simpleTestExecutor) ExecutePhase(ctx Context, phase *api.Phase) *errors.ErrorList {\n\t// TODO: add tuning set\n\terrList := errors.NewErrorList()\n\tautomanagedNamespacesList, _, err := ctx.GetClusterFramework().ListAutomanagedNamespaces()\n\tif err != nil {\n\t\treturn errors.NewErrorList(fmt.Errorf(\"automanaged namespaces listing failed: %v\", err))\n\t}\n\ttuningSet, err := ctx.GetTuningSetFactory().CreateTuningSet(phase.TuningSet)\n\tif err != nil {\n\t\treturn errors.NewErrorList(fmt.Errorf(\"tuning set creation error: %v\", err))\n\t}\n\n\tvar actions []func()\n\tfor namespaceIndex := range automanagedNamespacesList {\n\t\tnsName := automanagedNamespacesList[namespaceIndex]\n\t\tinstancesStates := make([]*state.InstancesState, 0)\n\t\t// Updating state (DesiredReplicaCount) of every object in object bundle.\n\t\tfor j := range phase.ObjectBundle {\n\t\t\tid, err := getIdentifier(ctx, &phase.ObjectBundle[j])\n\t\t\tif err != nil {\n\t\t\t\terrList.Append(err)\n\t\t\t\treturn errList\n\t\t\t}\n\t\t\tinstances, exists := ctx.GetState().GetNamespacesState().Get(nsName, id)\n\t\t\tif !exists {\n\t\t\t\tcurrentReplicaCount, err := getReplicaCountOfNewObject(ctx, nsName, &phase.ObjectBundle[j])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrList.Append(err)\n\t\t\t\t\treturn errList\n\t\t\t\t}\n\t\t\t\tinstances = &state.InstancesState{\n\t\t\t\t\tDesiredReplicaCount: 0,\n\t\t\t\t\tCurrentReplicaCount: currentReplicaCount,\n\t\t\t\t\tObject: phase.ObjectBundle[j],\n\t\t\t\t}\n\t\t\t}\n\t\t\tinstances.DesiredReplicaCount = phase.ReplicasPerNamespace\n\t\t\tctx.GetState().GetNamespacesState().Set(nsName, id, instances)\n\t\t\tinstancesStates = append(instancesStates, instances)\n\t\t}\n\n\t\tif err := verifyBundleCorrectness(instancesStates); err != nil {\n\t\t\tklog.Errorf(\"Skipping phase. 
Incorrect bundle in phase: %+v\", *phase)\n\t\t\treturn errors.NewErrorList(err)\n\t\t}\n\n\t\t// Deleting objects with index greater or equal requested replicas per namespace number.\n\t\t// Objects will be deleted in reversed order.\n\t\tfor replicaCounter := phase.ReplicasPerNamespace; replicaCounter < instancesStates[0].CurrentReplicaCount; replicaCounter++ {\n\t\t\treplicaIndex := replicaCounter\n\t\t\tactions = append(actions, func() {\n\t\t\t\tfor j := len(phase.ObjectBundle) - 1; j >= 0; j-- {\n\t\t\t\t\tif replicaIndex < instancesStates[j].CurrentReplicaCount {\n\t\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, DELETE_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\n\t\t// Updating objects when desired replicas per namespace equals current replica count.\n\t\tif instancesStates[0].CurrentReplicaCount == phase.ReplicasPerNamespace {\n\t\t\tfor replicaCounter := int32(0); replicaCounter < phase.ReplicasPerNamespace; replicaCounter++ {\n\t\t\t\treplicaIndex := replicaCounter\n\t\t\t\tactions = append(actions, func() {\n\t\t\t\t\tfor j := range phase.ObjectBundle {\n\t\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, PATCH_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\t\t// If error then skip this bundle\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\t// Adding objects with index greater than current replica count and lesser than desired replicas per namespace.\n\t\tfor replicaCounter := instancesStates[0].CurrentReplicaCount; replicaCounter < phase.ReplicasPerNamespace; replicaCounter++ {\n\t\t\treplicaIndex := replicaCounter\n\t\t\tactions = append(actions, func() {\n\t\t\t\tfor j := range phase.ObjectBundle {\n\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, CREATE_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\t// If error then skip this bundle\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\n\t\t// Updating state (CurrentReplicaCount) of every object in object bundle.\n\t\tdefer func() {\n\t\t\tfor j := range phase.ObjectBundle {\n\t\t\t\tid, _ := getIdentifier(ctx, &phase.ObjectBundle[j])\n\t\t\t\tinstancesStates[j].CurrentReplicaCount = instancesStates[j].DesiredReplicaCount\n\t\t\t\tctx.GetState().GetNamespacesState().Set(nsName, id, instancesStates[j])\n\t\t\t}\n\t\t}()\n\n\t}\n\ttuningSet.Execute(actions)\n\treturn errList\n}",
"func (sl *StagesLatency) calculate() {\n\t// log.Println( \"- t.results set from t.current\" )\n\tsl.Results = make(Rows, len(sl.last))\n\tcopy(sl.Results, sl.last)\n\tif sl.WantRelativeStats() {\n\t\tsl.Results.subtract(sl.first)\n\t}\n\tsl.Totals = totals(sl.Results)\n}",
"func (o ApplicationStatusWorkflowStepsSubstepsOutput) Phase() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusWorkflowStepsSubsteps) *string { return v.Phase }).(pulumi.StringPtrOutput)\n}",
"func (gen *ComparativeStatGenerator) Finalize(statStream chan *OpStat) {}",
"func (res *Results) Finalize() {\n res.Replies = len(res.Took)\n res.min()\n res.max()\n res.avg()\n res.med()\n res.pct()\n\n // Code counts\n for _, code := range res.Code {\n if code < 100 { // ignore\n } else if code < 200 {\n res.Code1xx++\n } else if code < 300 {\n res.Code2xx++\n } else if code < 400 {\n res.Code3xx++\n } else if code < 500 {\n res.Code4xx++\n } else if code < 600 {\n res.Code5xx++\n }\n }\n\n // Error counts\n res.ErrorsTotal = len(res.Errors)\n\n for _, err := range res.Errors {\n e := err.(*url.Error).Err.(*net.OpError).Error()\n if strings.Contains(e, \"connection refused\") {\n res.ErrorsConnRefused++\n } else if strings.Contains(e, \"connection reset\") {\n res.ErrorsConnReset++\n } else if strings.Contains(e, \"connection timed out\") {\n res.ErrorsConnTimeout++\n } else if strings.Contains(e, \"no free file descriptors\") {\n res.ErrorsFdUnavail++\n } else if strings.Contains(e, \"no such host\") {\n res.ErrorsAddrUnavail++\n } else {\n res.ErrorsOther++\n }\n }\n}",
"func (p Phase) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tif p.Condition != nil {\n\t\tobjectMap[\"condition\"] = p.Condition\n\t}\n\tif p.Dependencies != nil {\n\t\tobjectMap[\"dependencies\"] = p.Dependencies\n\t}\n\tif p.JobAuthorizationScope != nil {\n\t\tobjectMap[\"jobAuthorizationScope\"] = p.JobAuthorizationScope\n\t}\n\tif p.JobCancelTimeoutInMinutes != nil {\n\t\tobjectMap[\"jobCancelTimeoutInMinutes\"] = p.JobCancelTimeoutInMinutes\n\t}\n\tif p.JobTimeoutInMinutes != nil {\n\t\tobjectMap[\"jobTimeoutInMinutes\"] = p.JobTimeoutInMinutes\n\t}\n\tif p.Name != nil {\n\t\tobjectMap[\"name\"] = p.Name\n\t}\n\tif p.RefName != nil {\n\t\tobjectMap[\"refName\"] = p.RefName\n\t}\n\tif p.Steps != nil {\n\t\tobjectMap[\"steps\"] = p.Steps\n\t}\n\tif p.Target != nil {\n\t\tobjectMap[\"target\"] = p.Target\n\t}\n\tif p.Variables != nil {\n\t\tobjectMap[\"variables\"] = p.Variables\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func (m *metricBigipPoolDataTransmitted) init() {\n\tm.data.SetName(\"bigip.pool.data.transmitted\")\n\tm.data.SetDescription(\"Amount of data transmitted to and from the pool.\")\n\tm.data.SetUnit(\"By\")\n\tm.data.SetEmptySum()\n\tm.data.Sum().SetIsMonotonic(true)\n\tm.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)\n\tm.data.Sum().DataPoints().EnsureCapacity(m.capacity)\n}",
"func (b *SummaryMetricsBuilder) Build() (object *SummaryMetrics, err error) {\n\tobject = new(SummaryMetrics)\n\tobject.bitmap_ = b.bitmap_\n\tobject.name = b.name\n\tif b.vector != nil {\n\t\tobject.vector = make([]*SummarySample, len(b.vector))\n\t\tfor i, v := range b.vector {\n\t\t\tobject.vector[i], err = v.Build()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func (e *engineImpl) Finalize(\n\tchain engine.ChainReader, header *block.Header,\n\tstate *state.DB, txs []*types.Transaction,\n\treceipts []*types.Receipt, outcxs []*types.CXReceipt,\n\tincxs []*types.CXReceiptsProof, stks []*staking.StakingTransaction,\n) (*types.Block, *big.Int, error) {\n\n\t// Accumulate any block and uncle rewards and commit the final state root\n\t// Header seems complete, assemble into a block and return\n\tpayout, err := AccumulateRewards(\n\t\tchain, state, header, e.Rewarder(), e.Slasher(), e.Beaconchain(),\n\t)\n\tif err != nil {\n\t\treturn nil, nil, ctxerror.New(\"cannot pay block reward\").WithCause(err)\n\t}\n\n\t// Withdraw unlocked tokens to the delegators' accounts\n\t// Only do such at the last block of an epoch\n\tif header.ShardID() == shard.BeaconChainShardID && len(header.ShardState()) > 0 {\n\t\tvalidators, err := chain.ReadValidatorList()\n\t\tif err != nil {\n\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed to read active validators\").WithCause(err)\n\t\t}\n\t\t// Payout undelegated/unlocked tokens\n\t\tfor _, validator := range validators {\n\t\t\twrapper := state.GetStakingInfo(validator)\n\t\t\tif wrapper != nil {\n\t\t\t\tfor i := range wrapper.Delegations {\n\t\t\t\t\tdelegation := &wrapper.Delegations[i]\n\t\t\t\t\ttotalWithdraw := delegation.RemoveUnlockedUndelegations(header.Epoch(), wrapper.LastEpochInCommittee)\n\t\t\t\t\tstate.AddBalance(delegation.DelegatorAddress, totalWithdraw)\n\t\t\t\t}\n\t\t\t\tif err := state.UpdateStakingInfo(validator, wrapper); err != nil {\n\t\t\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed update validator info\").WithCause(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = errors.New(\"[Finalize] validator came back empty \" + common2.MustAddressToBech32(validator))\n\t\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed getting validator info\").WithCause(err)\n\t\t\t}\n\t\t}\n\n\t\t// Set the LastEpochInCommittee field for all external validators in the upcoming epoch.\n\t\tnewShardState, err := header.GetShardState()\n\t\tif err != nil {\n\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed to read shard state\").WithCause(err)\n\t\t}\n\t\tprocessed := make(map[common.Address]struct{})\n\t\tfor i := range newShardState.Shards {\n\t\t\tshard := newShardState.Shards[i]\n\t\t\tfor j := range shard.Slots {\n\t\t\t\tslot := shard.Slots[j]\n\t\t\t\tif slot.EffectiveStake != nil { // For external validator\n\t\t\t\t\t_, ok := processed[slot.EcdsaAddress]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tprocessed[slot.EcdsaAddress] = struct{}{}\n\t\t\t\t\t\twrapper := state.GetStakingInfo(slot.EcdsaAddress)\n\t\t\t\t\t\twrapper.LastEpochInCommittee = newShardState.Epoch\n\n\t\t\t\t\t\tif err := state.UpdateStakingInfo(slot.EcdsaAddress, wrapper); err != nil {\n\t\t\t\t\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed update validator info\").WithCause(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\theader.SetRoot(state.IntermediateRoot(chain.Config().IsS3(header.Epoch())))\n\treturn types.NewBlock(header, txs, receipts, outcxs, incxs, stks), payout, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make sure that existing capinfo state is discarded if the user loads a new pcap.
|
func (t ManageCapinfoCache) OnNewSource(pcap.HandlerCode, gowid.IApp) {
clearCapinfoState()
}
|
[
"func (s *sentPacketList) discard() {\n\t*s = sentPacketList{}\n}",
"func UnsetFrameCapture() {\n\tframeCapture = false\n}",
"func (f *tcpStreamFactory) CollectOldStreams(timeout time.Duration) {\n\tcutoff := time.Now().Add(-timeout)\n\t// map iteration should be protected\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\tf.assembler.FlushCloseOlderThan(cutoff)\n\n\tfor k, s := range f.streams {\n\t\tif s.lastPacketSeen.Before(cutoff) {\n\t\t\tlog.V(5).Infof(\"[%v] timing out old session\", s.key)\n\t\t\tdelete(f.streams, k) // remove it from our map.\n\t\t\ts.maybeFinish() // Do something...?\n\t\t}\n\t}\n}",
"func streamSource(ctx context.Context, once func(), is ImageSource, name string, view View) {\n\tif once != nil {\n\t\tonce()\n\t}\n\tstream := view.ReserveStream(name)\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase <-view.StreamingReady():\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tframe, release, err := is.Next(ctx)\n\n\t\tif err != nil {\n\t\t\tLogger.Debugw(\"error getting frame\", \"error\", err)\n\t\t\tcontinue\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase stream.InputFrames() <- FrameReleasePair{frame, release}:\n\t\t}\n\t}\n}",
"func (m *Manager) manageStreamPackets(wg *sync.WaitGroup, ctx context.Context, stream *drpcstream.Stream) {\n\tdefer wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase <-m.term.Signal():\n\t\t\treturn\n\n\t\tcase <-stream.Terminated():\n\t\t\treturn\n\n\t\tcase pkt := <-m.queue:\n\t\t\tdrpcdebug.Log(func() string { return fmt.Sprintf(\"FWD[%p][%p]: %v\", m, stream, pkt) })\n\n\t\t\terr, ok := stream.HandlePacket(pkt)\n\t\t\tif err != nil {\n\t\t\t\tm.term.Set(errs.Wrap(err))\n\t\t\t\treturn\n\t\t\t} else if !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *PacketLoader) Renew() {\n\tif c.ParentLoader != nil {\n\t\tc.ParentLoader.CloseMain()\n\t}\n\tc.ParentLoader = NewPcapLoader(c.ParentLoader.cmds, c.runner, c.ParentLoader.opt)\n}",
"func (fwd *Forwarder) handleUpstreamPackets() {\n\tvar respCnt uint64\n\tfor pa := range fwd.upstreamMsgCh {\n\t\tif fwd.isClosed() {\n\t\t\tbreak\n\t\t}\n\n\t\tfwd.bufferPool.Put(pa.data)\n\t\tatomic.AddUint64(&respCnt, 1)\n\t}\n}",
"func discard(link rtuLink) {\n\tvar rxbuf\t= make([]byte, 1024)\n\n\tlink.SetDeadline(time.Now().Add(time.Millisecond))\n\tlink.Read(rxbuf)\n\treturn\n}",
"func (mc *Memcache) GapInStream(\n\ttcptuple *common.TcpTuple,\n\tdir uint8, nbytes int,\n\tprivate protos.ProtocolData,\n) (priv protos.ProtocolData, drop bool) {\n\tdebug(\"memcache(tcp) stream gap detected\")\n\n\tdefer logp.Recover(\"GapInStream(memcache) exception\")\n\tif !isMemcacheConnection(private) {\n\t\treturn private, false\n\t}\n\n\tconn := private.(*tcpConnectionData)\n\tstream := conn.Streams[dir]\n\tparser := stream.parser\n\tmsg := parser.message\n\n\tif msg != nil {\n\t\tif msg.IsRequest {\n\t\t\tmsg.AddNotes(NoteRequestPacketLoss)\n\t\t} else {\n\t\t\tmsg.AddNotes(NoteResponsePacketLoss)\n\t\t}\n\t}\n\n\t// If we are about to read binary data (length) encoded, but missing gab\n\t// does fully cover data area, we might be able to continue processing the\n\t// stream + transactions\n\tinData := parser.state == parseStateDataBinary ||\n\t\tparser.state == parseStateIncompleteDataBinary ||\n\t\tparser.state == parseStateData ||\n\t\tparser.state == parseStateIncompleteData\n\tif inData {\n\t\tif msg == nil {\n\t\t\tlogp.WTF(\"parser message is nil on data load\")\n\t\t\treturn private, true\n\t\t}\n\n\t\talreadyRead := stream.Buf.Len() - int(msg.bytesLost)\n\t\tdataRequired := int(msg.bytes) - alreadyRead\n\t\tif nbytes <= dataRequired {\n\t\t\t// yay, all bytes included in message binary data part.\n\t\t\t// just drop binary data part and recover parsing.\n\t\t\tif msg.isBinary {\n\t\t\t\tparser.state = parseStateIncompleteDataBinary\n\t\t\t} else {\n\t\t\t\tparser.state = parseStateIncompleteData\n\t\t\t}\n\t\t\tmsg.bytesLost += uint(nbytes)\n\t\t\treturn private, false\n\t\t}\n\t}\n\n\t// need to drop TCP stream. But try to publish all cached trancsactions first\n\tmc.pushAllTCPTrans(conn.connection)\n\treturn private, true\n}",
"func (s *session) garbageCollectStreams() {\n\ts.streamsMap.Iterate(func(str *stream) (bool, error) {\n\t\tid := str.StreamID()\n\t\tif str.finished() {\n\t\t\terr := s.streamsMap.RemoveStream(id)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\ts.flowControlManager.RemoveStream(id)\n\t\t}\n\t\treturn true, nil\n\t})\n}",
"func (player *Player) PacketKeepAlive() {\n}",
"func (s *rawConnStreams) remove(stream *rawStream) {\n\ts.conn.mu.Lock()\n\tif s.live[stream.streamID] == stream {\n\t\tdelete(s.live, stream.streamID)\n\t}\n\ts.conn.mu.Unlock()\n}",
"func (c *Conn) ignoreSTUNPackets() {\n\tc.stunReceiveFunc.Store(func([]byte, *net.UDPAddr) {})\n}",
"func noSink(src *v1alpha1.AWSSNSSource) {\n\tsrc.Status.SinkURI = nil\n}",
"func (bow *Browser) preSend() {\n\tif bow.refresh != nil {\n\t\tbow.refresh.Stop()\n\t}\n}",
"func disengage() {\n\tglfwWindow := flyCam.glfwWindow\n\tglfwWindow.UnsubscribeID(window.OnKeyUp, DefaultEvId)\n\tglfwWindow.UnsubscribeID(window.OnKeyRepeat, DefaultEvId)\n\tglfwWindow.UnsubscribeID(window.OnKeyDown, DefaultEvId)\n\tglfwWindow.UnsubscribeID(window.OnCursor, DefaultEvId)\n\tw := glfwWindow.Window\n\tw.SetInputMode(glfw.RawMouseMotion, False)\n\tw.SetInputMode(glfw.CursorMode, glfw.CursorNormal)\n}",
"func ReadPacketsFromPcap(pcapfile string, filter layers.IPProtocol, raw bool) ([]events.Event, []gopacket.Packet, error) {\n\tvar Events []events.Event\n\tvar rawPackets []gopacket.Packet\n\tvar ret []events.Event\n\tvar rawRet []gopacket.Packet\n\tpcapfilePath := MakeAssetFullPath(pcapfile)\n\n\tf, err := os.Open(pcapfilePath)\n\tif err != nil {\n\t\treturn []events.Event{}, []gopacket.Packet{}, err\n\t}\n\thandle, err := pcap.OpenOfflineFile(f)\n\tif err != nil {\n\t\treturn []events.Event{}, []gopacket.Packet{}, err\n\t}\n\n\tsrc := gopacket.NewPacketSource(handle, handle.LinkType())\n\tin := src.Packets()\n\nloop:\n\tfor {\n\t\tpacket := <-in\n\t\tif packet == nil {\n\t\t\tbreak loop\n\t\t}\n\n\t\tif _, ok := packet.NetworkLayer().(*layers.IPv4); ok {\n\t\t\tif packet.NetworkLayer().(*layers.IPv4).Protocol == filter {\n\t\t\t\tif raw {\n\t\t\t\t\trawPackets = append(rawPackets, packet)\n\t\t\t\t} else {\n\t\t\t\t\tswitch filter {\n\t\t\t\t\tcase layers.IPProtocolICMPv4:\n\t\t\t\t\t\tev, err := events.NewICMPv4Event(packet)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn []events.Event{}, []gopacket.Packet{}, err\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tEvents = append(Events, ev)\n\n\t\t\t\t\tcase layers.IPProtocolUDP:\n\t\t\t\t\t\tev, err := events.NewUDPEvent(packet, 4)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn []events.Event{}, []gopacket.Packet{}, err\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tEvents = append(Events, ev)\n\n\t\t\t\t\tcase layers.IPProtocolTCP:\n\t\t\t\t\t\tev, err := events.NewTCPEvent(packet, 4)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn []events.Event{}, []gopacket.Packet{}, err\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tEvents = append(Events, ev)\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tcontinue loop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if _, ok := packet.NetworkLayer().(*layers.IPv6); ok {\n\t\t\tif packet.NetworkLayer().(*layers.IPv6).NextHeader == filter {\n\t\t\t\tswitch filter {\n\t\t\t\tcase layers.IPProtocolICMPv6:\n\t\t\t\t\tev, err := events.NewICMPv6Event(packet)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn []events.Event{}, []gopacket.Packet{}, err\n\t\t\t\t\t}\n\n\t\t\t\t\tEvents = append(Events, ev)\n\n\t\t\t\tdefault:\n\t\t\t\t\tcontinue loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// I'm so lazy\n\tif raw {\n\t\trawRet = make([]gopacket.Packet, len(rawPackets))\n\t\tcopy(rawRet, rawPackets)\n\t}\n\n\tret = make([]events.Event, len(Events))\n\tcopy(ret, Events)\n\n\treturn ret, rawRet, nil\n}",
"func (fwd *Forwarder) handleDownstreamPackets() {\n\tfor pkt := range fwd.downstreamMsgCh {\n\t\tif fwd.isClosed() {\n\t\t\tbreak\n\t\t}\n\n\t\tclientAddr := pkt.src.String()\n\n\t\tconn, found := fwd.connsMap.Load(clientAddr)\n\t\tif !found {\n\t\t\tconn, err := net.ListenUDP(\"udp\", fwd.client)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warnf(\"UDP forwarder failed to dail, drop packet, err %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfwd.connsMap.Store(clientAddr, &connection{\n\t\t\t\tudp: conn,\n\t\t\t\tlastActivity: time.Now(),\n\t\t\t})\n\n\t\t\tconn.WriteTo(pkt.data, fwd.upstream)\n\t\t\tgo fwd.downstreamReadLoop(pkt.src, conn)\n\t\t} else {\n\t\t\tconn.(*connection).udp.WriteTo(pkt.data, fwd.upstream)\n\t\t\tshouldUpdateLastActivity := false\n\t\t\tif conn, found := fwd.connsMap.Load(clientAddr); found {\n\t\t\t\tif conn.(*connection).lastActivity.Before(\n\t\t\t\t\ttime.Now().Add(-fwd.connTimeout / 4)) {\n\t\t\t\t\tshouldUpdateLastActivity = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif shouldUpdateLastActivity {\n\t\t\t\tfwd.updateClientLastActivity(clientAddr)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (o *BasicBotBase) UnsetDefaultEventsRegisterStream() {\n\to.DefaultEventsRegisterStream.Unset()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewPlanner returns a new instance of Planner.
|
func NewPlanner(db DB) *Planner {
return &Planner{
DB: db,
Now: time.Now,
}
}
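
The exported Now field makes the planner's clock injectable, the usual Go pattern for deterministic tests; a sketch assuming db satisfies the package's DB interface:

p := NewPlanner(db)
// freeze the clock in tests so plans are reproducible
p.Now = func() time.Time {
	return time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
}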
|
[
"func NewPlanner() Planner {\n\treturn &simplePlanner{}\n}",
"func NewPlanner(ctx *Context) *PlannerDefault {\n\tp := &PlannerDefault{\n\t\tCtx: ctx,\n\t\tchildren: make([]Task, 0),\n\t}\n\tp.Planner = p\n\treturn p\n}",
"func New(db PlanData) Planner {\n\treturn Planner{\n\t\tdata: db,\n\t}\n}",
"func NewPlannerPlanConfiguration()(*PlannerPlanConfiguration) {\n m := &PlannerPlanConfiguration{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewJourneyPlanner(database db.Database) (*journeyPlanner,error) {\n\n\t// Create planner\n\tjp := new(journeyPlanner)\n\n\t// Create or open table\n\ttable,err := database.OpenTable(journeyPlannerTableName)\n\tif err == db.ETABLENOTFOUND { \n\t\ttable,err = database.CreateTable(journeyPlannerTableName)\n\t}\n\tif err != nil {\n\t\treturn jp,err\n\t}\n\tjp.table = table\n\treturn jp,nil\n}",
"func newProcessPlanner(request ImageImportRequest, diskInspector disk.Inspector, logger logging.Logger) processPlanner {\n\treturn &defaultPlanner{request, diskInspector, logger}\n}",
"func NewPlan(planProperties *PlanProperties) Plan {\n\treturn &plan{\n\t\tplanProperties,\n\t}\n}",
"func NewBusinessScenarioPlanner()(*BusinessScenarioPlanner) {\n m := &BusinessScenarioPlanner{\n Entity: *NewEntity(),\n }\n return m\n}",
"func New(visualizer string) (pgscanner.PlanExporter, error) {\n\tswitch visualizer {\n\tcase dalibo.VisualizerType:\n\t\treturn dalibo.New(), nil\n\n\tcase depesz.VisualizerType:\n\t\treturn depesz.New(), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"unknown visualizer given %q\", visualizer)\n}",
"func NewPlan(obj objects.ObjectConfig) (objects.Object, error) {\n\treturn (&Plan{}).Parse(obj)\n}",
"func newMockPlanner(t *testing.T, functions InfoFunctions) *mockPlanner {\n\tctrl := gomock.NewController(t)\n\tmp := mockPlanner{\n\t\trkeBootstrap: fake.NewMockClientInterface[*rkev1.RKEBootstrap, *rkev1.RKEBootstrapList](ctrl),\n\t\trkeBootstrapCache: fake.NewMockCacheInterface[*rkev1.RKEBootstrap](ctrl),\n\t\trkeControlPlanes: fake.NewMockControllerInterface[*rkev1.RKEControlPlane, *rkev1.RKEControlPlaneList](ctrl),\n\t\tetcdSnapshotCache: fake.NewMockCacheInterface[*rkev1.ETCDSnapshot](ctrl),\n\t\tsecretClient: fake.NewMockClientInterface[*v1.Secret, *v1.SecretList](ctrl),\n\t\tsecretCache: fake.NewMockCacheInterface[*v1.Secret](ctrl),\n\t\tconfigMapCache: fake.NewMockCacheInterface[*v1.ConfigMap](ctrl),\n\t\tmachines: fake.NewMockClientInterface[*capi.Machine, *capi.MachineList](ctrl),\n\t\tmachinesCache: fake.NewMockCacheInterface[*capi.Machine](ctrl),\n\t\tclusterRegistrationTokenCache: fake.NewMockCacheInterface[*apisv3.ClusterRegistrationToken](ctrl),\n\t\tcapiClient: fake.NewMockClientInterface[*capi.Cluster, *capi.ClusterList](ctrl),\n\t\tcapiClusters: fake.NewMockCacheInterface[*capi.Cluster](ctrl),\n\t\tmanagementClusters: fake.NewMockNonNamespacedCacheInterface[*apisv3.Cluster](ctrl),\n\t\trancherClusterCache: fake.NewMockCacheInterface[*apisv1.Cluster](ctrl),\n\t}\n\tstore := PlanStore{\n\t\tsecrets: mp.secretClient,\n\t\tsecretsCache: mp.secretCache,\n\t\tmachineCache: mp.machinesCache,\n\t}\n\tp := Planner{\n\t\tctx: context.TODO(),\n\t\tstore: &store,\n\t\tmachines: mp.machines,\n\t\tmachinesCache: mp.machinesCache,\n\t\tsecretClient: mp.secretClient,\n\t\tsecretCache: mp.secretCache,\n\t\tconfigMapCache: mp.configMapCache,\n\t\tclusterRegistrationTokenCache: mp.clusterRegistrationTokenCache,\n\t\tcapiClient: mp.capiClient,\n\t\tcapiClusters: mp.capiClusters,\n\t\tmanagementClusters: mp.managementClusters,\n\t\trancherClusterCache: mp.rancherClusterCache,\n\t\trkeControlPlanes: mp.rkeControlPlanes,\n\t\trkeBootstrap: mp.rkeBootstrap,\n\t\trkeBootstrapCache: mp.rkeBootstrapCache,\n\t\tetcdSnapshotCache: mp.etcdSnapshotCache,\n\t\tetcdS3Args: s3Args{\n\t\t\tsecretCache: mp.secretCache,\n\t\t},\n\t\tretrievalFunctions: functions,\n\t}\n\tmp.planner = &p\n\treturn &mp\n}",
"func NewPlannerTaskDetails()(*PlannerTaskDetails) {\n m := &PlannerTaskDetails{\n Entity: *NewEntity(),\n }\n return m\n}",
"func newInternalPlanner(\n\topName string,\n\ttxn *kv.Txn,\n\tuser security.SQLUsername,\n\tmemMetrics *MemoryMetrics,\n\texecCfg *ExecutorConfig,\n\tsessionData sessiondatapb.SessionData,\n\topts ...InternalPlannerParamsOption,\n) (*planner, func()) {\n\t// Default parameters which may be override by the supplied options.\n\tparams := &internalPlannerParams{\n\t\t// The table collection used by the internal planner does not rely on the\n\t\t// deprecatedDatabaseCache and there are no subscribers to the\n\t\t// deprecatedDatabaseCache, so we can leave it uninitialized.\n\t\t// Furthermore, we're not concerned about the efficiency of querying tables\n\t\t// with user-defined types, hence the nil hydratedTables.\n\t\tcollection: descs.NewCollection(execCfg.Settings, execCfg.LeaseManager, nil /* hydratedTables */),\n\t}\n\tfor _, opt := range opts {\n\t\topt(params)\n\t}\n\n\t// We need a context that outlives all the uses of the planner (since the\n\t// planner captures it in the EvalCtx, and so does the cleanup function that\n\t// we're going to return. We just create one here instead of asking the caller\n\t// for a ctx with this property. This is really ugly, but the alternative of\n\t// asking the caller for one is hard to explain. What we need is better and\n\t// separate interfaces for planning and running plans, which could take\n\t// suitable contexts.\n\tctx := logtags.AddTag(context.Background(), opName, \"\")\n\n\tsd := &sessiondata.SessionData{\n\t\tSessionData: sessionData,\n\t\tSearchPath: sessiondata.DefaultSearchPathForUser(user),\n\t\tSequenceState: sessiondata.NewSequenceState(),\n\t\tLocation: time.UTC,\n\t}\n\tsd.SessionData.Database = \"system\"\n\tsd.SessionData.UserProto = user.EncodeProto()\n\tdataMutator := &sessionDataMutator{\n\t\tdata: sd,\n\t\tdefaults: SessionDefaults(map[string]string{\n\t\t\t\"application_name\": \"crdb-internal\",\n\t\t\t\"database\": \"system\",\n\t\t}),\n\t\tsettings: execCfg.Settings,\n\t\tparamStatusUpdater: &noopParamStatusUpdater{},\n\t\tsetCurTxnReadOnly: func(bool) {},\n\t}\n\n\tvar ts time.Time\n\tif txn != nil {\n\t\treadTimestamp := txn.ReadTimestamp()\n\t\tif readTimestamp.IsEmpty() {\n\t\t\tpanic(\"makeInternalPlanner called with a transaction without timestamps\")\n\t\t}\n\t\tts = readTimestamp.GoTime()\n\t}\n\n\tp := &planner{execCfg: execCfg, alloc: &rowenc.DatumAlloc{}}\n\n\tp.txn = txn\n\tp.stmt = Statement{}\n\tp.cancelChecker = cancelchecker.NewCancelChecker(ctx)\n\tp.isInternalPlanner = true\n\n\tp.semaCtx = tree.MakeSemaContext()\n\tp.semaCtx.SearchPath = sd.SearchPath\n\tp.semaCtx.TypeResolver = p\n\n\tplannerMon := mon.NewUnlimitedMonitor(ctx,\n\t\tfmt.Sprintf(\"internal-planner.%s.%s\", user, opName),\n\t\tmon.MemoryResource,\n\t\tmemMetrics.CurBytesCount, memMetrics.MaxBytesHist,\n\t\tnoteworthyInternalMemoryUsageBytes, execCfg.Settings)\n\n\tp.extendedEvalCtx = internalExtendedEvalCtx(\n\t\tctx, sd, dataMutator, params.collection, txn, ts, ts, execCfg, plannerMon,\n\t)\n\tp.extendedEvalCtx.Planner = p\n\tp.extendedEvalCtx.PrivilegedAccessor = p\n\tp.extendedEvalCtx.SessionAccessor = p\n\tp.extendedEvalCtx.ClientNoticeSender = p\n\tp.extendedEvalCtx.Sequence = p\n\tp.extendedEvalCtx.Tenant = p\n\tp.extendedEvalCtx.JoinTokenCreator = p\n\tp.extendedEvalCtx.ClusterID = execCfg.ClusterID()\n\tp.extendedEvalCtx.ClusterName = execCfg.RPCContext.ClusterName()\n\tp.extendedEvalCtx.NodeID = execCfg.NodeID\n\tp.extendedEvalCtx.Locality = execCfg.Locality\n\n\tp.sessionDataMutator = dataMutator\n\tp.autoCommit = 
false\n\n\tp.extendedEvalCtx.MemMetrics = memMetrics\n\tp.extendedEvalCtx.ExecCfg = execCfg\n\tp.extendedEvalCtx.Placeholders = &p.semaCtx.Placeholders\n\tp.extendedEvalCtx.Annotations = &p.semaCtx.Annotations\n\tp.extendedEvalCtx.Descs = params.collection\n\n\tp.queryCacheSession.Init()\n\tp.optPlanningCtx.init(p)\n\n\treturn p, func() {\n\t\t// Note that we capture ctx here. This is only valid as long as we create\n\t\t// the context as explained at the top of the method.\n\n\t\t// The collection will accumulate descriptors read during planning as well\n\t\t// as type descriptors read during execution on the local node. Many users\n\t\t// of the internal planner do set the `skipCache` flag on the resolver but\n\t\t// this is not respected by type resolution underneath execution. That\n\t\t// subtle details means that the type descriptor used by execution may be\n\t\t// stale, but that must be okay. Correctness concerns aside, we must release\n\t\t// the leases to ensure that we don't leak a descriptor lease.\n\t\tp.Descriptors().ReleaseAll(ctx)\n\n\t\t// Stop the memory monitor.\n\t\tplannerMon.Stop(ctx)\n\t}\n}",
"func New(ctx context.Context, store storage.Store, stm *semantic.Statement, chanSize, bulkSize int, w io.Writer) (Executor, error) {\n\tswitch stm.Type() {\n\tcase semantic.Query:\n\t\treturn newQueryPlan(ctx, store, stm, chanSize, w)\n\tcase semantic.Insert:\n\t\treturn &insertPlan{\n\t\t\tstm: stm,\n\t\t\tstore: store,\n\t\t\ttracer: w,\n\t\t}, nil\n\tcase semantic.Delete:\n\t\treturn &deletePlan{\n\t\t\tstm: stm,\n\t\t\tstore: store,\n\t\t\ttracer: w,\n\t\t}, nil\n\tcase semantic.Create:\n\t\treturn &createPlan{\n\t\t\tstm: stm,\n\t\t\tstore: store,\n\t\t\ttracer: w,\n\t\t}, nil\n\tcase semantic.Drop:\n\t\treturn &dropPlan{\n\t\t\tstm: stm,\n\t\t\tstore: store,\n\t\t\ttracer: w,\n\t\t}, nil\n\tcase semantic.Construct:\n\t\tqp, _ := newQueryPlan(ctx, store, stm, chanSize, w)\n\t\treturn &constructPlan{\n\t\t\tstm: stm,\n\t\t\tstore: store,\n\t\t\ttracer: w,\n\t\t\tbulkSize: bulkSize,\n\t\t\tqueryPlan: qp,\n\t\t\tconstruct: true,\n\t\t}, nil\n\tcase semantic.Deconstruct:\n\t\tqp, _ := newQueryPlan(ctx, store, stm, chanSize, w)\n\t\treturn &constructPlan{\n\t\t\tstm: stm,\n\t\t\tstore: store,\n\t\t\ttracer: w,\n\t\t\tbulkSize: bulkSize,\n\t\t\tqueryPlan: qp,\n\t\t\tconstruct: false,\n\t\t}, nil\n\tcase semantic.Show:\n\t\treturn &showPlan{\n\t\t\tstm: stm,\n\t\t\tstore: store,\n\t\t\ttracer: w,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"planner.New: unknown statement type in statement %v\", stm)\n\t}\n}",
"func (pb PlannerBuilder) Build() Planner {\n\treturn &planner{\n\t\tlp: NewLogicalPlanner(pb.lopts...),\n\t\tpp: NewPhysicalPlanner(pb.popts...),\n\t}\n}",
"func NewPlanParser(input antlr.TokenStream) *PlanParser {\n\tthis := new(PlanParser)\n\tdeserializer := antlr.NewATNDeserializer(nil)\n\tdeserializedATN := deserializer.DeserializeFromUInt16(parserATN)\n\tdecisionToDFA := make([]*antlr.DFA, len(deserializedATN.DecisionToState))\n\tfor index, ds := range deserializedATN.DecisionToState {\n\t\tdecisionToDFA[index] = antlr.NewDFA(ds, index)\n\t}\n\tthis.BaseParser = antlr.NewBaseParser(input)\n\n\tthis.Interpreter = antlr.NewParserATNSimulator(this, deserializedATN, decisionToDFA, antlr.NewPredictionContextCache())\n\tthis.RuleNames = ruleNames\n\tthis.LiteralNames = literalNames\n\tthis.SymbolicNames = symbolicNames\n\tthis.GrammarFileName = \"Plan.g4\"\n\n\treturn this\n}",
"func New() (ugrade.JobSolver, error) {\n\treturn &defaultSolver{\n\t\tcompiler: compiler.New(),\n\t\ttcgenerator: tcgenerator.NewGenerator(),\n\t\tsubmissionExecutor: executor.New(),\n\t\tchecker: checker.New(),\n\t}, nil\n}",
"func NewPlan(apiKey string) (p Plan) {\n\tp.ApiKey = apiKey\n\tp.PaymentMethods = \"credit_card\"\n\n\treturn\n}",
"func (m *Group) SetPlanner(value PlannerGroupable)() {\n m.planner = value\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
normalizeDimensions extracts the time interval, if specified. Returns all remaining dimensions.
|
func (p *Planner) normalizeDimensions(dimensions Dimensions) (time.Duration, []string, error) {
// Ignore if there are no dimensions.
if len(dimensions) == 0 {
return 0, nil, nil
}
// If the first dimension is a "time(duration)" then extract the duration.
if call, ok := dimensions[0].Expr.(*Call); ok && strings.ToLower(call.Name) == "time" {
// Make sure there is exactly one argument.
if len(call.Args) != 1 {
return 0, nil, errors.New("time dimension expected one argument")
}
// Ensure the argument is a duration.
lit, ok := call.Args[0].(*DurationLiteral)
if !ok {
return 0, nil, errors.New("time dimension must have one duration argument")
}
return lit.Val, dimensionKeys(dimensions[1:]), nil
}
return 0, dimensionKeys(dimensions), nil
}
|
[
"func normalizeCloudWatchAlarmDimensions(alarmDimensions *cloudformation.CloudWatchAlarmDimensionList) *cloudformation.CloudWatchAlarmDimensionList {\n\tif alarmDimensions == nil || len(*alarmDimensions) == 0 {\n\t\treturn &cloudformation.CloudWatchAlarmDimensionList{\n\t\t\t{\n\t\t\t\tName: cloudformation.String(\"LoadBalancer\"),\n\t\t\t\tValue: cloudformation.GetAtt(\"LB\", \"LoadBalancerFullName\").String(),\n\t\t\t},\n\t\t}\n\t}\n\n\tdimensions := make(cloudformation.CloudWatchAlarmDimensionList, len(*alarmDimensions))\n\n\tfor i, dimension := range *alarmDimensions {\n\t\tvalue := dimension.Value\n\n\t\tswitch dimension.Name.Literal {\n\t\tcase \"LoadBalancer\":\n\t\t\tvalue = cloudformation.GetAtt(\"LB\", \"LoadBalancerFullName\").String()\n\t\tcase \"TargetGroup\":\n\t\t\tvalue = cloudformation.GetAtt(\"TG\", \"TargetGroupFullName\").String()\n\t\t}\n\n\t\tdimensions[i] = cloudformation.CloudWatchAlarmDimension{\n\t\t\tName: dimension.Name,\n\t\t\tValue: value,\n\t\t}\n\t}\n\n\treturn &dimensions\n}",
"func sanitizeDimensions(in machine.SwarmingDimensions) machine.SwarmingDimensions {\n\tret := machine.SwarmingDimensions{}\n\tfor key, slice := range in {\n\t\tif len(slice) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tret[key] = in[key]\n\t}\n\treturn ret\n}",
"func (mf *MetricFilter) Normalize() (map[string][]string, error) {\n\tif mf.MetricName != \"\" {\n\t\tmf.MetricNames = append(mf.MetricNames, mf.MetricName)\n\t}\n\n\tdimSet := map[string][]string{}\n\tfor k, v := range mf.Dimensions {\n\t\tswitch s := v.(type) {\n\t\tcase []interface{}:\n\t\t\tnewSet := []string{}\n\t\t\tfor _, iv := range s {\n\t\t\t\tnewSet = append(newSet, fmt.Sprintf(\"%v\", iv))\n\t\t\t}\n\t\t\tdimSet[k] = newSet\n\t\tcase string:\n\t\t\tdimSet[k] = []string{s}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%v should be either a string or string list\", v)\n\t\t}\n\t}\n\treturn dimSet, nil\n}",
"func (o *RollupAggregator) SetDimensions(v []string) {\n\to.Dimensions = v\n}",
"func (b *taskBuilder) dimension(dims ...string) {\n\tfor _, dim := range dims {\n\t\tif !In(dim, b.Spec.Dimensions) {\n\t\t\tb.Spec.Dimensions = append(b.Spec.Dimensions, dim)\n\t\t}\n\t}\n}",
"func (o *EntityTimeseriesData) GetDimensions() map[string]string {\n\tif o == nil || o.Dimensions == nil {\n\t\tvar ret map[string]string\n\t\treturn ret\n\t}\n\treturn *o.Dimensions\n}",
"func (s *MetricResultV2) SetDimensions(v map[string]*string) *MetricResultV2 {\n\ts.Dimensions = v\n\treturn s\n}",
"func modifyDimensions(name string, metricTypeString string, dims map[string]string, props map[string]interface{}) error {\n\tvar err error\n\t// Add common dimensions\n\tdims[\"agent\"] = \"telegraf\"\n\tdims[\"telegraf_type\"] = metricTypeString\n\n\t// If the plugin doesn't define a plugin name use metric.Name()\n\tif _, in := dims[\"plugin\"]; !in {\n\t\tdims[\"plugin\"] = name\n\t}\n\n\t// remove sf_prefix if it exists in the dimension map\n\tif _, in := dims[\"sf_prefix\"]; in {\n\t\tdelete(dims, \"sf_prefix\")\n\t}\n\n\t// if sfMetric exists\n\tif sfMetric, in := dims[\"sf_metric\"]; in {\n\t\t// if the metric is a metadata object\n\t\tif sfMetric == \"objects.host-meta-data\" {\n\t\t\t// If property exists remap it\n\t\t\tif _, in := dims[\"property\"]; in {\n\t\t\t\tprops[\"property\"] = dims[\"property\"]\n\t\t\t\tdelete(dims, \"property\")\n\t\t\t} else {\n\t\t\t\t// This is a malformed metadata event\n\t\t\t\terr = fmt.Errorf(\"E! Output [signalfx] objects.host-metadata object doesn't have a property\")\n\t\t\t}\n\t\t\t// remove the sf_metric dimension\n\t\t\tdelete(dims, \"sf_metric\")\n\t\t}\n\t}\n\treturn err\n}",
"func (a *AxisPosition) normalize(gtx layout.Context, axis layout.Axis, elements int, dimensioner Dimensioner) {\n\tif a.First < 0 {\n\t\ta.First = 0\n\t}\n\tif a.First > elements {\n\t\ta.First = elements - 1\n\t}\n\n\tconstraint := axis.Convert(gtx.Constraints.Max).X\n\tfor a.Offset < 0 && a.First > 0 {\n\t\ta.First--\n\t\tdim := dimensioner(axis, a.First, constraint)\n\t\ta.Offset += dim\n\t\ta.OffsetAbs += dim\n\t}\n\tif a.Offset < 0 {\n\t\ta.Offset = 0\n\t}\n\tfor a.Offset > dimensioner(axis, a.First, constraint) && a.First < elements-1 {\n\t\tdim := dimensioner(axis, a.First, constraint)\n\t\ta.First++\n\t\ta.Offset -= dim\n\t\ta.OffsetAbs += dim\n\t}\n}",
"func (e *BatchExecutorImpl) evalDimensions(prevResultSize int) {\n\t// dimension expression evaluation.\n\tfor dimIndex, dimension := range e.qc.OOPK.Dimensions {\n\t\te.qc.doProfile(func() {\n\t\t\tdimVectorIndex := e.qc.OOPK.DimensionVectorIndex[dimIndex]\n\t\t\tdimValueOffset, dimNullOffset := queryCom.GetDimensionStartOffsets(e.qc.OOPK.NumDimsPerDimWidth, dimVectorIndex, e.qc.OOPK.currentBatch.resultCapacity)\n\t\t\tif e.qc.OOPK.geoIntersection != nil && e.qc.OOPK.geoIntersection.dimIndex == dimIndex {\n\t\t\t\te.qc.OOPK.currentBatch.writeGeoShapeDim(\n\t\t\t\t\te.qc.OOPK.geoIntersection, e.qc.OOPK.currentBatch.geoPredicateVectorD,\n\t\t\t\t\tdimValueOffset, dimNullOffset, e.sizeBeforeGeoFilter, prevResultSize, e.stream, e.qc.Device)\n\t\t\t} else {\n\t\t\t\tdimensionExprRootAction := e.qc.OOPK.currentBatch.makeWriteToDimensionVectorAction(dimValueOffset, dimNullOffset, prevResultSize)\n\t\t\t\te.qc.OOPK.currentBatch.processExpression(dimension, nil,\n\t\t\t\t\te.qc.TableScanners, e.qc.OOPK.foreignTables, e.stream, e.qc.Device, dimensionExprRootAction)\n\t\t\t}\n\t\t}, fmt.Sprintf(\"dim%d\", dimIndex), e.stream)\n\t}\n\n\te.qc.reportTimingForCurrentBatch(e.stream, &e.start, dimEvalTiming)\n}",
"func (x *HrvMsg) GetTimeScaled() []float64 {\n\tif len(x.Time) == 0 {\n\t\treturn nil\n\t}\n\ts := make([]float64, len(x.Time))\n\tfor i, v := range x.Time {\n\t\ts[i] = float64(v) / 1000\n\t}\n\treturn s\n}",
"func (s *MetricDatum) SetDimensions(v []*Dimension) *MetricDatum {\n\ts.Dimensions = v\n\treturn s\n}",
"func (o *RollupAggregator) GetDimensions() []string {\n\tif o == nil || isNil(o.Dimensions) {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.Dimensions\n}",
"func ConvertOtelDimensions(attributes pcommon.Map) []*cloudwatch.Dimension {\n\t// Loop through map, similar to EMF exporter createLabels().\n\tmTags := make(map[string]string, attributes.Len())\n\tattributes.Range(func(k string, v pcommon.Value) bool {\n\t\tmTags[k] = v.AsString()\n\t\treturn true\n\t})\n\treturn BuildDimensions(mTags)\n}",
"func (ca *CliArgs) SetDimensions(d string) *CliArgs {\n\tfor _, pair := range strings.Split(d, \",\") {\n\t\tkeyVal := strings.Split(pair, \"=\")\n\t\tif len(keyVal) == 1 {\n\t\t\tcontinue\n\t\t}\n\t\tdim := cloudwatch.Dimension{\n\t\t\tName: &keyVal[0],\n\t\t\tValue: &keyVal[1],\n\t\t}\n\t\tca.Dimensions = append(ca.Dimensions, &dim)\n\t}\n\treturn ca\n}",
"func (s *Record) SetDimensions(v []*Dimension) *Record {\n\ts.Dimensions = v\n\treturn s\n}",
"func resize(x []float64, dim int) []float64 {\n\tif dim > cap(x) {\n\t\treturn make([]float64, dim)\n\t}\n\treturn x[:dim]\n}",
"func (a *Axis) sanitizeRange() {\n\tif math.IsInf(a.Min, 0) {\n\t\ta.Min = 0\n\t}\n\tif math.IsInf(a.Max, 0) {\n\t\ta.Max = 0\n\t}\n\tif a.Min > a.Max {\n\t\ta.Min, a.Max = a.Max, a.Min\n\t}\n\tif a.Min == a.Max {\n\t\ta.Min--\n\t\ta.Max++\n\t}\n\n\tif a.AutoRescale {\n\t\tmarks := a.Tick.Marker.Ticks(a.Min, a.Max)\n\t\tfor _, t := range marks {\n\t\t\ta.Min = math.Min(a.Min, t.Value)\n\t\t\ta.Max = math.Max(a.Max, t.Value)\n\t\t}\n\t}\n}",
"func (o ElastigroupMultipleMetricsMetricOutput) Dimensions() ElastigroupMultipleMetricsMetricDimensionArrayOutput {\n\treturn o.ApplyT(func(v ElastigroupMultipleMetricsMetric) []ElastigroupMultipleMetricsMetricDimension {\n\t\treturn v.Dimensions\n\t}).(ElastigroupMultipleMetricsMetricDimensionArrayOutput)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
planField returns a processor for a field.
|
func (p *Planner) planField(e *Executor, f *Field) (processor, error) {
return p.planExpr(e, f.Expr)
}
|
[
"func (p *Planner) planCall(e *Executor, c *Call) (processor, error) {\n\t// Ensure there is a single argument.\n\tif len(c.Args) != 1 {\n\t\treturn nil, fmt.Errorf(\"expected one argument for %s()\", c.Name)\n\t}\n\n\t// Ensure the argument is a variable reference.\n\tref, ok := c.Args[0].(*VarRef)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected field argument in %s()\", c.Name)\n\t}\n\n\t// Extract the substatement for the call.\n\tsub, err := e.stmt.Substatement(ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tname := sub.Source.(*Measurement).Name\n\n\t// Extract tags from conditional.\n\ttags := make(map[string]string)\n\tcondition, err := p.extractTags(name, sub.Condition, tags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsub.Condition = condition\n\n\t// Find field.\n\tfname := strings.TrimPrefix(ref.Val, name+\".\")\n\tfieldID, typ := e.db.Field(name, fname)\n\tif fieldID == 0 {\n\t\treturn nil, fmt.Errorf(\"field not found: %s.%s\", name, fname)\n\t}\n\n\t// Generate a reducer for the given function.\n\tr := newReducer(e)\n\tr.stmt = sub\n\n\t// Retrieve a list of series data ids.\n\tseriesIDs := p.DB.MatchSeries(name, tags)\n\n\t// Generate mappers for each id.\n\tr.mappers = make([]*mapper, len(seriesIDs))\n\tfor i, seriesID := range seriesIDs {\n\t\tm := newMapper(e, seriesID, fieldID, typ)\n\t\tm.min, m.max = e.min.UnixNano(), e.max.UnixNano()\n\t\tm.interval = int64(e.interval)\n\t\tm.key = append(make([]byte, 8), marshalStrings(p.DB.SeriesTagValues(seriesID, e.tags))...)\n\t\tr.mappers[i] = m\n\t}\n\n\t// Set the appropriate reducer function.\n\tswitch strings.ToLower(c.Name) {\n\tcase \"count\":\n\t\tr.fn = reduceSum\n\t\tfor _, m := range r.mappers {\n\t\t\tm.fn = mapCount\n\t\t}\n\tcase \"sum\":\n\t\tr.fn = reduceSum\n\t\tfor _, m := range r.mappers {\n\t\t\tm.fn = mapSum\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"function not found: %q\", c.Name)\n\t}\n\n\treturn r, nil\n}",
"func (b *Builder) Field(keypath string) *Builder {\n\tb.p.RegisterTransformation(impl.Field(keypath))\n\treturn b\n}",
"func (b *messageBuilder) processField(d *desc.FieldDescriptor) entity.Field {\n\tvar f entity.Field\n\tswitch {\n\tcase isMessageType(d.AsFieldDescriptorProto().GetType()):\n\t\tf = b.processMessageField(d)\n\tcase isEnumType(d):\n\t\tf = newEnumField(d)\n\tdefault: // primitive field\n\t\tf = newPrimitiveField(d)\n\t}\n\treturn f\n}",
"func (v *ApiVisitor) VisitField(ctx *api.FieldContext) interface{} {\n\tiAnonymousFiled := ctx.AnonymousFiled()\n\tiNormalFieldContext := ctx.NormalField()\n\tif iAnonymousFiled != nil {\n\t\treturn iAnonymousFiled.Accept(v).(*TypeField)\n\t}\n\tif iNormalFieldContext != nil {\n\t\treturn iNormalFieldContext.Accept(v).(*TypeField)\n\t}\n\treturn nil\n}",
"func (m OverrideMap) Field(source string, fallback interface{}, in []interface{}) Field {\n\treturn m.execute(source, fallback, in).(Field)\n}",
"func NewProcessField(\n\tconf Config, mgr types.Manager, log log.Modular, stats metrics.Type,\n) (Type, error) {\n\tvar children []types.Processor\n\tfor i, pconf := range conf.ProcessField.Processors {\n\t\tprefix := fmt.Sprintf(\"%v\", i)\n\t\tproc, err := New(pconf, mgr, log.NewModule(\".\"+prefix), metrics.Namespaced(stats, prefix))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tchildren = append(children, proc)\n\t}\n\treturn &ProcessField{\n\t\tparts: conf.ProcessField.Parts,\n\t\tpath: strings.Split(conf.ProcessField.Path, \".\"),\n\t\tchildren: children,\n\n\t\tlog: log,\n\n\t\tmCount: stats.GetCounter(\"count\"),\n\t\tmErr: stats.GetCounter(\"error\"),\n\t\tmErrJSONParse: stats.GetCounter(\"error.json_parse\"),\n\t\tmErrMisaligned: stats.GetCounter(\"error.misaligned\"),\n\t\tmErrMisalignedBatch: stats.GetCounter(\"error.misaligned_messages\"),\n\t\tmSent: stats.GetCounter(\"sent\"),\n\t\tmBatchSent: stats.GetCounter(\"batch.sent\"),\n\t}, nil\n}",
"func (t Trait) Field(fieldName string, generator FieldGenerator) Trait {\n\tt.FieldGenerators[fieldName] = generator\n\n\treturn t\n}",
"func VisitField(nodes []Node, field string, callback func(value string, negated bool, annotation Annotation)) {\n\tvisitor := &FieldVisitor{callback: callback, field: field}\n\tvisitor.VisitNodes(visitor, nodes)\n}",
"func (pr *PrepareResult) PrmField(idx int) Field {\n\treturn pr.prmFields[idx]\n}",
"func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {\n\tif !receiver.IsValid() {\n\t\treturn zero\n\t}\n\ttyp := receiver.Type()\n\treceiver, _ = indirect(receiver)\n\t// Unless it's an interface, need to get to a value of type *T to guarantee\n\t// we see all methods of T and *T.\n\tptr := receiver\n\tif ptr.Kind() != reflect.Interface && ptr.CanAddr() {\n\t\tptr = ptr.Addr()\n\t}\n\tif method := ptr.MethodByName(fieldName); method.IsValid() {\n\t\treturn s.evalCall(dot, method, node, fieldName, args, final)\n\t}\n\thasArgs := len(args) > 1 || final.IsValid()\n\t// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.\n\treceiver, isNil := indirect(receiver)\n\tif isNil {\n\t\ts.errorf(\"nil pointer evaluating %s.%s\", typ, fieldName)\n\t}\n\tswitch receiver.Kind() {\n\tcase reflect.Struct:\n\t\ttField, ok := receiver.Type().FieldByName(fieldName)\n\t\tif ok {\n\t\t\tfield := receiver.FieldByIndex(tField.Index)\n\t\t\tif tField.PkgPath != \"\" { // field is unexported\n\t\t\t\ts.errorf(\"%s is an unexported field of struct type %s\", fieldName, typ)\n\t\t\t}\n\t\t\t// If it's a function, we must call it.\n\t\t\tif hasArgs {\n\t\t\t\ts.errorf(\"%s has arguments but cannot be invoked as function\", fieldName)\n\t\t\t}\n\t\t\treturn field\n\t\t}\n\t\ts.errorf(\"%s is not a field of struct type %s\", fieldName, typ)\n\tcase reflect.Map:\n\t\t// If it's a map, attempt to use the field name as a key.\n\t\tnameVal := reflect.ValueOf(fieldName)\n\t\tif nameVal.Type().AssignableTo(receiver.Type().Key()) {\n\t\t\tif hasArgs {\n\t\t\t\ts.errorf(\"%s is not a method but has arguments\", fieldName)\n\t\t\t}\n\t\t\treturn receiver.MapIndex(nameVal)\n\t\t}\n\t}\n\ts.errorf(\"can't evaluate field %s in type %s\", fieldName, typ)\n\tpanic(\"not reached\")\n}",
"func (t TableModel) UseField(structFieldFunc func(columnMetaData metadata.Column) TableModelField) TableModel {\n\tt.Field = structFieldFunc\n\treturn t\n}",
"func (p *Plugin) FieldResolve(in driver.FieldResolveInput) driver.FieldResolveOutput {\n\tresp, err := p.do(in)\n\tif err != nil {\n\t\treturn driver.FieldResolveOutput{\n\t\t\tError: &driver.Error{\n\t\t\t\tMessage: err.Error(),\n\t\t\t},\n\t\t}\n\t}\n\treturn resp.(driver.FieldResolveOutput)\n}",
"func (op *metadataLookup) field(parentFunc *stmt.CallExpr, expr stmt.Expr) {\n\tif op.err != nil {\n\t\treturn\n\t}\n\tswitch e := expr.(type) {\n\tcase *stmt.SelectItem:\n\t\top.field(nil, e.Expr)\n\tcase *stmt.CallExpr:\n\t\tif e.FuncType == function.Quantile {\n\t\t\top.planHistogramFields(e)\n\t\t\treturn\n\t\t}\n\t\tfor _, param := range e.Params {\n\t\t\top.field(e, param)\n\t\t}\n\tcase *stmt.ParenExpr:\n\t\top.field(nil, e.Expr)\n\tcase *stmt.BinaryExpr:\n\t\top.field(nil, e.Left)\n\t\top.field(nil, e.Right)\n\tcase *stmt.FieldExpr:\n\t\tqueryStmt := op.executeCtx.Query\n\t\tfieldMeta, err := op.metadata.GetField(queryStmt.Namespace, queryStmt.MetricName, field.Name(e.Name))\n\t\tif err != nil {\n\t\t\top.err = err\n\t\t\treturn\n\t\t}\n\n\t\top.planField(parentFunc, fieldMeta)\n\t}\n}",
"func (b *PlanBuilder) buildProjectionField(ctx context.Context, p LogicalPlan, field *ast.SelectField, expr expression.Expression) (*expression.Column, *types.FieldName, error) {\n\tvar origTblName, tblName, colName, dbName parser_model.CIStr\n\tinnerNode := getInnerFromParenthesesAndUnaryPlus(field.Expr)\n\tcol, isCol := expr.(*expression.Column)\n\t// Correlated column won't affect the final output names. So we can put it in any of the three logic block.\n\t// Don't put it into the first block just for simplifying the codes.\n\tif colNameField, ok := innerNode.(*ast.ColumnNameExpr); ok && isCol {\n\t\t// Field is a column reference.\n\t\tidx := p.Schema().ColumnIndex(col)\n\t\tvar name *types.FieldName\n\t\t// The column maybe the one from join's redundant part.\n\t\t// TODO: Fully support USING/NATURAL JOIN, refactor here.\n\t\tif idx != -1 {\n\t\t\tname = p.OutputNames()[idx]\n\t\t}\n\t\tcolName, _, tblName, origTblName, dbName = b.buildProjectionFieldNameFromColumns(field, colNameField, name)\n\t} else if field.AsName.L != \"\" {\n\t\t// Field has alias.\n\t\tcolName = field.AsName\n\t} else {\n\t\t// Other: field is an expression.\n\t\tvar err error\n\t\tif colName, err = b.buildProjectionFieldNameFromExpressions(ctx, field); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\tname := &types.FieldName{\n\t\tTblName: tblName,\n\t\tOrigTblName: origTblName,\n\t\tColName: colName,\n\t\tOrigColName: colName,\n\t\tDBName: dbName,\n\t}\n\tif isCol {\n\t\treturn col, name, nil\n\t}\n\tnewCol := &expression.Column{\n\t\tUniqueID: b.ctx.GetSessionVars().AllocPlanColumnID(),\n\t\tRetType: expr.GetType(),\n\t\tOrigName: colName.L,\n\t}\n\treturn newCol, name, nil\n}",
"func (o TimePartitioningOutput) Field() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TimePartitioning) *string { return v.Field }).(pulumi.StringPtrOutput)\n}",
"func (w *State) GenerateFlowField(destination DestinationID) error {\n\tlog.Println(\"find shorted path\")\n\tFindShortestPath(w, destination)\n\tlog.Println(\"compute directions\")\n\tw.computeDirections(destination)\n\n\treturn nil\n\n}",
"func (_DelegateProfile *DelegateProfileTransactor) NewField(opts *bind.TransactOpts, _name string, _verifierAddr common.Address) (*types.Transaction, error) {\n\treturn _DelegateProfile.contract.Transact(opts, \"newField\", _name, _verifierAddr)\n}",
"func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) {\n\tif len(fieldpath) == 0 {\n\t\treturn \"\", false\n\t}\n\tswitch fieldpath[0] {\n\tcase \"key\":\n\t\treturn string(m.Key), len(m.Key) > 0\n\tcase \"parent\":\n\t\treturn string(m.Parent), len(m.Parent) > 0\n\tcase \"snapshotter\":\n\t\treturn string(m.Snapshotter), len(m.Snapshotter) > 0\n\t}\n\treturn \"\", false\n}",
"func (q *DistanceFeatureQuery) Field(name string) *DistanceFeatureQuery {\n\tq.field = name\n\treturn q\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
planExpr returns a processor for an expression.
|
func (p *Planner) planExpr(e *Executor, expr Expr) (processor, error) {
switch expr := expr.(type) {
case *VarRef:
panic("TODO")
case *Call:
return p.planCall(e, expr)
case *BinaryExpr:
return p.planBinaryExpr(e, expr)
case *ParenExpr:
return p.planExpr(e, expr.Expr)
case *NumberLiteral:
return newLiteralProcessor(expr.Val), nil
case *StringLiteral:
return newLiteralProcessor(expr.Val), nil
case *BooleanLiteral:
return newLiteralProcessor(expr.Val), nil
case *TimeLiteral:
return newLiteralProcessor(expr.Val), nil
case *DurationLiteral:
return newLiteralProcessor(expr.Val), nil
}
panic("unreachable")
}
|
[
"func (p *ExecutionPlanner) compileExpr(expr parser.Expr) (_ types.PlanExpression, err error) {\n\tif expr == nil {\n\t\treturn nil, nil\n\t}\n\n\tswitch expr := expr.(type) {\n\tcase *parser.BinaryExpr:\n\t\treturn p.compileBinaryExpr(expr)\n\n\tcase *parser.BoolLit:\n\t\treturn newBoolLiteralPlanExpression(expr.Value), nil\n\n\tcase *parser.Call:\n\t\treturn p.compileCallExpr(expr)\n\n\tcase *parser.CastExpr:\n\t\tcastExpr, err := p.compileExpr(expr.X)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdataType, err := dataTypeFromParserType(expr.Type)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newCastPlanExpression(castExpr, dataType), nil\n\n\tcase *parser.Exists:\n\t\treturn nil, sql3.NewErrInternal(\"exists expressions are not supported\")\n\n\tcase *parser.ExprList:\n\t\texprList := []types.PlanExpression{}\n\t\tfor _, e := range expr.Exprs {\n\t\t\tlistExpr, err := p.compileExpr(e)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\texprList = append(exprList, listExpr)\n\t\t}\n\t\treturn newExprListExpression(exprList), nil\n\n\tcase *parser.SetLiteralExpr:\n\t\texprList := []types.PlanExpression{}\n\t\tfor _, e := range expr.Members {\n\t\t\tlistExpr, err := p.compileExpr(e)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\texprList = append(exprList, listExpr)\n\t\t}\n\t\treturn newExprSetLiteralPlanExpression(exprList, expr.DataType()), nil\n\n\tcase *parser.TupleLiteralExpr:\n\t\texprList := []types.PlanExpression{}\n\t\tfor _, e := range expr.Members {\n\t\t\tlistExpr, err := p.compileExpr(e)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\texprList = append(exprList, listExpr)\n\t\t}\n\t\treturn newExprTupleLiteralPlanExpression(exprList, expr.DataType()), nil\n\n\tcase *parser.Ident:\n\t\treturn nil, sql3.NewErrInternal(\"identifiers are not supported\")\n\n\tcase *parser.NullLit:\n\t\treturn newNullLiteralPlanExpression(), nil\n\n\tcase *parser.IntegerLit:\n\t\treturn newIntLiteralPlanExpression(expr.Value), nil\n\n\tcase *parser.FloatLit:\n\t\treturn newFloatLiteralPlanExpression(expr.Value), nil\n\n\tcase *parser.DateLit:\n\t\treturn newDateLiteralPlanExpression(expr.Value), nil\n\n\tcase *parser.ParenExpr:\n\t\treturn p.compileExpr(expr.X)\n\n\tcase *parser.QualifiedRef:\n\t\tref := newQualifiedRefPlanExpression(parser.IdentName(expr.Table), parser.IdentName(expr.Column), expr.ColumnIndex, expr.DataType())\n\t\tp.addReference(ref)\n\t\treturn ref, nil\n\n\tcase *parser.Range:\n\t\tlhs, err := p.compileExpr(expr.X)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trhs, err := p.compileExpr(expr.Y)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newRangeOpPlanExpression(lhs, rhs, expr.ResultDataType), nil\n\n\tcase *parser.StringLit:\n\t\treturn newStringLiteralPlanExpression(expr.Value), nil\n\n\tcase *parser.UnaryExpr:\n\t\treturn p.compileUnaryExpr(expr)\n\n\tcase *parser.CaseExpr:\n\t\toperand, err := p.compileExpr(expr.Operand)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tblocks := []types.PlanExpression{}\n\t\tfor _, b := range expr.Blocks {\n\t\t\tblock, err := p.compileExpr(b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tblocks = append(blocks, block)\n\t\t}\n\n\t\telseExpr, err := p.compileExpr(expr.ElseExpr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newCasePlanExpression(operand, blocks, elseExpr, expr.DataType()), nil\n\n\tcase *parser.CaseBlock:\n\n\t\tcondition, err := p.compileExpr(expr.Condition)\n\t\tif err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\tbody, err := p.compileExpr(expr.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newCaseBlockPlanExpression(condition, body), nil\n\n\tcase *parser.SelectStatement:\n\t\tselOp, err := p.compileSelectStatement(expr, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newSubqueryPlanExpression(selOp), nil\n\n\tdefault:\n\t\treturn nil, sql3.NewErrInternalf(\"unexpected SQL expression type: %T\", expr)\n\t}\n}",
"func (p *Planner) planBinaryExpr(e *Executor, expr *BinaryExpr) (processor, error) {\n\t// Create processor for LHS.\n\tlhs, err := p.planExpr(e, expr.LHS)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"lhs: %s\", err)\n\t}\n\n\t// Create processor for RHS.\n\trhs, err := p.planExpr(e, expr.RHS)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"rhs: %s\", err)\n\t}\n\n\t// Combine processors.\n\treturn newBinaryExprEvaluator(e, expr.Op, lhs, rhs), nil\n}",
"func (p *Planner) planCall(e *Executor, c *Call) (processor, error) {\n\t// Ensure there is a single argument.\n\tif len(c.Args) != 1 {\n\t\treturn nil, fmt.Errorf(\"expected one argument for %s()\", c.Name)\n\t}\n\n\t// Ensure the argument is a variable reference.\n\tref, ok := c.Args[0].(*VarRef)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected field argument in %s()\", c.Name)\n\t}\n\n\t// Extract the substatement for the call.\n\tsub, err := e.stmt.Substatement(ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tname := sub.Source.(*Measurement).Name\n\n\t// Extract tags from conditional.\n\ttags := make(map[string]string)\n\tcondition, err := p.extractTags(name, sub.Condition, tags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsub.Condition = condition\n\n\t// Find field.\n\tfname := strings.TrimPrefix(ref.Val, name+\".\")\n\tfieldID, typ := e.db.Field(name, fname)\n\tif fieldID == 0 {\n\t\treturn nil, fmt.Errorf(\"field not found: %s.%s\", name, fname)\n\t}\n\n\t// Generate a reducer for the given function.\n\tr := newReducer(e)\n\tr.stmt = sub\n\n\t// Retrieve a list of series data ids.\n\tseriesIDs := p.DB.MatchSeries(name, tags)\n\n\t// Generate mappers for each id.\n\tr.mappers = make([]*mapper, len(seriesIDs))\n\tfor i, seriesID := range seriesIDs {\n\t\tm := newMapper(e, seriesID, fieldID, typ)\n\t\tm.min, m.max = e.min.UnixNano(), e.max.UnixNano()\n\t\tm.interval = int64(e.interval)\n\t\tm.key = append(make([]byte, 8), marshalStrings(p.DB.SeriesTagValues(seriesID, e.tags))...)\n\t\tr.mappers[i] = m\n\t}\n\n\t// Set the appropriate reducer function.\n\tswitch strings.ToLower(c.Name) {\n\tcase \"count\":\n\t\tr.fn = reduceSum\n\t\tfor _, m := range r.mappers {\n\t\t\tm.fn = mapCount\n\t\t}\n\tcase \"sum\":\n\t\tr.fn = reduceSum\n\t\tfor _, m := range r.mappers {\n\t\t\tm.fn = mapSum\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"function not found: %q\", c.Name)\n\t}\n\n\treturn r, nil\n}",
"func planFilterExpr(\n\tctx context.Context,\n\tflowCtx *execinfra.FlowCtx,\n\tevalCtx *tree.EvalContext,\n\tinput colexecop.Operator,\n\tcolumnTypes []*types.T,\n\tfilter execinfrapb.Expression,\n\tacc *mon.BoundAccount,\n\tfactory coldata.ColumnFactory,\n\thelper *colexecargs.ExprHelper,\n) (colexecop.Operator, error) {\n\tsemaCtx := flowCtx.TypeResolverFactory.NewSemaContext(evalCtx.Txn)\n\texpr, err := helper.ProcessExpr(filter, semaCtx, evalCtx, columnTypes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif expr == tree.DNull {\n\t\t// The filter expression is tree.DNull meaning that it is always false, so\n\t\t// we put a zero operator.\n\t\treturn colexecutils.NewZeroOp(input), nil\n\t}\n\top, _, filterColumnTypes, err := planSelectionOperators(\n\t\tctx, evalCtx, expr, columnTypes, input, acc, factory,\n\t)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to columnarize filter expression %q\", filter)\n\t}\n\tif len(filterColumnTypes) > len(columnTypes) {\n\t\t// Additional columns were appended to store projections while\n\t\t// evaluating the filter. Project them away.\n\t\tvar outputColumns []uint32\n\t\tfor i := range columnTypes {\n\t\t\toutputColumns = append(outputColumns, uint32(i))\n\t\t}\n\t\top = colexecbase.NewSimpleProjectOp(op, len(filterColumnTypes), outputColumns)\n\t}\n\treturn op, nil\n}",
"func (p *Planner) planField(e *Executor, f *Field) (processor, error) {\n\treturn p.planExpr(e, f.Expr)\n}",
"func EvalExpr(e *Expression, context Context) (interface{}, error) {\n\treturn eval(e.expr, context)\n}",
"func makePlanValue(n sqlparser.Expr) (*sqltypes.PlanValue, error) {\n\tvalue, err := sqlparser.NewPlanValue(n)\n\tif err != nil {\n\t\t// if we are unable to create a PlanValue, we can't use a vindex, but we don't have to fail\n\t\tif strings.Contains(err.Error(), \"expression is too complex\") {\n\t\t\treturn nil, nil\n\t\t}\n\t\t// something else went wrong, return the error\n\t\treturn nil, err\n\t}\n\treturn &value, nil\n}",
"func (p *Project) Plan() *ExecPlan {\n\treturn NewExecPlan(p)\n}",
"func (c *Compiler) Compile(expr string) (*runtime.Program, error) {\n\tprogAST, err := parser.Parse(expr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, pass := range context.Passes {\n\t\terr = progAST.RunPass(c.ctx, pass)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tprog := c.ctx.Builder.Build()\n\tprog.ResultType = progAST.Type()\n\treturn prog, nil\n}",
"func (p *Profile) Expr(r adt.Runtime, pkgID string, n adt.Expr) (ast.Expr, errors.Error) {\n\te := newExporter(p, r, pkgID, nil)\n\n\treturn e.expr(nil, n), nil\n}",
"func planProjectionOperators(\n\tctx context.Context,\n\tevalCtx *tree.EvalContext,\n\texpr tree.TypedExpr,\n\tcolumnTypes []*types.T,\n\tinput colexecop.Operator,\n\tacc *mon.BoundAccount,\n\tfactory coldata.ColumnFactory,\n) (op colexecop.Operator, resultIdx int, typs []*types.T, err error) {\n\t// projectDatum is a helper function that adds a new constant projection\n\t// operator for the given datum. typs are updated accordingly.\n\tprojectDatum := func(datum tree.Datum) (colexecop.Operator, error) {\n\t\tresultIdx = len(columnTypes)\n\t\tdatumType := datum.ResolvedType()\n\t\ttyps = appendOneType(columnTypes, datumType)\n\t\tif datumType.Family() == types.UnknownFamily {\n\t\t\t// We handle Unknown type by planning a special constant null\n\t\t\t// operator.\n\t\t\treturn colexecbase.NewConstNullOp(colmem.NewAllocator(ctx, acc, factory), input, resultIdx), nil\n\t\t}\n\t\tconstVal := colconv.GetDatumToPhysicalFn(datumType)(datum)\n\t\treturn colexecbase.NewConstOp(colmem.NewAllocator(ctx, acc, factory), input, datumType, constVal, resultIdx)\n\t}\n\tresultIdx = -1\n\tswitch t := expr.(type) {\n\tcase *tree.IndexedVar:\n\t\treturn input, t.Idx, columnTypes, nil\n\tcase *tree.ComparisonExpr:\n\t\treturn planProjectionExpr(\n\t\t\tctx, evalCtx, t.Operator, t.ResolvedType(), t.TypedLeft(), t.TypedRight(),\n\t\t\tcolumnTypes, input, acc, factory, nil /* binFn */, t,\n\t\t)\n\tcase *tree.BinaryExpr:\n\t\tif err = checkSupportedBinaryExpr(t.TypedLeft(), t.TypedRight(), t.ResolvedType()); err != nil {\n\t\t\treturn op, resultIdx, typs, err\n\t\t}\n\t\treturn planProjectionExpr(\n\t\t\tctx, evalCtx, t.Operator, t.ResolvedType(), t.TypedLeft(), t.TypedRight(),\n\t\t\tcolumnTypes, input, acc, factory, t.Fn.Fn, nil, /* cmpExpr */\n\t\t)\n\tcase *tree.IsNullExpr:\n\t\treturn planIsNullProjectionOp(ctx, evalCtx, t.ResolvedType(), t.TypedInnerExpr(), columnTypes, input, acc, false /* negate */, factory)\n\tcase *tree.IsNotNullExpr:\n\t\treturn planIsNullProjectionOp(ctx, evalCtx, t.ResolvedType(), t.TypedInnerExpr(), columnTypes, input, acc, true /* negate */, factory)\n\tcase *tree.CastExpr:\n\t\texpr := t.Expr.(tree.TypedExpr)\n\t\top, resultIdx, typs, err = planProjectionOperators(\n\t\t\tctx, evalCtx, expr, columnTypes, input, acc, factory,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, 0, nil, err\n\t\t}\n\t\top, resultIdx, typs, err = planCastOperator(ctx, acc, typs, op, resultIdx, expr.ResolvedType(), t.ResolvedType(), factory)\n\t\treturn op, resultIdx, typs, err\n\tcase *tree.FuncExpr:\n\t\tvar inputCols []int\n\t\ttyps = make([]*types.T, len(columnTypes))\n\t\tcopy(typs, columnTypes)\n\t\top = input\n\t\tfor _, e := range t.Exprs {\n\t\t\tvar err error\n\t\t\t// TODO(rohany): This could be done better, especially in the case\n\t\t\t// of constant arguments, because the vectorized engine right now\n\t\t\t// creates a new column full of the constant value.\n\t\t\top, resultIdx, typs, err = planProjectionOperators(\n\t\t\t\tctx, evalCtx, e.(tree.TypedExpr), typs, op, acc, factory,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, resultIdx, nil, err\n\t\t\t}\n\t\t\tinputCols = append(inputCols, resultIdx)\n\t\t}\n\t\tresultIdx = len(typs)\n\t\top, err = colexec.NewBuiltinFunctionOperator(\n\t\t\tcolmem.NewAllocator(ctx, acc, factory), evalCtx, t, typs, inputCols, resultIdx, op,\n\t\t)\n\t\ttyps = appendOneType(typs, t.ResolvedType())\n\t\treturn op, resultIdx, typs, err\n\tcase tree.Datum:\n\t\top, err = projectDatum(t)\n\t\treturn op, resultIdx, typs, err\n\tcase 
*tree.Tuple:\n\t\tisConstTuple := true\n\t\tfor _, expr := range t.Exprs {\n\t\t\tif _, isDatum := expr.(tree.Datum); !isDatum {\n\t\t\t\tisConstTuple = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif isConstTuple {\n\t\t\t// Tuple expression is a constant, so we can evaluate it and\n\t\t\t// project the resulting datum.\n\t\t\ttuple, err := t.Eval(evalCtx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, resultIdx, typs, err\n\t\t\t}\n\t\t\top, err = projectDatum(tuple)\n\t\t\treturn op, resultIdx, typs, err\n\t\t}\n\t\toutputType := t.ResolvedType()\n\t\ttyps = make([]*types.T, len(columnTypes))\n\t\tcopy(typs, columnTypes)\n\t\ttupleContentsIdxs := make([]int, len(t.Exprs))\n\t\tfor i, expr := range t.Exprs {\n\t\t\tinput, tupleContentsIdxs[i], typs, err = planProjectionOperators(\n\t\t\t\tctx, evalCtx, expr.(tree.TypedExpr), typs, input, acc, factory,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, resultIdx, typs, err\n\t\t\t}\n\t\t}\n\t\tresultIdx = len(typs)\n\t\top = colexec.NewTupleProjOp(\n\t\t\tcolmem.NewAllocator(ctx, acc, factory), typs, tupleContentsIdxs, outputType, input, resultIdx,\n\t\t)\n\t\ttyps = appendOneType(typs, outputType)\n\t\treturn op, resultIdx, typs, err\n\tcase *tree.CaseExpr:\n\t\tif t.Expr != nil {\n\t\t\treturn nil, resultIdx, typs, errors.New(\"CASE <expr> WHEN expressions unsupported\")\n\t\t}\n\n\t\tallocator := colmem.NewAllocator(ctx, acc, factory)\n\t\tcaseOutputType := t.ResolvedType()\n\t\tif typeconv.TypeFamilyToCanonicalTypeFamily(caseOutputType.Family()) == types.BytesFamily {\n\t\t\t// Currently, there is a contradiction between the way CASE operator\n\t\t\t// works (which populates its output in arbitrary order) and the\n\t\t\t// flat bytes implementation of Bytes type (which prohibits sets in\n\t\t\t// arbitrary order), so we reject such scenario to fall back to\n\t\t\t// row-by-row engine.\n\t\t\treturn nil, resultIdx, typs, errors.Newf(\n\t\t\t\t\"unsupported type %s in CASE operator\", caseOutputType)\n\t\t}\n\t\tcaseOutputIdx := len(columnTypes)\n\t\t// We don't know the schema yet and will update it below, right before\n\t\t// instantiating caseOp. The same goes for subsetEndIdx.\n\t\tschemaEnforcer := colexecutils.NewBatchSchemaSubsetEnforcer(\n\t\t\tallocator, input, nil /* typs */, caseOutputIdx, -1, /* subsetEndIdx */\n\t\t)\n\t\tbuffer := colexec.NewBufferOp(schemaEnforcer)\n\t\tcaseOps := make([]colexecop.Operator, len(t.Whens))\n\t\ttyps = appendOneType(columnTypes, caseOutputType)\n\t\tthenIdxs := make([]int, len(t.Whens)+1)\n\t\tfor i, when := range t.Whens {\n\t\t\t// The case operator is assembled from n WHEN arms, n THEN arms, and\n\t\t\t// an ELSE arm. Each WHEN arm is a boolean projection. Each THEN arm\n\t\t\t// (and the ELSE arm) is a projection of the type of the CASE\n\t\t\t// expression. We set up each WHEN arm to write its output to a\n\t\t\t// fresh column, and likewise for the THEN arms and the ELSE arm.\n\t\t\t// Each WHEN arm individually acts on the single input batch from\n\t\t\t// the CaseExpr's input and is then transformed into a selection\n\t\t\t// vector, after which the THEN arm runs to create the output just\n\t\t\t// for the tuples that matched the WHEN arm. Each subsequent WHEN\n\t\t\t// arm will use the inverse of the selection vector to avoid running\n\t\t\t// the WHEN projection on tuples that have already been matched by a\n\t\t\t// previous WHEN arm. 
Finally, after each WHEN arm runs, we copy the\n\t\t\t// results of the WHEN into a single output vector, assembling the\n\t\t\t// final result of the case projection.\n\t\t\twhenTyped := when.Cond.(tree.TypedExpr)\n\t\t\tcaseOps[i], resultIdx, typs, err = planProjectionOperators(\n\t\t\t\tctx, evalCtx, whenTyped, typs, buffer, acc, factory,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, resultIdx, typs, err\n\t\t\t}\n\t\t\tcaseOps[i], err = colexecutils.BoolOrUnknownToSelOp(caseOps[i], typs, resultIdx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, resultIdx, typs, err\n\t\t\t}\n\n\t\t\t// Run the \"then\" clause on those tuples that were selected.\n\t\t\tcaseOps[i], thenIdxs[i], typs, err = planProjectionOperators(\n\t\t\t\tctx, evalCtx, when.Val.(tree.TypedExpr), typs, caseOps[i], acc, factory,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, resultIdx, typs, err\n\t\t\t}\n\t\t\tif !typs[thenIdxs[i]].Identical(typs[caseOutputIdx]) {\n\t\t\t\t// It is possible that the projection of this THEN arm has\n\t\t\t\t// different column type (for example, we expect INT2, but INT8\n\t\t\t\t// is given). In such case, we need to plan a cast.\n\t\t\t\tfromType, toType := typs[thenIdxs[i]], typs[caseOutputIdx]\n\t\t\t\tcaseOps[i], thenIdxs[i], typs, err = planCastOperator(\n\t\t\t\t\tctx, acc, typs, caseOps[i], thenIdxs[i], fromType, toType, factory,\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, resultIdx, typs, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvar elseOp colexecop.Operator\n\t\telseExpr := t.Else\n\t\tif elseExpr == nil {\n\t\t\t// If there's no ELSE arm, we write NULLs.\n\t\t\telseExpr = tree.DNull\n\t\t}\n\t\telseOp, thenIdxs[len(t.Whens)], typs, err = planProjectionOperators(\n\t\t\tctx, evalCtx, elseExpr.(tree.TypedExpr), typs, buffer, acc, factory,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, resultIdx, typs, err\n\t\t}\n\t\tif !typs[thenIdxs[len(t.Whens)]].Identical(typs[caseOutputIdx]) {\n\t\t\t// It is possible that the projection of the ELSE arm has different\n\t\t\t// column type (for example, we expect INT2, but INT8 is given). In\n\t\t\t// such case, we need to plan a cast.\n\t\t\telseIdx := thenIdxs[len(t.Whens)]\n\t\t\tfromType, toType := typs[elseIdx], typs[caseOutputIdx]\n\t\t\telseOp, thenIdxs[len(t.Whens)], typs, err = planCastOperator(\n\t\t\t\tctx, acc, typs, elseOp, elseIdx, fromType, toType, factory,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, resultIdx, typs, err\n\t\t\t}\n\t\t}\n\n\t\tschemaEnforcer.SetTypes(typs)\n\t\top := colexec.NewCaseOp(allocator, buffer, caseOps, elseOp, thenIdxs, caseOutputIdx, caseOutputType)\n\t\treturn op, caseOutputIdx, typs, err\n\tcase *tree.AndExpr, *tree.OrExpr:\n\t\treturn planLogicalProjectionOp(ctx, evalCtx, expr, columnTypes, input, acc, factory)\n\tdefault:\n\t\treturn nil, resultIdx, nil, errors.Errorf(\"unhandled projection expression type: %s\", reflect.TypeOf(t))\n\t}\n}",
"func (l *logicalPlanner) Plan(ctx context.Context, logicalPlan *Spec) (*Spec, error) {\n\tnewLogicalPlan, err := l.heuristicPlanner.Plan(ctx, logicalPlan)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// check integrity after planning is complete\n\tif !l.disableIntegrityChecks {\n\t\terr := newLogicalPlan.CheckIntegrity()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn newLogicalPlan, nil\n}",
"func (v *planVisitor) expr(\n\tfieldName string, n int, expr parser.Expr, subplans []planNode,\n) []planNode {\n\tif v.err != nil {\n\t\treturn subplans\n\t}\n\n\tv.observer.expr(v.nodeName, fieldName, n, expr)\n\n\tif expr != nil {\n\t\t// Note: the recursion through WalkExprConst does nothing else\n\t\t// than calling observer.subqueryNode() and collect subplans in\n\t\t// v.subplans, in particular it does not recurse into the\n\t\t// collected subplans (this recursion is performed by visit() only\n\t\t// after all the subplans have been collected). Therefore, there\n\t\t// is no risk that v.subplans will be clobbered by a recursion\n\t\t// into visit().\n\t\tv.subplans = subplans\n\t\tparser.WalkExprConst(v, expr)\n\t\tsubplans = v.subplans\n\t\tv.subplans = nil\n\t}\n\treturn subplans\n}",
"func (stage *physicalPlanStage) Plan() PlanNode {\n\treturn NewPlanNode(operator.NewPhysicalPlan(stage.taskCtx))\n}",
"func resolveExpression(ctx *sql.Context, expression string, sch schema.Schema, tableName string) (sql.Expression, error) {\n\tquery := fmt.Sprintf(\"SELECT %s from %s.%s\", expression, \"mydb\", tableName)\n\tsqlSch, err := sqlutil.FromDoltSchema(tableName, sch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmockTable := memory.NewTable(tableName, sqlSch, nil)\n\tmockDatabase := memory.NewDatabase(\"mydb\")\n\tmockDatabase.AddTable(tableName, mockTable)\n\tmockProvider := memory.NewDBProvider(mockDatabase)\n\tcatalog := analyzer.NewCatalog(mockProvider)\n\n\tpseudoAnalyzedQuery, err := planbuilder.Parse(ctx, catalog, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar expr sql.Expression\n\ttransform.Inspect(pseudoAnalyzedQuery, func(n sql.Node) bool {\n\t\tif projector, ok := n.(sql.Projector); ok {\n\t\t\texpr = projector.ProjectedExprs()[0]\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\tif expr == nil {\n\t\treturn nil, fmt.Errorf(\"unable to find expression in analyzed query\")\n\t}\n\n\treturn expr, nil\n}",
"func (i *Interpreter) interpretExpr(expr ast.Expr) interface{} {\n\tswitch t := expr.(type) {\n\n\tcase *ast.AssignmentExpr:\n\t\treturn i.interpretAssignmentExpr(t)\n\n\tcase *ast.BinaryExpr:\n\t\treturn i.interpretBinaryExpr(t)\n\n\tcase *ast.CallExpr:\n\t\treturn i.interpretCallExpr(t)\n\n\tcase *ast.GetExpr:\n\t\treturn i.interpretGetExpr(t)\n\n\tcase *ast.GroupingExpr:\n\t\treturn i.interpretGroupingExpr(t)\n\n\tcase *ast.LiteralExpr:\n\t\treturn i.interpretLiteralExpr(t)\n\n\tcase *ast.LogicalExpr:\n\t\treturn i.interpretLogicalExpr(t)\n\n\tcase *ast.SetExpr:\n\t\treturn i.interpretSetExpr(t)\n\n\tcase *ast.ThisExpr:\n\t\treturn i.interpretThisExpr(t)\n\n\tcase *ast.UnaryExpr:\n\t\treturn i.interpretUnaryExpr(t)\n\n\tcase *ast.VariableExpr:\n\t\treturn i.interpretVariableExpr(t)\n\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func compileExpr(compiledFunc *CompiledFunc, expr ast.Node) (string, string, error) {\n\tswitch e := expr.(type) {\n\tcase *ast.Literal:\n\t\treturns := compiledFunc.nextRegister()\n\t\tcompiledFunc.append(&instruction.Assign{\n\t\t\tVariableName: returns,\n\t\t\tValue: e,\n\t\t})\n\n\t\treturn returns, e.Kind, nil\n\n\tcase *ast.Array:\n\t\treturns, err := compileArray(compiledFunc, e)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\t// TODO(elliot): Doesn't return type.\n\t\treturn returns, \"[]\", nil\n\n\tcase *ast.Map:\n\t\treturns, err := compileMap(compiledFunc, e)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\t// TODO(elliot): Doesn't return type.\n\t\treturn returns, \"{}\", nil\n\n\tcase *ast.Call:\n\t\tresult, err := compileCall(compiledFunc, e)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\t// TODO(elliot): Doesn't return kind.\n\t\treturn result, \"\", nil\n\n\tcase *ast.Identifier:\n\t\tif v, ok := compiledFunc.variables[e.Name]; ok {\n\t\t\treturn e.Name, v, nil\n\t\t}\n\n\t\treturn \"\", \"\", fmt.Errorf(\"undefined variable: %s\", e.Name)\n\n\tcase *ast.Binary:\n\t\treturn compileBinary(compiledFunc, e)\n\n\tcase *ast.Group:\n\t\treturn compileExpr(compiledFunc, e.Expr)\n\n\tcase *ast.Unary:\n\t\treturn compileUnary(compiledFunc, e)\n\n\tcase *ast.Key:\n\t\treturn compileKey(compiledFunc, e)\n\t}\n\n\tpanic(expr)\n}",
"func (r *postProcessResult) planPostProcessSpec(\n\tctx context.Context,\n\tflowCtx *execinfra.FlowCtx,\n\tevalCtx *tree.EvalContext,\n\targs *colexecargs.NewColOperatorArgs,\n\tpost *execinfrapb.PostProcessSpec,\n\tfactory coldata.ColumnFactory,\n) error {\n\tif post.Projection {\n\t\tr.Op, r.ColumnTypes = addProjection(r.Op, r.ColumnTypes, post.OutputColumns)\n\t} else if post.RenderExprs != nil {\n\t\tif log.V(2) {\n\t\t\tlog.Infof(ctx, \"planning render expressions %+v\", post.RenderExprs)\n\t\t}\n\t\tsemaCtx := flowCtx.TypeResolverFactory.NewSemaContext(evalCtx.Txn)\n\t\tvar renderedCols []uint32\n\t\tfor _, renderExpr := range post.RenderExprs {\n\t\t\texpr, err := args.ExprHelper.ProcessExpr(renderExpr, semaCtx, evalCtx, r.ColumnTypes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar outputIdx int\n\t\t\tr.Op, outputIdx, r.ColumnTypes, err = planProjectionOperators(\n\t\t\t\tctx, evalCtx, expr, r.ColumnTypes, r.Op, args.StreamingMemAccount, factory,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"unable to columnarize render expression %q\", expr)\n\t\t\t}\n\t\t\tif outputIdx < 0 {\n\t\t\t\treturn errors.AssertionFailedf(\"missing outputIdx\")\n\t\t\t}\n\t\t\trenderedCols = append(renderedCols, uint32(outputIdx))\n\t\t}\n\t\tr.Op = colexecbase.NewSimpleProjectOp(r.Op, len(r.ColumnTypes), renderedCols)\n\t\tnewTypes := make([]*types.T, len(renderedCols))\n\t\tfor i, j := range renderedCols {\n\t\t\tnewTypes[i] = r.ColumnTypes[j]\n\t\t}\n\t\tr.ColumnTypes = newTypes\n\t}\n\tif post.Offset != 0 {\n\t\tr.Op = colexec.NewOffsetOp(r.Op, post.Offset)\n\t}\n\tif post.Limit != 0 {\n\t\tr.Op = colexec.NewLimitOp(r.Op, post.Limit)\n\t}\n\treturn nil\n}",
"func (rp *routeTree) makePlanValue(ctx *planningContext, n sqlparser.Expr) (*sqltypes.PlanValue, error) {\n\tif ctx.isSubQueryToReplace(n) {\n\t\treturn nil, nil\n\t}\n\n\tfor _, expr := range ctx.semTable.GetExprAndEqualities(n) {\n\t\tif subq, isSubq := expr.(*sqlparser.Subquery); isSubq {\n\t\t\textractedSubquery := ctx.semTable.FindSubqueryReference(subq)\n\t\t\tif extractedSubquery == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch engine.PulloutOpcode(extractedSubquery.OpCode) {\n\t\t\tcase engine.PulloutIn, engine.PulloutNotIn:\n\t\t\t\texpr = sqlparser.NewListArg(extractedSubquery.GetArgName())\n\t\t\tcase engine.PulloutValue, engine.PulloutExists:\n\t\t\t\texpr = sqlparser.NewArgument(extractedSubquery.GetArgName())\n\t\t\t}\n\t\t}\n\t\tpv, err := makePlanValue(expr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif pv != nil {\n\t\t\treturn pv, nil\n\t\t}\n\t}\n\n\treturn nil, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
planCall generates a processor for a function call.
|
func (p *Planner) planCall(e *Executor, c *Call) (processor, error) {
// Ensure there is a single argument.
if len(c.Args) != 1 {
return nil, fmt.Errorf("expected one argument for %s()", c.Name)
}
// Ensure the argument is a variable reference.
ref, ok := c.Args[0].(*VarRef)
if !ok {
return nil, fmt.Errorf("expected field argument in %s()", c.Name)
}
// Extract the substatement for the call.
sub, err := e.stmt.Substatement(ref)
if err != nil {
return nil, err
}
name := sub.Source.(*Measurement).Name
// Extract tags from conditional.
tags := make(map[string]string)
condition, err := p.extractTags(name, sub.Condition, tags)
if err != nil {
return nil, err
}
sub.Condition = condition
// Find field.
fname := strings.TrimPrefix(ref.Val, name+".")
fieldID, typ := e.db.Field(name, fname)
if fieldID == 0 {
return nil, fmt.Errorf("field not found: %s.%s", name, fname)
}
// Generate a reducer for the given function.
r := newReducer(e)
r.stmt = sub
// Retrieve a list of series data ids.
seriesIDs := p.DB.MatchSeries(name, tags)
// Generate mappers for each id.
r.mappers = make([]*mapper, len(seriesIDs))
for i, seriesID := range seriesIDs {
m := newMapper(e, seriesID, fieldID, typ)
m.min, m.max = e.min.UnixNano(), e.max.UnixNano()
m.interval = int64(e.interval)
m.key = append(make([]byte, 8), marshalStrings(p.DB.SeriesTagValues(seriesID, e.tags))...)
r.mappers[i] = m
}
// Set the appropriate reducer function.
switch strings.ToLower(c.Name) {
case "count":
r.fn = reduceSum
for _, m := range r.mappers {
m.fn = mapCount
}
case "sum":
r.fn = reduceSum
for _, m := range r.mappers {
m.fn = mapSum
}
default:
return nil, fmt.Errorf("function not found: %q", c.Name)
}
return r, nil
}
|
[
"func (t *TimeLine) Plan(nbars, num, denom uint32, callback func(delta int32)) {\n\t/*\n\t 1. calc the abs position for the callback by using forward\n\t 2. rewind cursor\n\t 3. register callback\n\t 4. sort planned callbacks\n\t*/\n\n\tsavedCursor := t.cursor\n\tt.forwardIgnoringCallbacks(nbars, num, denom)\n\tpos := t.cursor\n\tt.cursor = savedCursor\n\t//\tfmt.Printf(\"cursor: %v, pos: %v\\n\", t.cursor, pos)\n\tt.plannedCallbacks = append(t.plannedCallbacks, plannedCallback{callback: callback, position: pos})\n\tsort.Sort(t.plannedCallbacks)\n}",
"func (runner *suiteRunner) forkCall(method *reflect.FuncValue, kind funcKind,\n dispatcher func(c *C)) *C {\n c := newC(method, kind, runner.tempDir)\n runner.tracker.waitForCall(c)\n go (func() {\n defer runner.callDone(c)\n dispatcher(c)\n })()\n return c\n}",
"func (a *answer) queueCall(result *answer, transform []capnp.PipelineOp, call *capnp.Call) error {\n\ta.mu.Lock()\n\tdefer a.mu.Unlock()\n\tif a.done {\n\t\tpanic(\"answer.queueCall called on resolved answer\")\n\t}\n\tif len(a.queue) == cap(a.queue) {\n\t\treturn errQueueFull\n\t}\n\tcc, err := call.Copy(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.queue = append(a.queue, pcall{\n\t\ttransform: transform,\n\t\tqcall: qcall{\n\t\t\ta: result,\n\t\t\tcall: cc,\n\t\t},\n\t})\n\treturn nil\n}",
"func transformCall(n *ir.CallExpr) {\n\t// Set base.Pos, since transformArgs below may need it, but transformCall\n\t// is called in some passes that don't set base.Pos.\n\tir.SetPos(n)\n\t// n.Type() can be nil for calls with no return value\n\tassert(n.Typecheck() == 1)\n\ttransformArgs(n)\n\tl := n.X\n\tt := l.Type()\n\n\tswitch l.Op() {\n\tcase ir.ODOTINTER:\n\t\tn.SetOp(ir.OCALLINTER)\n\n\tcase ir.ODOTMETH:\n\t\tl := l.(*ir.SelectorExpr)\n\t\tn.SetOp(ir.OCALLMETH)\n\n\t\ttp := t.Recv().Type\n\n\t\tif l.X == nil || !types.Identical(l.X.Type(), tp) {\n\t\t\tbase.Fatalf(\"method receiver\")\n\t\t}\n\n\tdefault:\n\t\tn.SetOp(ir.OCALLFUNC)\n\t}\n\n\ttypecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args)\n\tif l.Op() == ir.ODOTMETH && len(deref(n.X.Type().Recv().Type).RParams()) == 0 {\n\t\ttypecheck.FixMethodCall(n)\n\t}\n\tif t.NumResults() == 1 {\n\t\tif n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {\n\t\t\tif sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == \"getg\" {\n\t\t\t\t// Emit code for runtime.getg() directly instead of calling function.\n\t\t\t\t// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,\n\t\t\t\t// so that the ordering pass can make sure to preserve the semantics of the original code\n\t\t\t\t// (in particular, the exact time of the function call) by introducing temporaries.\n\t\t\t\t// In this case, we know getg() always returns the same result within a given function\n\t\t\t\t// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.\n\t\t\t\tn.SetOp(ir.OGETG)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}",
"func generateCall(generator *Generator, node parser.Node) string {\n\tvar identifier string\n\n\t// Check if it is a built-in function or not\n\tif strings.Contains(node.Value, \"|\") {\n\t\t// Get the function identifier by spliting the value by the pipe\n\t\tidentifier = strings.Split(node.Value, \"|\")[1]\n\n\t\tcheckCall(generator, node)\n\n\t\t// Add import to the generator\n\t\taddCallImport(\n\t\t\tgenerator,\n\t\t\tnode.Value,\n\t\t)\n\t} else {\n\t\tidentifier = node.Value\n\t}\n\n\t// Translate the params\n\tparams := generateParams(generator, node.Params)\n\n\t// Link all the translations together\n\treturn fmt.Sprintf(\n\t\tcCall,\n\t\tidentifier,\n\t\tstrings.Join(params, \",\"),\n\t)\n}",
"func (bc *BytecodeExecContext) call(subroutine *BytecodeContext, nargs int) goalRef {\n\toldLen := len(*bc.goalStack)\n\n\tchain := nodeParentChain{nil, nil, 0}\n\tbcCopy := BytecodeExecContext{bc.BytecodeContext, bc.GlobalSymbolContext, bc.FileSymbolContext, bc.goalStack, chain, len(*bc.goalStack) - nargs, 0}\n\tretVal := bcCopy.exec()\n\tbc.PopN(len(*bcCopy.goalStack) - oldLen)\n\treturn retVal\n}",
"func CALL(r operand.Op) { ctx.CALL(r) }",
"func (_e *MockCompactionPlanContext_Expecter) execCompactionPlan(signal interface{}, plan interface{}) *MockCompactionPlanContext_execCompactionPlan_Call {\n\treturn &MockCompactionPlanContext_execCompactionPlan_Call{Call: _e.mock.On(\"execCompactionPlan\", signal, plan)}\n}",
"func (c *CoreVM) makeCall(fn apFunc, args []string) (otto.Value, error) {\n\tif !knownFuncs.MatchString(string(fn)) {\n\t\treturn otto.UndefinedValue(), fmt.Errorf(\"No such AP function %q\", fn)\n\t}\n\tscript := fmt.Sprintf(\"AP.%s(%s);\", fn, strings.Join(args, \",\"))\n\n\tv, err := c.Run(script)\n\treturn v, err\n}",
"func (p *Planner) planExpr(e *Executor, expr Expr) (processor, error) {\n\tswitch expr := expr.(type) {\n\tcase *VarRef:\n\t\tpanic(\"TODO\")\n\tcase *Call:\n\t\treturn p.planCall(e, expr)\n\tcase *BinaryExpr:\n\t\treturn p.planBinaryExpr(e, expr)\n\tcase *ParenExpr:\n\t\treturn p.planExpr(e, expr.Expr)\n\tcase *NumberLiteral:\n\t\treturn newLiteralProcessor(expr.Val), nil\n\tcase *StringLiteral:\n\t\treturn newLiteralProcessor(expr.Val), nil\n\tcase *BooleanLiteral:\n\t\treturn newLiteralProcessor(expr.Val), nil\n\tcase *TimeLiteral:\n\t\treturn newLiteralProcessor(expr.Val), nil\n\tcase *DurationLiteral:\n\t\treturn newLiteralProcessor(expr.Val), nil\n\t}\n\tpanic(\"unreachable\")\n}",
"func (gts *GraphTraversalSource) Call(args ...interface{}) *GraphTraversal {\n\ttraversal := gts.GetGraphTraversal()\n\ttraversal.Bytecode.AddStep(\"call\", args...)\n\treturn traversal\n}",
"func (pc *programCode) createCall(name string) {\n\tcode := \"\"\n\tcode += \"\\n\\tcall \" + name + \"\\t; call label \" + name + \"\\n\"\n\tpc.appendCode(code)\n}",
"func (p *Process) Call(procFnc func(p *Process)) {\n\tprocFnc(p)\n}",
"func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {\n\tif n.Op() == ir.OCALLMETH {\n\t\tbase.FatalfAt(n.Pos(), \"OCALLMETH missed by typecheck\")\n\t}\n\tif n.Op() == ir.OCALLINTER || n.X.Op() == ir.OMETHEXPR {\n\t\t// We expect both interface call reflect.Type.Method and concrete\n\t\t// call reflect.(*rtype).Method.\n\t\tusemethod(n)\n\t}\n\tif n.Op() == ir.OCALLINTER {\n\t\treflectdata.MarkUsedIfaceMethod(n)\n\t}\n\n\tif n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE {\n\t\tdirectClosureCall(n)\n\t}\n\n\tif isFuncPCIntrinsic(n) {\n\t\t// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function, rewrite\n\t\t// it to the address of the function of the ABI fn is defined.\n\t\tname := n.X.(*ir.Name).Sym().Name\n\t\targ := n.Args[0]\n\t\tvar wantABI obj.ABI\n\t\tswitch name {\n\t\tcase \"FuncPCABI0\":\n\t\t\twantABI = obj.ABI0\n\t\tcase \"FuncPCABIInternal\":\n\t\t\twantABI = obj.ABIInternal\n\t\t}\n\t\tif isIfaceOfFunc(arg) {\n\t\t\tfn := arg.(*ir.ConvExpr).X.(*ir.Name)\n\t\t\tabi := fn.Func.ABI\n\t\t\tif abi != wantABI {\n\t\t\t\tbase.ErrorfAt(n.Pos(), \"internal/abi.%s expects an %v function, %s is defined as %v\", name, wantABI, fn.Sym().Name, abi)\n\t\t\t}\n\t\t\tvar e ir.Node = ir.NewLinksymExpr(n.Pos(), fn.Sym().LinksymABI(abi), types.Types[types.TUINTPTR])\n\t\t\te = ir.NewAddrExpr(n.Pos(), e)\n\t\t\te.SetType(types.Types[types.TUINTPTR].PtrTo())\n\t\t\te = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, n.Type(), e)\n\t\t\treturn e\n\t\t}\n\t\t// fn is not a defined function. It must be ABIInternal.\n\t\t// Read the address from func value, i.e. *(*uintptr)(idata(fn)).\n\t\tif wantABI != obj.ABIInternal {\n\t\t\tbase.ErrorfAt(n.Pos(), \"internal/abi.%s does not accept func expression, which is ABIInternal\", name)\n\t\t}\n\t\targ = walkExpr(arg, init)\n\t\tvar e ir.Node = ir.NewUnaryExpr(n.Pos(), ir.OIDATA, arg)\n\t\te.SetType(n.Type().PtrTo())\n\t\te = ir.NewStarExpr(n.Pos(), e)\n\t\te.SetType(n.Type())\n\t\treturn e\n\t}\n\n\twalkCall1(n, init)\n\treturn n\n}",
"func CallFunction(p Process, expr string, retLoadCfg *LoadConfig, checkEscape bool) error {\n\tbi := p.BinInfo()\n\tif !p.Common().fncallEnabled {\n\t\treturn errFuncCallUnsupportedBackend\n\t}\n\tfncall := &p.Common().fncallState\n\tif fncall.inProgress {\n\t\treturn errFuncCallInProgress\n\t}\n\n\t*fncall = functionCallState{}\n\n\tdbgcallfn := bi.LookupFunc[debugCallFunctionName]\n\tif dbgcallfn == nil {\n\t\treturn errFuncCallUnsupported\n\t}\n\n\t// check that the selected goroutine is running\n\tg := p.SelectedGoroutine()\n\tif g == nil {\n\t\treturn errNoGoroutine\n\t}\n\tif g.Status != Grunning || g.Thread == nil {\n\t\treturn errGoroutineNotRunning\n\t}\n\n\t// check that there are at least 256 bytes free on the stack\n\tregs, err := g.Thread.Registers(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tregs = regs.Copy()\n\tif regs.SP()-256 <= g.stacklo {\n\t\treturn errNotEnoughStack\n\t}\n\t_, err = regs.Get(int(x86asm.RAX))\n\tif err != nil {\n\t\treturn errFuncCallUnsupportedBackend\n\t}\n\n\tfn, closureAddr, argvars, err := funcCallEvalExpr(p, expr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targmem, err := funcCallArgFrame(fn, argvars, g, bi, checkEscape)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := callOP(bi, g.Thread, regs, dbgcallfn.Entry); err != nil {\n\t\treturn err\n\t}\n\t// write the desired argument frame size at SP-(2*pointer_size) (the extra pointer is the saved PC)\n\tif err := writePointer(bi, g.Thread, regs.SP()-3*uint64(bi.Arch.PtrSize()), uint64(len(argmem))); err != nil {\n\t\treturn err\n\t}\n\n\tfncall.inProgress = true\n\tfncall.savedRegs = regs\n\tfncall.expr = expr\n\tfncall.fn = fn\n\tfncall.closureAddr = closureAddr\n\tfncall.argmem = argmem\n\tfncall.retLoadCfg = retLoadCfg\n\n\tfncallLog(\"function call initiated %v frame size %d\\n\", fn, len(argmem))\n\n\treturn Continue(p)\n}",
"func (r *HistoryRunner) Plan(ctx context.Context) error {\n\tif len(r.filename) != 0 {\n\t\t// file mode\n\t\treturn r.planFile(ctx, r.filename)\n\t}\n\n\t// directory mode\n\treturn r.planDir(ctx)\n}",
"func EvalCall(a, b Attrib) (*AddrCode, error) {\n\tentry_ := a.(*AddrCode).Symbol\n\tentry, ok := entry_.(*codegen.TargetEntry)\n\tvar varEntry *codegen.VariableEntry\n\tif !ok {\n\t\tif variable, ok1 := entry_.(*codegen.VariableEntry); ok1 {\n\t\t\tif t, ok2 := variable.Type().(codegen.FuncType); ok2 {\n\t\t\t\tentry = t.Target\n\t\t\t\tvarEntry = variable\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid function call statement\")\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"invalid function call statement\")\n\t\t}\n\t}\n\texprList, ok := b.([]*AddrCode)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unable to type cast %v to []*AddrCode\", b)\n\t}\n\n\tvar isSystem bool\n\n\tif entry.Target == \"printf\" || entry.Target == \"scanf\" {\n\t\tisSystem = true\n\t}\n\n\tif !isSystem && len(exprList) != len(entry.InType) {\n\t\treturn nil, fmt.Errorf(\"wrong number of arguments in function call. expected %d, got %d\", len(entry.InType), len(exprList))\n\t}\n\tcode := a.(*AddrCode).Code\n\tfor i := len(exprList) - 1; i >= 0; i-- {\n\t\tif !isSystem && !SameType(exprList[i].Symbol.Type(), entry.InType[i]) {\n\t\t\treturn nil, fmt.Errorf(\"wrong type of argument %d in function call. expected %v, got %v\", i, entry.InType[i], exprList[i].Symbol.Type())\n\t\t}\n\t\tif isSystem && i == 0 {\n\t\t\tif !SameType(exprList[i].Symbol.Type(), stringType) {\n\t\t\t\treturn nil, fmt.Errorf(\"wrong type of 1st argument in function call. expected %v, got %v\", stringType, exprList[i].Symbol.Type())\n\t\t\t}\n\t\t}\n\t\tevaluatedExpr := EvalWrapped(exprList[i])\n\t\tcode = append(code, evaluatedExpr.Code...)\n\t\tcode = append(code, codegen.IRIns{\n\t\t\tTyp: codegen.KEY,\n\t\t\tOp: codegen.PARAM,\n\t\t\tArg1: evaluatedExpr.Symbol,\n\t\t})\n\t}\n\tif varEntry == nil {\n\t\tcode = append(code, codegen.IRIns{\n\t\t\tTyp: codegen.KEY,\n\t\t\tOp: codegen.CALL,\n\t\t\tArg1: entry,\n\t\t})\n\t} else {\n\t\tcode = append(code, codegen.IRIns{\n\t\t\tTyp: codegen.KEY,\n\t\t\tOp: codegen.CALL,\n\t\t\tArg1: varEntry,\n\t\t})\n\t}\n\t// code = append(code, codegen.IRIns{\n\t// \tTyp: codegen.KEY,\n\t// \tOp: codegen.UNALLOC,\n\t// \tArg1: &codegen.LiteralEntry{\n\t// \t\tValue: len(exprList) * 4,\n\t// \t\tLType: intType,\n\t// \t},\n\t// })\n\tentry1 := CreateTemporary(entry.RetType)\n\tcode = append(code, entry1.Code...)\n\tcode = append(code, codegen.IRIns{\n\t\tTyp: codegen.KEY,\n\t\tOp: codegen.SETRET,\n\t\tArg1: entry1.Symbol,\n\t})\n\treturn &AddrCode{\n\t\tCode: code,\n\t\tSymbol: entry1.Symbol,\n\t}, nil\n}",
"func walkPlan(plan planNode, observer planObserver) error {\n\tv := planVisitor{observer: observer}\n\tv.visit(plan)\n\treturn v.err\n}",
"func ResolveCall(ctx *broker.Context, node *specs.Node, call *specs.Call, flow specs.FlowInterface) (err error) {\n\tif call.Request != nil {\n\t\terr = ResolveParameterMap(ctx, node, call.Request, flow)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif call.Response != nil {\n\t\terr = ResolveParameterMap(ctx, node, call.Response, flow)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
planBinaryExpr generates a processor for a binary expression. A binary expression represents a join operator between two processors.
|
func (p *Planner) planBinaryExpr(e *Executor, expr *BinaryExpr) (processor, error) {
	// Create processor for LHS.
	lhs, err := p.planExpr(e, expr.LHS)
	if err != nil {
		return nil, fmt.Errorf("lhs: %s", err)
	}

	// Create processor for RHS.
	rhs, err := p.planExpr(e, expr.RHS)
	if err != nil {
		return nil, fmt.Errorf("rhs: %s", err)
	}

	// Combine processors.
	return newBinaryExprEvaluator(e, expr.Op, lhs, rhs), nil
}
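For context, a minimal sketch of how a planned binary expression could be consumed; `e` and `expr` are hypothetical values, not part of the dataset row, and the returned processor is assumed to expose the same start() used by the executor later in this file:

// Sketch only: plan lhs OP rhs and start the resulting evaluator.
// Assumes a previously constructed Executor e and a parsed *BinaryExpr expr.
proc, err := p.planBinaryExpr(e, expr)
if err != nil {
	return err
}
proc.start()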
|
[
"func newBinaryExprEvaluator(e *Executor, op Token, lhs, rhs processor) *binaryExprEvaluator {\n\treturn &binaryExprEvaluator{\n\t\texecutor: e,\n\t\top: op,\n\t\tlhs: lhs,\n\t\trhs: rhs,\n\t\tc: make(chan map[string]interface{}, 0),\n\t\tdone: make(chan chan struct{}, 0),\n\t}\n}",
"func TestCompiler_Compile_binaryExpr(t *testing.T) {\n\texpr := ast.BinaryExpr{\n\t\tX: ast.ScalarExpr{Val: \"1\", Typ: token.INT},\n\t\tOp: token.ADD,\n\t\tY: ast.BinaryExpr{\n\t\t\tX: ast.ScalarExpr{Val: \"2\", Typ: token.INT},\n\t\t\tOp: token.MUL,\n\t\t\tY: ast.ScalarExpr{Val: \"2\", Typ: token.INT},\n\t\t},\n\t}\n\tc := NewCompiler()\n\texpected := []Instruction{\n\t\t{Op: MULTIPLY, Arg1: Argument{Val: \"2\", ValType: INTEGER}, Arg2: Argument{Val: \"2\", ValType: INTEGER}, Ret: Argument{TVal: 1}},\n\t\t{Op: ADD, Arg1: Argument{Val: \"1\", ValType: INTEGER}, Arg2: Argument{TVal: 1}, Ret: Argument{TVal: 2}},\n\t}\n\tinsts := c.Compile(expr)\n\tif insts.tvals != 2 {\n\t\tt.Errorf(\"expected 2 got %d\", insts.tvals)\n\t}\n\tfor i, actual := range insts.instructions {\n\t\tif compareInstrucions(expected[i], actual) == false {\n\t\t\tt.Errorf(\"expected %s got %s\", expected, actual)\n\t\t}\n\t}\n}",
"func binary(typ int, od1 *expr, op string, od2 *expr) *expr {\n\treturn &expr{\n\t\tsexp: append(exprlist{atomic(typ, op)}, od1, od2),\n\t}\n}",
"func parseBinary(lex *lexer, prec1 int) Expr {\n\tlhs := parseUnary(lex)\n\tfor prec := precedence(lex.token); prec >= prec1; prec-- {\n\t\tfor precedence(lex.token) == prec {\n\t\t\top := lex.token\n\t\t\tlex.next() // consume operator\n\t\t\trhs := parseBinary(lex, prec+1)\n\t\t\tlhs = binary{op, lhs, rhs}\n\t\t}\n\t}\n\treturn lhs\n}",
"func NewBinaryBooleanExpression(op OP, lE, rE Evaluator) (Evaluator, error) {\n\tswitch op {\n\tcase AND, OR:\n\t\treturn &booleanNode{\n\t\t\top: op,\n\t\t\tlS: true,\n\t\t\tlE: lE,\n\t\t\trS: true,\n\t\t\trE: rE,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, errors.New(\"binary boolean expressions require the operation to be one for the follwing 'and', 'or'\")\n\t}\n}",
"func newBinaryOp(op string, expr1, expr2 Expression) Expression {\n\tswitch {\n\tcase expr1 != nil && expr2 != nil:\n\t\treturn &BinaryOp{\n\t\t\tOp: op,\n\t\t\tExpr1: expr1,\n\t\t\tExpr2: expr2,\n\t\t}\n\tcase expr1 != nil && expr2 == nil:\n\t\treturn expr1\n\tcase expr1 == nil && expr2 != nil:\n\t\treturn expr2\n\tcase expr1 == nil && expr2 == nil:\n\t\treturn nil\n\t}\n\tpanic(\"unreachable\")\n}",
"func NewBinaryExpr(op BinaryOp, lhs, rhs Expr) Expr {\n\t// assert(ExprWidth(lhs) == ExprWidth(rhs), \"binary expr width mismatch: op=%s (%T) %d != (%T) %d\", op, lhs, ExprWidth(lhs), rhs, ExprWidth(rhs))\n\n\tswitch op {\n\t// Arithmetic operators\n\tcase ADD:\n\t\treturn newAddExpr(lhs, rhs)\n\tcase SUB:\n\t\treturn newSubExpr(lhs, rhs)\n\tcase MUL:\n\t\treturn newMulExpr(lhs, rhs)\n\tcase UDIV, SDIV:\n\t\treturn newDivExpr(op, lhs, rhs)\n\tcase UREM, SREM:\n\t\treturn newRemExpr(op, lhs, rhs)\n\tcase AND:\n\t\treturn newAndExpr(lhs, rhs)\n\tcase OR:\n\t\treturn newOrExpr(lhs, rhs)\n\tcase XOR:\n\t\treturn newXorExpr(lhs, rhs)\n\tcase SHL:\n\t\treturn newShlExpr(lhs, rhs)\n\tcase LSHR:\n\t\treturn newLShrExpr(lhs, rhs)\n\tcase ASHR:\n\t\treturn newAShrExpr(lhs, rhs)\n\n\t// Comparison operators\n\tcase EQ:\n\t\treturn newEqExpr(lhs, rhs)\n\tcase NE:\n\t\treturn NewBinaryExpr(EQ, NewConstantExpr(0, WidthBool), NewBinaryExpr(EQ, lhs, rhs))\n\tcase ULT:\n\t\treturn newUltExpr(lhs, rhs)\n\tcase UGT:\n\t\treturn newUltExpr(rhs, lhs) // reverse\n\tcase ULE:\n\t\treturn newUleExpr(lhs, rhs)\n\tcase UGE:\n\t\treturn newUleExpr(rhs, lhs) // reverse\n\tcase SLT:\n\t\treturn newSltExpr(lhs, rhs)\n\tcase SGT:\n\t\treturn newSltExpr(rhs, lhs) // reverse\n\tcase SLE:\n\t\treturn newSleExpr(lhs, rhs)\n\tcase SGE:\n\t\treturn newSleExpr(rhs, lhs) // reverse\n\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n}",
"func JoinBinaryOp(op token.Token, spaceType dst.SpaceType, exprs ...dst.Expr) dst.Expr {\n\treturn Reduce(\n\t\tfunc(x, y dst.Expr) dst.Expr {\n\t\t\treturn &dst.BinaryExpr{X: x, Op: op, Y: y}\n\t\t},\n\t\tspaceType,\n\t\texprs...)\n}",
"func NewBinary(left Expr, op *token.T, right Expr) *Binary {\n\treturn &Binary{\n\t\tLeft: left,\n\t\tOperator: op,\n\t\tRight: right,\n\t}\n}",
"func _b(x interface{}, op string, y interface{}) ast.Expr {\n\tvar xx, yx ast.Expr\n\tif xstr, ok := x.(string); ok {\n\t\txx = _x(xstr)\n\t} else {\n\t\txx = x.(ast.Expr)\n\t}\n\tif ystr, ok := y.(string); ok {\n\t\tyx = _x(ystr)\n\t} else {\n\t\tyx = y.(ast.Expr)\n\t}\n\treturn &ast.BinaryExpr{\n\t\tX: xx,\n\t\tOp: _op(op),\n\t\tY: yx,\n\t}\n}",
"func (*Base) Binary(p ASTPass, node *ast.Binary, ctx Context) {\n\tp.Visit(p, &node.Left, ctx)\n\tp.Fodder(p, &node.OpFodder, ctx)\n\tp.Visit(p, &node.Right, ctx)\n}",
"func (i *Interpreter) visitBinary(b Binary) Object {\n\tleftObj := i.Evaluate(b.left)\n\trightObj := i.Evaluate(b.right)\n\tisNum := CheckNumberOperands(leftObj, rightObj)\n\tif isNum {\n\t\tlFloat, lIsFloat := leftObj.(Float)\n\t\trFloat, rIsFloat := rightObj.(Float)\n\t\t//if either is a float, figure out which is a float and then cast to floats\n\t\tif lIsFloat || rIsFloat {\n\t\t\tif !lIsFloat {\n\t\t\t\tleftInt := leftObj.(Integer)\n\t\t\t\tlFloat = Float{float64(leftInt.Value)}\n\t\t\t}\n\t\t\tif !rIsFloat {\n\t\t\t\trightInt := rightObj.(Integer)\n\t\t\t\trFloat = Float{float64(rightInt.Value)}\n\t\t\t}\n\t\t\treturn EvaluateFloat(lFloat, rFloat, b.operator)\n\t\t} else {\n\t\t\t//If neither are floats, they must be integers and should use integer math\n\t\t\tlInteger := leftObj.(Integer)\n\t\t\trInteger := rightObj.(Integer)\n\t\t\treturn EvaluateInt(lInteger, rInteger, b.operator)\n\t\t}\n\t}\n\tleftBool, rightBool, isBool := CheckBoolOperands(leftObj, rightObj)\n\tif isBool {\n\t\treturn EvaluateBoolean(leftBool, rightBool, b.operator)\n\t}\n\tif leftString, ok := leftObj.(String); ok {\n\t\tswitch b.operator.Type {\n\t\tcase PLUS:\n\t\t\treturn String{leftString.Value + Stringify(rightObj)}\n\t\tdefault:\n\t\t\tRuntimeError(\"string does not support '\" + b.operator.Type.String() + \"' operator\")\n\t\t}\n\t}\n\tRuntimeError(\"Mismatched operands: '\" + leftObj.Type() + \"' and '\" + rightObj.Type() + \"'\")\n\treturn NIL\n}",
"func (i *Interpreter) VisitBinaryExpr(expr *ast.Binary) interface{} {\n\tleft := i.evaluate(expr.Left)\n\tright := i.evaluate(expr.Right)\n\n\tswitch expr.Operator.Type {\n\t// Compators\n\tcase token.GreaterThan:\n\t\tcheckNumberOperands(expr.Operator, left, right)\n\t\treturn left.(float64) > right.(float64)\n\tcase token.GreaterThanOrEqual:\n\t\tcheckNumberOperands(expr.Operator, left, right)\n\t\treturn left.(float64) >= right.(float64)\n\tcase token.LessThan:\n\t\tcheckNumberOperands(expr.Operator, left, right)\n\t\treturn left.(float64) < right.(float64)\n\tcase token.LessThanOrEqual:\n\t\tcheckNumberOperands(expr.Operator, left, right)\n\t\treturn left.(float64) <= right.(float64)\n\t// Equality\n\tcase token.NotEqual:\n\t\treturn !isEqual(left, right)\n\tcase token.EqualEqual:\n\t\treturn isEqual(left, right)\n\t// Arithmetic\n\tcase token.Plus:\n\t\t// TODO: Handle Strings\n\t\tcheckNumberOperands(expr.Operator, left, right)\n\t\treturn left.(float64) + right.(float64)\n\tcase token.Minus:\n\t\tcheckNumberOperands(expr.Operator, left, right)\n\t\treturn left.(float64) - right.(float64)\n\tcase token.ForwardSlash:\n\t\tcheckNumberOperands(expr.Operator, left, right)\n\t\treturn left.(float64) / right.(float64)\n\tcase token.Star:\n\t\tcheckNumberOperands(expr.Operator, left, right)\n\t\treturn left.(float64) * right.(float64)\n\t}\n\t// Unreachable.\n\treturn nil\n}",
"func (s *BaseMySqlParserListener) EnterBinaryComparasionPredicate(ctx *BinaryComparasionPredicateContext) {\n}",
"func (s *BaselimboListener) EnterBinary_expression(ctx *Binary_expressionContext) {}",
"func NewBinary(op circuit.Operation, a, b, o *Wire) *Gate {\n\tgate := &Gate{\n\t\tOp: op,\n\t\tA: a,\n\t\tB: b,\n\t\tO: o,\n\t}\n\ta.AddOutput(gate)\n\tb.AddOutput(gate)\n\to.SetInput(gate)\n\n\treturn gate\n}",
"func BinaryExp(ex1 Exp, ex2 Exp, op byte) *BinaryExpType {\n\tbe := BinaryExpType{ex1, ex2, op}\n\treturn &be\n}",
"func (cmp *Comparator) RefOfBinaryExpr(a, b *BinaryExpr) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn a.Operator == b.Operator &&\n\t\tcmp.Expr(a.Left, b.Left) &&\n\t\tcmp.Expr(a.Right, b.Right)\n}",
"func ComputeBinaryOp(xI, yI interface{}, op token.Token) (interface{}, error) {\n\ttypeX := reflect.TypeOf(xI)\n\ttypeY := reflect.TypeOf(yI)\n\tif typeX == typeY {\n\t\tswitch xI.(type) {\n\t\tcase string:\n\t\t\tx := xI.(string)\n\t\t\ty := yI.(string)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\t}\n\t\tcase int:\n\t\t\tx := xI.(int)\n\t\t\ty := yI.(int)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase int8:\n\t\t\tx := xI.(int8)\n\t\t\ty := yI.(int8)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase int16:\n\t\t\tx := xI.(int16)\n\t\t\ty := yI.(int16)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase int32:\n\t\t\tx := xI.(int32)\n\t\t\ty := yI.(int32)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, 
nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase int64:\n\t\t\tx := xI.(int64)\n\t\t\ty := yI.(int64)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase uint:\n\t\t\tx := xI.(uint)\n\t\t\ty := yI.(uint)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase uint8:\n\t\t\tx := xI.(uint8)\n\t\t\ty := yI.(uint8)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase uint16:\n\t\t\tx := xI.(uint16)\n\t\t\ty := yI.(uint16)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase 
token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase uint32:\n\t\t\tx := xI.(uint32)\n\t\t\ty := yI.(uint32)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase uint64:\n\t\t\tx := xI.(uint64)\n\t\t\ty := yI.(uint64)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase uintptr:\n\t\t\tx := xI.(uintptr)\n\t\t\ty := yI.(uintptr)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.REM:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x % y, nil\n\t\t\tcase token.AND:\n\t\t\t\treturn x & y, nil\n\t\t\tcase token.OR:\n\t\t\t\treturn x | y, nil\n\t\t\tcase token.XOR:\n\t\t\t\treturn x ^ y, nil\n\t\t\tcase token.AND_NOT:\n\t\t\t\treturn x &^ y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase complex64:\n\t\t\tx := xI.(complex64)\n\t\t\ty := yI.(complex64)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\t}\n\t\tcase complex128:\n\t\t\tx := xI.(complex128)\n\t\t\ty := 
yI.(complex128)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\t}\n\t\tcase float32:\n\t\t\tx := xI.(float32)\n\t\t\ty := yI.(float32)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase float64:\n\t\t\tx := xI.(float64)\n\t\t\ty := yI.(float64)\n\t\t\tswitch op {\n\t\t\tcase token.ADD:\n\t\t\t\treturn x + y, nil\n\t\t\tcase token.SUB:\n\t\t\t\treturn x - y, nil\n\t\t\tcase token.MUL:\n\t\t\t\treturn x * y, nil\n\t\t\tcase token.QUO:\n\t\t\t\tif y == 0 {\n\t\t\t\t\treturn nil, ErrDivisionByZero\n\t\t\t\t}\n\n\t\t\t\treturn x / y, nil\n\t\t\tcase token.LSS:\n\t\t\t\treturn x < y, nil\n\t\t\tcase token.GTR:\n\t\t\t\treturn x > y, nil\n\t\t\tcase token.LEQ:\n\t\t\t\treturn x <= y, nil\n\t\t\tcase token.GEQ:\n\t\t\t\treturn x >= y, nil\n\t\t\t}\n\t\tcase bool:\n\t\t\tx := xI.(bool)\n\t\t\ty := yI.(bool)\n\t\t\tswitch op {\n\t\t\t// Bool\n\t\t\tcase token.LAND:\n\t\t\t\treturn x && y, nil\n\t\t\tcase token.LOR:\n\t\t\t\treturn x || y, nil\n\t\t\t}\n\t\t}\n\t}\n\tyUint, isUint := yI.(uint64)\n\tif !isUint {\n\t\tisUint = true\n\t\tswitch yV := yI.(type) {\n\t\tcase int:\n\t\t\tyUint = uint64(yV)\n\t\tcase int8:\n\t\t\tyUint = uint64(yV)\n\t\tcase int16:\n\t\t\tyUint = uint64(yV)\n\t\tcase int32:\n\t\t\tyUint = uint64(yV)\n\t\tcase int64:\n\t\t\tyUint = uint64(yV)\n\t\tcase uint:\n\t\t\tyUint = uint64(yV)\n\t\tcase uintptr:\n\t\t\tyUint = uint64(yV)\n\t\tcase uint8:\n\t\t\tyUint = uint64(yV)\n\t\tcase uint16:\n\t\t\tyUint = uint64(yV)\n\t\tcase uint32:\n\t\t\tyUint = uint64(yV)\n\t\tcase float32:\n\t\t\tyUint = uint64(yV)\n\t\tcase float64:\n\t\t\tyUint = uint64(yV)\n\t\tdefault:\n\t\t\tisUint = false\n\t\t}\n\t}\n\tif isUint {\n\t\tswitch xI.(type) {\n\t\tcase int:\n\t\t\tx := xI.(int)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase int8:\n\t\t\tx := xI.(int8)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase int16:\n\t\t\tx := xI.(int16)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase int32:\n\t\t\tx := xI.(int32)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase int64:\n\t\t\tx := xI.(int64)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase uint:\n\t\t\tx := xI.(uint)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> 
yUint, nil\n\t\t\t}\n\t\tcase uint8:\n\t\t\tx := xI.(uint8)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase uint16:\n\t\t\tx := xI.(uint16)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase uint32:\n\t\t\tx := xI.(uint32)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase uint64:\n\t\t\tx := xI.(uint64)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\tcase uintptr:\n\t\t\tx := xI.(uintptr)\n\t\t\tswitch op {\n\t\t\t// Num, uint\n\t\t\tcase token.SHL:\n\t\t\t\treturn x << yUint, nil\n\t\t\tcase token.SHR:\n\t\t\t\treturn x >> yUint, nil\n\t\t\t}\n\t\t}\n\t}\n\t// Anything\n\tswitch op {\n\tcase token.EQL:\n\t\treturn xI == yI, nil\n\tcase token.NEQ:\n\t\treturn xI != yI, nil\n\t}\n\treturn nil, fmt.Errorf(\"unknown operation %#v between %#v and %#v\", op, xI, yI)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
extractTags extracts a tag key/value map from a statement. Extracted tags are removed from the statement.
|
func (p *Planner) extractTags(name string, expr Expr, tags map[string]string) (Expr, error) {
	// TODO: Refactor into a walk-like Replace().
	switch expr := expr.(type) {
	case *BinaryExpr:
		// If the LHS is a variable ref then check for tag equality.
		if lhs, ok := expr.LHS.(*VarRef); ok && expr.Op == EQ {
			return p.extractBinaryExprTags(name, expr, lhs, expr.RHS, tags)
		}

		// If the RHS is a variable ref then check for tag equality.
		if rhs, ok := expr.RHS.(*VarRef); ok && expr.Op == EQ {
			return p.extractBinaryExprTags(name, expr, rhs, expr.LHS, tags)
		}

		// Recursively process LHS.
		lhs, err := p.extractTags(name, expr.LHS, tags)
		if err != nil {
			return nil, err
		}
		expr.LHS = lhs

		// Recursively process RHS.
		rhs, err := p.extractTags(name, expr.RHS, tags)
		if err != nil {
			return nil, err
		}
		expr.RHS = rhs

		return expr, nil

	case *ParenExpr:
		e, err := p.extractTags(name, expr.Expr, tags)
		if err != nil {
			return nil, err
		}
		expr.Expr = e
		return expr, nil

	default:
		return expr, nil
	}
}
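A hedged usage sketch, assuming expr was parsed from a clause like cpu.host = 'serverA' AND cpu.value > 10 (the measurement name and values are illustrative, not from the dataset):

// Sketch: pull tag-equality filters out of a WHERE expression.
tags := make(map[string]string)
remaining, err := p.extractTags("cpu", expr, tags)
if err != nil {
	return err
}
// tags now holds {"host": "serverA"}; per the code above, the matched
// side of the AND is replaced with nil inside the returned expression.
_ = remaining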
|
[
"func Extract(ctx context.Context) *Tags {\n\tt, ok := ctx.Value(ctxMarkerKey).(*Tags)\n\tif !ok {\n\t\treturn &Tags{values: make(map[string]interface{})}\n\t}\n\n\treturn t\n}",
"func ExtractTags(s string) (map[string]string, error) {\n\tif s == \"\" {\n\t\treturn nil, nil\n\t}\n\ttags := make(map[string]string)\n\ttagSlice := strings.Split(s, \",\")\n\tfor _, tag := range tagSlice {\n\t\tpair := strings.SplitN(tag, \"=\", 2)\n\t\tif len(pair) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"bad tag %s\", tag)\n\t\t}\n\t\tk := strings.TrimSpace(pair[0])\n\t\tv := strings.TrimSpace(pair[1])\n\t\ttags[k] = v\n\n\t}\n\treturn tags, nil\n}",
"func getTags(trustReport *hvs.TrustReport) map[string]string {\n\tdefaultLog.Trace(\"hosttrust/saml_report:getTags() Entering\")\n\tdefer defaultLog.Trace(\"hosttrust/saml_report:getTags() Leaving\")\n\n\ttagPrefix := \"TAG_\"\n\ttagsMap := make(map[string]string)\n\tfor _, result := range trustReport.GetResultsForMarker(common.FlavorPartAssetTag.String()) {\n\t\tif result.Rule.Name == faultsConst.RuleAssetTagMatches && len(result.Rule.Tags) > 0 {\n\t\t\tfor key, value := range result.Rule.Tags {\n\t\t\t\ttagsMap[tagPrefix+key] = value\n\t\t\t}\n\t\t}\n\t}\n\treturn tagsMap\n}",
"func (r serverResult) ExtractTags() ([]string, error) {\n\tvar s struct {\n\t\tTags []string `json:\"tags\"`\n\t}\n\terr := r.ExtractInto(&s)\n\treturn s.Tags, err\n}",
"func ExtractTags(article *Article) (err error) {\n\tscanner := bufio.NewScanner(bytes.NewBuffer(article.Raw))\n\n\tvar b []byte\n\tout := bytes.NewBuffer(b)\n\n\tfor scanner.Scan() {\n\t\ttext := scanner.Text()\n\t\tif strings.HasPrefix(text, TagsPrefix) {\n\t\t\ttext = strings.TrimPrefix(text, TagsPrefix)\n\t\t\ttext = strings.TrimRight(text, ClosingComment)\n\t\t\tarticle.Tags = normalizeTags(strings.Split(text, \",\"))\n\t\t} else {\n\t\t\tif _, err = out.WriteString(text + NewLine); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tarticle.Raw = out.Bytes()\n\treturn\n}",
"func extractTagValues(regex *regexp.Regexp, tagList []string, input string) api.TagSet {\n\tmatches := regex.FindStringSubmatch(input)\n\tif matches == nil {\n\t\treturn nil\n\t}\n\ttagSet := api.NewTagSet()\n\tfor index, tagValue := range matches {\n\t\tif index == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttagKey := tagList[index-1]\n\t\ttagSet[tagKey] = tagValue\n\t}\n\treturn tagSet\n}",
"func parseTags(st reflect.StructTag) tags {\n\ts := st.Get(ndrNameSpace)\n\tt := tags{\n\t\tValues: []string{},\n\t\tMap: make(map[string]string),\n\t}\n\tif s != \"\" {\n\t\tndrTags := strings.Trim(s, `\"`)\n\t\tfor _, tag := range strings.Split(ndrTags, \",\") {\n\t\t\tif strings.Contains(tag, \":\") {\n\t\t\t\tm := strings.SplitN(tag, \":\", 2)\n\t\t\t\tt.Map[m[0]] = m[1]\n\t\t\t} else {\n\t\t\t\tt.Values = append(t.Values, tag)\n\t\t\t}\n\t\t}\n\t}\n\treturn t\n}",
"func (p *Planner) extractBinaryExprTags(name string, expr Expr, ref *VarRef, value Expr, tags map[string]string) (Expr, error) {\n\t// Ignore if the value is not a string literal.\n\tlit, ok := value.(*StringLiteral)\n\tif !ok {\n\t\treturn expr, nil\n\t}\n\n\t// Extract the key and remove the measurement prefix.\n\tkey := strings.TrimPrefix(ref.Val, name+\".\")\n\n\t// If tag is already filtered then return error.\n\tif _, ok := tags[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate tag filter: %s.%s\", name, key)\n\t}\n\n\t// Add tag to the filter.\n\ttags[key] = lit.Val\n\n\t// Return nil to remove the expression.\n\treturn nil, nil\n}",
"func (t *Table) ExtractAttributes(node *past.Call) {\n\tif t.Attributes == nil {\n\t\tt.Attributes = map[string]interface{}{}\n\t}\n\tfor _, kw := range node.Keywords {\n\t\toptkey := string(kw.Arg)\n\t\tswitch v := kw.Value.(type) {\n\t\tcase *past.NameConstant:\n\t\t\tt.Attributes[optkey] = v.Value\n\t\tcase *past.Str:\n\t\t\tt.Attributes[optkey] = string(v.S)\n\t\tcase *past.Name:\n\t\t\tt.Attributes[optkey] = string(v.Id)\n\t\t}\n\t}\n\tt.Logger().Debug(\"Extracted table attributes\")\n}",
"func getStatsTagsMap(ctx context.Context) Tags {\n\tif ctx != nil {\n\t\tfields, _ := ctx.Value(tagsKey).(Tags)\n\t\tif fields != nil {\n\t\t\treturn fields\n\t\t}\n\t}\n\n\treturn Tags{}\n}",
"func tagKeysToMap(tags string) map[string]*string {\n\toutput := make(map[string]*string)\n\n\tfor _, tag := range strings.Split(strings.TrimSpace(tags), \",\") {\n\t\tsplit := strings.SplitN(tag, \"=\", 2)\n\t\tkey := strings.TrimSpace(split[0])\n\t\tvalue := \"\"\n\n\t\tif key == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(split) > 1 {\n\t\t\tvalue = strings.TrimSpace(split[1])\n\t\t}\n\n\t\toutput[key] = &value\n\t}\n\n\tif len(output) == 0 {\n\t\treturn nil\n\t}\n\n\treturn output\n}",
"func (q *Query) processTags(out *bytes.Buffer, groupingTags []string) ([]string, int) { // nolint: golint, interfacer\n\t// Grouping tags based on native WarpScript \"equivalence class\"\n\ttags := q.Tags\n\tfor tagk, tagv := range tags {\n\t\tif tagv == \"*\" {\n\t\t\t// Simple group by\n\t\t\tgroupingTags = append(groupingTags, tagk)\n\t\t\t// Add a filter that makes sure the tag exists\n\t\t\tfmt.Fprintf(out, \"'%s' '~.*'\\n\", tagk)\n\n\t\t} else if strings.Contains(tagv, \"|\") {\n\t\t\t// Filtered group by\n\t\t\tgroupingTags = append(groupingTags, tagk)\n\n\t\t\tfmt.Fprintf(out, \"'%s' '~\", tagk) // open regexp\n\n\t\t\tvalues := strings.Split(tagv, \"|\")\n\t\t\tquotedValues := make([]string, len(values))\n\t\t\tfor i, value := range values {\n\t\t\t\tquotedValues[i] = regexp.QuoteMeta(strings.TrimSpace(value))\n\t\t\t}\n\t\t\tfmt.Fprintf(out, \"%s\", strings.Join(quotedValues, \"|\"))\n\t\t\tfmt.Fprint(out, \"'\\n\") // close regexp\n\n\t\t} else {\n\t\t\t// Regular filter\n\t\t\tfmt.Fprintf(out, \"'%s' '=%s'\\n\", tagk, tagv)\n\t\t}\n\t}\n\treturn groupingTags, len(tags)\n}",
"func (t HTML5SemanticTagsTechnique) Extract(html string) DirtyExtracted {\n\textracted := GetEmptyDirtyExtracted()\n\tt.setName(\"HTML5SemanticTagsTechnique\")\n\n\tdoc, err := goquery.NewDocumentFromReader(strings.NewReader(html))\n\tif err != nil {\n\t\treturn extracted\n\t}\n\n\tdoc.Find(\"article\").Each(func(i int, selection *goquery.Selection) {\n\t\tif title := selection.Find(\"h1\").Text(); title != EmptyString {\n\t\t\textracted[TitlesField] = append(extracted[TitlesField], title)\n\t\t}\n\t\tif desc := selection.Find(\"p\").Text(); desc != EmptyString {\n\t\t\textracted[DescriptionsField] = append(extracted[DescriptionsField], desc)\n\t\t}\n\t})\n\n\tdoc.Find(\"video\").Each(func(i int, selection *goquery.Selection) {\n\t\tselection.Find(\"source\").Each(func(i int, selection *goquery.Selection) {\n\t\t\tif src, ok := selection.Attr(\"src\"); ok {\n\t\t\t\textracted[VideosField] = append(extracted[VideosField], src)\n\t\t\t}\n\t\t})\n\t})\n\treturn extracted\n}",
"func parseTags(s string) map[string]string {\n\ttags := strings.Split(s, \",\")\n\ttagMap := make(map[string]string)\n\tfor _, tag := range tags {\n\t\tif len(tag) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tprop := strings.Split(tag, \"(\")\n\t\tif len(prop) == 2 && len(prop[1]) > 1 {\n\t\t\ttagMap[prop[0]] = prop[1][:len(prop[1])-1]\n\t\t} else {\n\t\t\ttagMap[tag] = \"\"\n\t\t}\n\t}\n\treturn tagMap\n}",
"func ExtractEntries(parserInfo []plugins.Entries, trigger string) map[string]string {\n\tentries := make(map[string]string)\n\n\tvar parser metatools.Parser\n\tparser.Init(trigger)\n\n\tfmt.Println(\"extracting extries for sentence : \", trigger)\n\tfor _, e := range parserInfo {\n\t\tres := e.Resources.(map[string]interface{})\n\t\tswitch e.Parser {\n\t\tcase \"before\":\n\t\t\tentries[e.Name] = parser.Before(res[\"key\"].(string), int(res[\"x\"].(float64)))\n\t\tcase \"after\":\n\t\t\tentries[e.Name] = parser.After(res[\"key\"].(string), int(res[\"x\"].(float64)))\n\t\tcase \"between\":\n\t\t\tentries[e.Name] = parser.Between(res[\"after\"].(string), res[\"before\"].(string), int(res[\"x\"].(float64)), int(res[\"y\"].(float64)))\n\t\t}\n\t}\n\treturn entries\n}",
"func (e *Extractor) FieldValueFromTagMap(tag string) (out map[string]interface{}, err error) {\n\n\tif err := e.isValidStruct(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tout = make(map[string]interface{})\n\ts := reflect.ValueOf(e.StructAddr).Elem()\n\tfields := e.fields(s)\n\n\tfor _, field := range fields {\n\t\tif val, ok := field.tags.Lookup(tag); ok {\n\t\t\tkey, omit := e.parseOmitempty(val, field.value)\n\t\t\tif omit {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tout[key] = field.value.Interface()\n\t\t}\n\n\t}\n\n\treturn\n}",
"func extractTags(txt string) []string {\n\ttags := []string{}\n\tfor _, match := range TagsRx.FindAllStringSubmatch(txt, -1) {\n\t\ttags = append(tags, strings.TrimLeft(match[1], \"#\"))\n\t}\n\treturn tags\n}",
"func DecodeTag(structTag reflect.StructTag, tagName string) map[string]string {\r\n\ttags := make(map[string]string)\r\n\t\r\n\ttagString := structTag.Get(tagName)\r\n\tif(tagString == \"\") {\r\n\t\treturn tags\r\n\t}\r\n\t\t\r\n\t// NB This will break any individual tags with comma in them\r\n\tfor _, kvpairGlued := range strings.Split(tagString, \",\") {\r\n\t\tkvPair := strings.Split(kvpairGlued, \"=\")\r\n\t\tswitch len(kvPair) {\r\n\t\tcase 1:\r\n\t\t\ttags[kvPair[0]] = kvPair[0];\t\t\t\r\n\t\tcase 2:\r\n\t\t\ttags[kvPair[0]] = kvPair[1];\r\n\t\tdefault:\r\n\t\t\tkey, value := kvPair[0], kvPair[1:]\r\n\t\t\ttags[key] = strings.Join(value, \"=\")\r\n\t\t}\r\n\t}\r\n\r\n\treturn tags\r\n}",
"func tags(kv ...string) map[string]string {\n\tm := make(map[string]string)\n\tfor i := 0; i < len(kv)-1; i += 2 {\n\t\tm[kv[i]] = kv[i+1]\n\t}\n\treturn m\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
extractBinaryExprTags extracts a tag key/value map from a statement.
|
func (p *Planner) extractBinaryExprTags(name string, expr Expr, ref *VarRef, value Expr, tags map[string]string) (Expr, error) {
	// Ignore if the value is not a string literal.
	lit, ok := value.(*StringLiteral)
	if !ok {
		return expr, nil
	}

	// Extract the key and remove the measurement prefix.
	key := strings.TrimPrefix(ref.Val, name+".")

	// If tag is already filtered then return error.
	if _, ok := tags[key]; ok {
		return nil, fmt.Errorf("duplicate tag filter: %s.%s", name, key)
	}

	// Add tag to the filter.
	tags[key] = lit.Val

	// Return nil to remove the expression.
	return nil, nil
}
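A short sketch of the duplicate-filter guard; expr, ref, and value stand for hypothetical pre-parsed pieces of a second cpu.host comparison:

// Sketch: a second equality on an already-filtered tag key is rejected.
tags := map[string]string{"host": "serverA"}
_, err := p.extractBinaryExprTags("cpu", expr, ref, value, tags)
// err reads: duplicate tag filter: cpu.host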
|
[
"func (p *Planner) extractTags(name string, expr Expr, tags map[string]string) (Expr, error) {\n\t// TODO: Refactor into a walk-like Replace().\n\tswitch expr := expr.(type) {\n\tcase *BinaryExpr:\n\t\t// If the LHS is a variable ref then check for tag equality.\n\t\tif lhs, ok := expr.LHS.(*VarRef); ok && expr.Op == EQ {\n\t\t\treturn p.extractBinaryExprTags(name, expr, lhs, expr.RHS, tags)\n\t\t}\n\n\t\t// If the RHS is a variable ref then check for tag equality.\n\t\tif rhs, ok := expr.RHS.(*VarRef); ok && expr.Op == EQ {\n\t\t\treturn p.extractBinaryExprTags(name, expr, rhs, expr.LHS, tags)\n\t\t}\n\n\t\t// Recursively process LHS.\n\t\tlhs, err := p.extractTags(name, expr.LHS, tags)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texpr.LHS = lhs\n\n\t\t// Recursively process RHS.\n\t\trhs, err := p.extractTags(name, expr.RHS, tags)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texpr.RHS = rhs\n\n\t\treturn expr, nil\n\n\tcase *ParenExpr:\n\t\te, err := p.extractTags(name, expr.Expr, tags)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texpr.Expr = e\n\t\treturn expr, nil\n\n\tdefault:\n\t\treturn expr, nil\n\t}\n}",
"func (sm *SpecMore) TagsMap(inclTop, inclOps bool) map[string]int {\n\ttagsMap := map[string]int{}\n\tif inclTop {\n\t\tfor _, tag := range sm.Spec.Tags {\n\t\t\ttagName := strings.TrimSpace(tag.Name)\n\t\t\tif len(tagName) > 0 {\n\t\t\t\tif _, ok := tagsMap[tagName]; !ok {\n\t\t\t\t\ttagsMap[tagName] = 0\n\t\t\t\t}\n\t\t\t\ttagsMap[tagName]++\n\t\t\t}\n\t\t}\n\t}\n\tif inclOps {\n\t\tVisitOperations(sm.Spec, func(skipPath, skipMethod string, op *oas3.Operation) {\n\t\t\tfor _, tagName := range op.Tags {\n\t\t\t\ttagName = strings.TrimSpace(tagName)\n\t\t\t\tif len(tagName) > 0 {\n\t\t\t\t\tif _, ok := tagsMap[tagName]; !ok {\n\t\t\t\t\t\ttagsMap[tagName] = 0\n\t\t\t\t\t}\n\t\t\t\t\ttagsMap[tagName]++\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n\treturn tagsMap\n}",
"func (e *Extractor) FieldValueFromTagMap(tag string) (out map[string]interface{}, err error) {\n\n\tif err := e.isValidStruct(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tout = make(map[string]interface{})\n\ts := reflect.ValueOf(e.StructAddr).Elem()\n\tfields := e.fields(s)\n\n\tfor _, field := range fields {\n\t\tif val, ok := field.tags.Lookup(tag); ok {\n\t\t\tkey, omit := e.parseOmitempty(val, field.value)\n\t\t\tif omit {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tout[key] = field.value.Interface()\n\t\t}\n\n\t}\n\n\treturn\n}",
"func keyvalTagsMap(keyMap []string, valueMap []interface{}, f *Feature) (tags []uint32, err error) {\n\n\tif f == nil {\n\t\treturn nil, ErrNilFeature\n\t}\n\n\tvar kidx, vidx int64\n\n\tfor key, val := range f.Tags {\n\n\t\tkidx, vidx = -1, -1 // Set to known not found value.\n\n\t\tfor i, k := range keyMap {\n\t\t\tif k != key {\n\t\t\t\tcontinue // move to the next key\n\t\t\t}\n\t\t\tkidx = int64(i)\n\t\t\tbreak // we found a match\n\t\t}\n\n\t\tif kidx == -1 {\n\t\t\tlog.Printf(\"did not find key (%v) in keymap.\", key)\n\t\t\treturn tags, fmt.Errorf(\"did not find key (%v) in keymap.\", key)\n\t\t}\n\n\t\t// if val is nil we skip it for now\n\t\t// https://github.com/mapbox/vector-tile-spec/issues/62\n\t\tif val == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i, v := range valueMap {\n\t\t\tswitch tv := val.(type) {\n\t\t\tdefault:\n\t\t\t\treturn tags, fmt.Errorf(\"value (%[1]v) of type (%[1]T) for key (%[2]v) is not supported.\", tv, key)\n\t\t\tcase string:\n\t\t\t\tvmt, ok := v.(string) // Make sure the type of the Value map matches the type of the Tag's value\n\t\t\t\tif !ok || vmt != tv { // and that the values match\n\t\t\t\t\tcontinue // if they don't match move to the next value.\n\t\t\t\t}\n\t\t\tcase fmt.Stringer:\n\t\t\t\tvmt, ok := v.(fmt.Stringer)\n\t\t\t\tif !ok || vmt.String() != tv.String() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase int:\n\t\t\t\tvmt, ok := v.(int)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase int8:\n\t\t\t\tvmt, ok := v.(int8)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase int16:\n\t\t\t\tvmt, ok := v.(int16)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase int32:\n\t\t\t\tvmt, ok := v.(int32)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tvmt, ok := v.(int64)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase uint:\n\t\t\t\tvmt, ok := v.(uint)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase uint8:\n\t\t\t\tvmt, ok := v.(uint8)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase uint16:\n\t\t\t\tvmt, ok := v.(uint16)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase uint32:\n\t\t\t\tvmt, ok := v.(uint32)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase uint64:\n\t\t\t\tvmt, ok := v.(uint64)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\tcase float32:\n\t\t\t\tvmt, ok := v.(float32)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tvmt, ok := v.(float64)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase bool:\n\t\t\t\tvmt, ok := v.(bool)\n\t\t\t\tif !ok || vmt != tv {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} // Values Switch Statement\n\t\t\t// if the values match let's record the index.\n\t\t\tvidx = int64(i)\n\t\t\tbreak // we found our value no need to continue on.\n\t\t} // range on value\n\n\t\tif vidx == -1 { // None of the values matched.\n\t\t\treturn tags, fmt.Errorf(\"did not find a value: %v in valuemap.\", val)\n\t\t}\n\t\ttags = append(tags, uint32(kidx), uint32(vidx))\n\t} // Move to the next tag key and value.\n\n\treturn tags, nil\n}",
"func GetValueTagMap(src map[string]interface{}) map[string]interface{} {\n\tres := NewEmptyTagMap()\n\tres[\"inname\"] = \"value\"\n\tres[\"exname\"] = \"value\"\n\tres[\"type\"] = src[\"type\"]\n\tres[\"length\"] = src[\"length\"]\n\tres[\"scale\"] = src[\"scale\"]\n\tres[\"precision\"] = src[\"precision\"]\n\tres[\"fieldid\"] = src[\"fieldid\"]\n\treturn res\n}",
"func getTags(trustReport *hvs.TrustReport) map[string]string {\n\tdefaultLog.Trace(\"hosttrust/saml_report:getTags() Entering\")\n\tdefer defaultLog.Trace(\"hosttrust/saml_report:getTags() Leaving\")\n\n\ttagPrefix := \"TAG_\"\n\ttagsMap := make(map[string]string)\n\tfor _, result := range trustReport.GetResultsForMarker(common.FlavorPartAssetTag.String()) {\n\t\tif result.Rule.Name == faultsConst.RuleAssetTagMatches && len(result.Rule.Tags) > 0 {\n\t\t\tfor key, value := range result.Rule.Tags {\n\t\t\t\ttagsMap[tagPrefix+key] = value\n\t\t\t}\n\t\t}\n\t}\n\treturn tagsMap\n}",
"func tags(kv ...string) map[string]string {\n\tm := make(map[string]string)\n\tfor i := 0; i < len(kv)-1; i += 2 {\n\t\tm[kv[i]] = kv[i+1]\n\t}\n\treturn m\n}",
"func newBinaryExprGuard(expr *influxql.BinaryExpr) *exprGuard {\n\t// if it's a nested binary expression, always match.\n\tif _, ok := expr.LHS.(*influxql.BinaryExpr); ok {\n\t\treturn nil\n\t} else if _, ok := expr.RHS.(*influxql.BinaryExpr); ok {\n\t\treturn nil\n\t}\n\n\t// ensure one of the expressions is a VarRef, and make that the key.\n\tkey, ok := expr.LHS.(*influxql.VarRef)\n\tvalue := expr.RHS\n\tif !ok {\n\t\tkey, ok = expr.RHS.(*influxql.VarRef)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\t\tvalue = expr.LHS\n\t}\n\n\t// check the key for situations we know we can't filter.\n\tif key.Val != \"_name\" && key.Type != influxql.Unknown && key.Type != influxql.Tag {\n\t\treturn nil\n\t}\n\n\t// scrutinize the value to return an efficient guard.\n\tswitch value := value.(type) {\n\tcase *influxql.StringLiteral:\n\t\tval := []byte(value.Val)\n\t\tg := &exprGuard{tagMatches: &tagGuard{\n\t\t\tmeas: key.Val == \"_name\",\n\t\t\tkey: []byte(key.Val),\n\t\t}}\n\n\t\tswitch expr.Op {\n\t\tcase influxql.EQ:\n\t\t\tg.tagMatches.op = func(x []byte) bool { return bytes.Equal(val, x) }\n\n\t\tcase influxql.NEQ:\n\t\t\tg.tagMatches.op = func(x []byte) bool { return !bytes.Equal(val, x) }\n\n\t\tdefault: // any other operator isn't valid. conservatively match everything.\n\t\t\treturn nil\n\t\t}\n\n\t\treturn g\n\n\tcase *influxql.RegexLiteral:\n\t\t// There's a tradeoff between being precise and being fast. For example, if the\n\t\t// delete includes a very expensive regex, we don't want to run that against every\n\t\t// incoming point. The decision here is to match any point that has a possibly\n\t\t// expensive match if there is any overlap on the tags. In other words, expensive\n\t\t// matches get transformed into trivially matching everything.\n\t\treturn &exprGuard{tagExists: map[string]struct{}{key.Val: {}}}\n\n\tcase *influxql.VarRef:\n\t\t// We could do a better job here by encoding the two names and checking the points\n\t\t// against them, but I'm not quite sure how to do that. Be conservative and match\n\t\t// any points that contain either the key or value.\n\n\t\t// since every point has a measurement, always match if either are on the measurement.\n\t\tif key.Val == \"_name\" || value.Val == \"_name\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn &exprGuard{tagExists: map[string]struct{}{\n\t\t\tkey.Val: {},\n\t\t\tvalue.Val: {},\n\t\t}}\n\n\tdefault: // any other value type matches everything\n\t\treturn nil\n\t}\n}",
"func GetValueTagMap(src map[string]interface{}) map[string]interface{} {\n\tres := NewEmptyTagMap()\n\tres[\"inname\"] = \"value\"\n\tres[\"exname\"] = \"value\"\n\tres[\"type\"] = src[\"valuetype\"]\n\tres[\"basetype\"] = src[\"valuebasetype\"]\n\tres[\"length\"] = src[\"valuelength\"]\n\tres[\"scale\"] = src[\"valuescale\"]\n\tres[\"precision\"] = src[\"valueprecision\"]\n\tres[\"fieldid\"] = src[\"valuefieldid\"]\n\tres[\"repetitiontype\"] = src[\"valuerepetitiontype\"]\n\tres[\"encoding\"] = src[\"valueencoding\"]\n\treturn res\n}",
"func mapColumnTags(tables map[string]schema.Schema) (m map[uint64]string) {\n\tm = make(map[uint64]string, len(tables))\n\tfor tbl, sch := range tables {\n\t\tfor _, tag := range sch.GetAllCols().Tags {\n\t\t\tm[tag] = tbl\n\t\t}\n\t}\n\treturn\n}",
"func mapTags(tags []*elb.Tag) map[string]string {\n\ttagMap := make(map[string]string)\n\tfor _, t := range tags {\n\t\ttagMap[*t.Key] = *t.Value\n\t}\n\n\treturn tagMap\n}",
"func tagKeysToMap(tags string) map[string]*string {\n\toutput := make(map[string]*string)\n\n\tfor _, tag := range strings.Split(strings.TrimSpace(tags), \",\") {\n\t\tsplit := strings.SplitN(tag, \"=\", 2)\n\t\tkey := strings.TrimSpace(split[0])\n\t\tvalue := \"\"\n\n\t\tif key == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(split) > 1 {\n\t\t\tvalue = strings.TrimSpace(split[1])\n\t\t}\n\n\t\toutput[key] = &value\n\t}\n\n\tif len(output) == 0 {\n\t\treturn nil\n\t}\n\n\treturn output\n}",
"func getStatsTagsMap(ctx context.Context) Tags {\n\tif ctx != nil {\n\t\tfields, _ := ctx.Value(tagsKey).(Tags)\n\t\tif fields != nil {\n\t\t\treturn fields\n\t\t}\n\t}\n\n\treturn Tags{}\n}",
"func parseNextBinaryValue(ann []Symbol, r io.Reader) (Value, error) {\n\tswitch high, low, err := readNibblesHighAndLow(r); {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase low == 0xF:\n\t\treturn parseBinaryNull(high)\n\tcase high == binaryTypePadding:\n\t\treturn parseBinaryPadding(low, r)\n\tcase high == binaryTypeBool:\n\t\treturn parseBinaryBool(ann, low)\n\tcase high == binaryTypeInt || high == binaryTypeNegInt:\n\t\t// 2 = positive integer, 3 = negative integer.\n\t\treturn parseBinaryInt(ann, high == binaryTypeNegInt, low, r)\n\tcase high == binaryTypeFloat:\n\t\treturn parseBinaryFloat(ann, low, r)\n\tcase high == binaryTypeDecimal:\n\t\treturn parseBinaryDecimal(ann, low, r)\n\tcase high == binaryTypeTimestamp:\n\t\treturn parseBinaryTimestamp(ann, low, r)\n\tcase high == binaryTypeSymbol:\n\t\treturn parseBinarySymbol(ann, low, r)\n\tcase high == binaryTypeString:\n\t\treturn parseBinaryString(ann, low, r)\n\tcase high == binaryTypeBlob || high == binaryTypeClob:\n\t\treturn parseBinaryBytes(ann, high, low, r)\n\tcase high == binaryTypeList || high == binaryTypeSExp:\n\t\treturn parseBinaryList(ann, high, low, r)\n\tcase high == binaryTypeStruct:\n\t\treturn parseBinaryStruct(ann, low, r)\n\tcase high == binaryTypeAnnotation:\n\t\tif len(ann) != 0 {\n\t\t\treturn nil, errors.New(\"nesting annotations is not legal\")\n\t\t}\n\t\treturn parseBinaryAnnotation(low, r)\n\tdefault:\n\t\treturn nil, errors.Errorf(\"invalid header combination - high: %d low: %d\", high, low)\n\t}\n}",
"func parseBinary(lex *lexer, prec1 int) Expr {\n\tlhs := parseUnary(lex)\n\tfor prec := precedence(lex.token); prec >= prec1; prec-- {\n\t\tfor precedence(lex.token) == prec {\n\t\t\top := lex.token\n\t\t\tlex.next() // consume operator\n\t\t\trhs := parseBinary(lex, prec+1)\n\t\t\tlhs = binary{op, lhs, rhs}\n\t\t}\n\t}\n\treturn lhs\n}",
"func parseBinaryVersionMarker(r io.Reader) (Value, error) {\n\tnumBytes := len(ion10BVM)\n\tbvm := make([]byte, numBytes)\n\tbvm[0] = ion10BVM[0]\n\tif n, err := r.Read(bvm[1:]); err != nil || n != numBytes-1 {\n\t\treturn nil, errors.Errorf(\"unable to read binary version marker - read %d bytes of %d with err: %v\", n, numBytes-1, err)\n\t}\n\n\tif err := verifyByteVersionMarker(bytes.NewReader(bvm)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}",
"func (s *BaseMySqlParserListener) EnterBinaryComparasionPredicate(ctx *BinaryComparasionPredicateContext) {\n}",
"func RaggedBincountBinaryOutput(value bool) RaggedBincountAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"binary_output\"] = value\n\t}\n}",
"func binaryPlusOpType(sc *meta.Scope, cs *meta.ClassParseState, left, right ir.Node, custom []CustomType) types.Map {\n\t// TODO: PHP will raise fatal error if one operand is array and other is not, so we may check it too\n\tleftType := ExprTypeLocalCustom(sc, cs, left, custom)\n\trightType := ExprTypeLocalCustom(sc, cs, right, custom)\n\tif leftType.IsLazyArray() && rightType.IsLazyArray() {\n\t\treturn types.MergeMaps(leftType, rightType)\n\t}\n\treturn binaryMathOpType(sc, cs, left, right, custom)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Execute begins execution of the query and returns a channel to receive rows.
|
func (e *Executor) Execute() (<-chan *Row, error) {
// Initialize processors.
for _, p := range e.processors {
p.start()
}
// Create output channel and stream data in a separate goroutine.
	out := make(chan *Row)
go e.execute(out)
return out, nil
}
|
[
"func (e *RawExecutor) Execute(closing <-chan struct{}) <-chan *models.Row {\n\tout := make(chan *models.Row, 0)\n\tgo e.execute(out, closing)\n\treturn out\n}",
"func (q *Query) ExecuteWithChannel(resChan chan ResponseData) {\n\tif q.JobConfig != nil {\n\t\tgo q.retrieveRowsWithJobConfig(resChan)\n\t} else {\n\t\tgo q.retrieveRows(resChan)\n\t}\n}",
"func (q *Query) Exec(ctx context.Context) (*QueryResult, error) {\n\tvar r QueryResult\n\n\tif q.client == nil || !q.client.Started() {\n\t\treturn &r, fmt.Errorf(\"client or db is nil\")\n\t}\n\n\tswitch q.action {\n\tcase \"select\":\n\t\trows, err := q.execSelect(ctx)\n\t\tr.Rows = rows\n\t\treturn &r, err\n\tcase \"insert\":\n\t\trows, err := q.execInsert(ctx)\n\t\tr.Rows = rows\n\t\treturn &r, err\n\tcase \"update\":\n\t\tvar err error\n\t\tif len(q.returning) == 0 {\n\t\t\tr.RowsAffected, err = q.execUpdate(ctx)\n\t\t} else {\n\t\t\tr.Rows, err = q.execUpdateR(ctx)\n\t\t}\n\t\treturn &r, err\n\tcase \"delete\":\n\t\tvar err error\n\t\tif len(q.returning) == 0 {\n\t\t\tr.RowsAffected, err = q.execDelete(ctx)\n\t\t} else {\n\t\t\tr.Rows, err = q.execDeleteR(ctx)\n\t\t}\n\t\treturn &r, err\n\tdefault:\n\t\treturn &r, fmt.Errorf(\"unsupported action %v\", q.action)\n\t}\n}",
"func (reader *Input) Execute(writer io.Writer) error {\n\tmyRow := make(chan *Row)\n\tcurBuf := bytes.NewBuffer(make([]byte, 1000000))\n\tcurBuf.Reset()\n\tprogressTicker := time.NewTicker(progressTime)\n\tcontinuationTimer := time.NewTimer(continuationTime)\n\tdefer progressTicker.Stop()\n\tdefer continuationTimer.Stop()\n\tgo reader.runSelectParser(convertMySQL(reader.options.Expression), myRow)\n\tfor {\n\t\tselect {\n\t\tcase row, ok := <-myRow:\n\t\t\tif ok && row.err != nil {\n\t\t\t\terrorMessage := reader.writeErrorMessage(row.err, curBuf)\n\t\t\t\t_, err := errorMessage.WriteTo(writer)\n\t\t\t\tflusher, okFlush := writer.(http.Flusher)\n\t\t\t\tif okFlush {\n\t\t\t\t\tflusher.Flush()\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcurBuf.Reset()\n\t\t\t\tclose(myRow)\n\t\t\t\treturn nil\n\t\t\t} else if ok {\n\t\t\t\tmessage := reader.writeRecordMessage(row.record, curBuf)\n\t\t\t\t_, err := message.WriteTo(writer)\n\t\t\t\tflusher, okFlush := writer.(http.Flusher)\n\t\t\t\tif okFlush {\n\t\t\t\t\tflusher.Flush()\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcurBuf.Reset()\n\t\t\t\treader.stats.BytesReturned += int64(len(row.record))\n\t\t\t\tif !continuationTimer.Stop() {\n\t\t\t\t\t<-continuationTimer.C\n\t\t\t\t}\n\t\t\t\tcontinuationTimer.Reset(continuationTime)\n\t\t\t} else if !ok {\n\t\t\t\tstatPayload, err := reader.createStatXML()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tstatMessage := reader.writeStatMessage(statPayload, curBuf)\n\t\t\t\t_, err = statMessage.WriteTo(writer)\n\t\t\t\tflusher, ok := writer.(http.Flusher)\n\t\t\t\tif ok {\n\t\t\t\t\tflusher.Flush()\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcurBuf.Reset()\n\t\t\t\tmessage := reader.writeEndMessage(curBuf)\n\t\t\t\t_, err = message.WriteTo(writer)\n\t\t\t\tflusher, ok = writer.(http.Flusher)\n\t\t\t\tif ok {\n\t\t\t\t\tflusher.Flush()\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\tcase <-progressTicker.C:\n\t\t\t// Send progress messages only if requested by client.\n\t\t\tif reader.options.Progress {\n\t\t\t\tprogressPayload, err := reader.createProgressXML()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tprogressMessage := reader.writeProgressMessage(progressPayload, curBuf)\n\t\t\t\t_, err = progressMessage.WriteTo(writer)\n\t\t\t\tflusher, ok := writer.(http.Flusher)\n\t\t\t\tif ok {\n\t\t\t\t\tflusher.Flush()\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcurBuf.Reset()\n\t\t\t}\n\t\tcase <-continuationTimer.C:\n\t\t\tmessage := reader.writeContinuationMessage(curBuf)\n\t\t\t_, err := message.WriteTo(writer)\n\t\t\tflusher, ok := writer.(http.Flusher)\n\t\t\tif ok {\n\t\t\t\tflusher.Flush()\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcurBuf.Reset()\n\t\t\tcontinuationTimer.Reset(continuationTime)\n\t\t}\n\t}\n}",
"func (vtc *VTConn) Execute(query string, bindVars map[string]interface{}, keyspace string, shards []string) (*mproto.QueryResult, error) {\n\tvtc.mu.Lock()\n\tdefer vtc.mu.Unlock()\n\n\tqr := new(mproto.QueryResult)\n\tallErrors := new(concurrency.AllErrorRecorder)\n\tswitch len(shards) {\n\tcase 0:\n\t\treturn new(mproto.QueryResult), nil\n\tcase 1:\n\t\t// Fast-path for single shard execution\n\t\tvar err error\n\t\tqr, err = vtc.execOnShard(query, bindVars, keyspace, shards[0])\n\t\tallErrors.RecordError(err)\n\tdefault:\n\t\tresults := make(chan *mproto.QueryResult, len(shards))\n\t\tvar wg sync.WaitGroup\n\t\tfor shard := range unique(shards) {\n\t\t\twg.Add(1)\n\t\t\tgo func(shard string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tinnerqr, err := vtc.execOnShard(query, bindVars, keyspace, shard)\n\t\t\t\tif err != nil {\n\t\t\t\t\tallErrors.RecordError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresults <- innerqr\n\t\t\t}(shard)\n\t\t}\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(results)\n\t\t}()\n\t\tfor innerqr := range results {\n\t\t\tappendResult(qr, innerqr)\n\t\t}\n\t}\n\tif allErrors.HasErrors() {\n\t\tif vtc.transactionId != 0 {\n\t\t\terrstr := allErrors.Error().Error()\n\t\t\t// We cannot recover from these errors\n\t\t\tif strings.Contains(errstr, \"tx_pool_full\") || strings.Contains(errstr, \"not_in_tx\") {\n\t\t\t\tvtc.rollback()\n\t\t\t}\n\t\t}\n\t\treturn nil, allErrors.Error()\n\t}\n\treturn qr, nil\n}",
"func (stmt *statement) Query(ctx context.Context, db Executor, handler func(rows *sql.Rows)) error {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\t// Fetch rows\n\trows, err := db.QueryContext(ctx, stmt.String(), stmt.args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Iterate through rows of returned dataset\n\tfor rows.Next() {\n\t\tif len(stmt.dest) > 0 {\n\t\t\terr = rows.Scan(stmt.dest...)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Call a callback function\n\t\thandler(rows)\n\t}\n\t// Check for errors during rows \"Close\".\n\t// This may be more important if multiple statements are executed\n\t// in a single batch and rows were written as well as read.\n\tif closeErr := rows.Close(); closeErr != nil {\n\t\treturn closeErr\n\t}\n\n\t// Check for row scan errors.\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check for errors during row iteration.\n\treturn rows.Err()\n}",
"func (q *QueryPackage) Execute() (chan QueryResponse, chan bool) {\n\tresponseChan := make(chan QueryResponse, 1)\n\tstopChan := make(chan bool)\n\n\tgo func() {\n\t\tdefer close(responseChan)\n\t\turl := url.URL{Scheme: \"wss\", Host: q.Ip + \":\" + q.Port, Path: \"/api/v1\"}\n\t\tvar dialer = &websocket.Dialer{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tHandshakeTimeout: 5 * time.Second,\n\t\t\tTLSClientConfig: q.tls,\n\t\t\tJar: q.jar,\n\t\t}\n\n\t\tconn, _, err := dialer.Dial(url.String(), nil)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Execute dial error:\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer conn.Close()\n\n\t\t//TODO: set conn read/write timeout?\n\t\treadWriteTimeout := time.Duration(20)\n\t\tconn.SetReadDeadline(time.Now().Add(time.Second * readWriteTimeout))\n\t\tconn.SetWriteDeadline(time.Now().Add(time.Second * readWriteTimeout))\n\n\t\tstreaming := false\n\t\tfor _, v := range q.Queries {\n\t\t\tif v.Type == StartStream {\n\t\t\t\tstreaming = true\n\t\t\t}\n\t\t}\n\t\tpacket, err := json.Marshal(q.Queries)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error marshaling query:\", err)\n\t\t\treturn\n\t\t}\n\n\t\terr = conn.WriteMessage(websocket.BinaryMessage, packet)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error writing to socket:\", err)\n\t\t\treturn\n\t\t}\n\n\t\t_, message, err := conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error reading from socket:\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tresponse := QueryResponse{}\n\t\terr = json.Unmarshal(message, &response.Responses)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error parsing response:\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif streaming {\n\t\tLoop:\n\t\t\tfor {\n\t\t\t\tconn.SetReadDeadline(time.Now().Add(time.Second * readWriteTimeout))\n\t\t\t\t_, message, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error reading from socket:\", err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tresponse := QueryResponse{}\n\t\t\t\terr = json.Unmarshal(message, &response.Responses)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error parsing response:\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresponseChan <- response\n\n\t\t\t\tselect {\n\t\t\t\tcase <-stopChan:\n\t\t\t\t\tbreak Loop\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t}\n\t\t} else {\n\t\t\tconn.SetWriteDeadline(time.Now().Add(time.Second * readWriteTimeout))\n\t\t\terr = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(1000, \"Query Complete\"))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error closing socket:\", err)\n\t\t\t}\n\n\t\t\tresponseChan <- response\n\t\t\treturn\n\t\t}\n\t\tconn.SetWriteDeadline(time.Now().Add(time.Second * readWriteTimeout))\n\t\terr = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(1000, \"Query Complete\"))\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error closing socket:\", err)\n\t\t}\n\t\treturn\n\t}()\n\treturn responseChan, stopChan\n}",
"func (c Client) Execute(q storage.Query) (storage.Fetcher, error) {\n\tp, err := c.Prepare(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p.Run()\n}",
"func (vtc *VTConn) StreamExecute(query string, bindVars map[string]interface{}, keyspace string, shards []string, sendReply func(reply interface{}) error) error {\n\tvtc.mu.Lock()\n\tdefer vtc.mu.Unlock()\n\n\tif vtc.transactionId != 0 {\n\t\treturn fmt.Errorf(\"cannot stream in a transaction\")\n\t}\n\tresults := make(chan *mproto.QueryResult, len(shards))\n\tallErrors := new(concurrency.AllErrorRecorder)\n\tvar wg sync.WaitGroup\n\tfor shard := range unique(shards) {\n\t\twg.Add(1)\n\t\tgo func(shard string) {\n\t\t\tdefer wg.Done()\n\t\t\tsdc, _ := vtc.getConnection(keyspace, shard)\n\t\t\tsr, errFunc := sdc.StreamExecute(query, bindVars)\n\t\t\tfor qr := range sr {\n\t\t\t\tresults <- qr\n\t\t\t}\n\t\t\terr := errFunc()\n\t\t\tif err != nil {\n\t\t\t\tallErrors.RecordError(err)\n\t\t\t}\n\t\t}(shard)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(results)\n\t}()\n\tvar replyErr error\n\tfor innerqr := range results {\n\t\t// We still need to finish pumping\n\t\tif replyErr != nil {\n\t\t\tcontinue\n\t\t}\n\t\treplyErr = sendReply(innerqr)\n\t}\n\tif replyErr != nil {\n\t\tallErrors.RecordError(replyErr)\n\t}\n\treturn allErrors.Error()\n}",
"func (stc *ScatterConn) Execute(query string, bindVars map[string]interface{}, keyspace string, shards []string) (*mproto.QueryResult, error) {\n\tstc.mu.Lock()\n\tdefer stc.mu.Unlock()\n\n\tqr := new(mproto.QueryResult)\n\tallErrors := new(concurrency.AllErrorRecorder)\n\tswitch len(shards) {\n\tcase 0:\n\t\treturn qr, nil\n\tcase 1:\n\t\t// Fast-path for single shard execution\n\t\tvar err error\n\t\tqr, err = stc.execOnShard(query, bindVars, keyspace, shards[0])\n\t\tallErrors.RecordError(err)\n\tdefault:\n\t\tresults := make(chan *mproto.QueryResult, len(shards))\n\t\tvar wg sync.WaitGroup\n\t\tfor shard := range unique(shards) {\n\t\t\twg.Add(1)\n\t\t\tgo func(shard string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tinnerqr, err := stc.execOnShard(query, bindVars, keyspace, shard)\n\t\t\t\tif err != nil {\n\t\t\t\t\tallErrors.RecordError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresults <- innerqr\n\t\t\t}(shard)\n\t\t}\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(results)\n\t\t}()\n\t\tfor innerqr := range results {\n\t\t\tappendResult(qr, innerqr)\n\t\t}\n\t}\n\tif allErrors.HasErrors() {\n\t\tif stc.transactionId != 0 {\n\t\t\terrstr := allErrors.Error().Error()\n\t\t\t// We cannot recover from these errors\n\t\t\tif strings.Contains(errstr, \"tx_pool_full\") || strings.Contains(errstr, \"not_in_tx\") {\n\t\t\t\tstc.rollback()\n\t\t\t}\n\t\t}\n\t\treturn nil, allErrors.Error()\n\t}\n\treturn qr, nil\n}",
"func (e *sqlExecutor) Execute(ctx context.Context, c *sqlconf.Config) error {\n\tif err := c.Validate(); err != nil {\n\t\treturn err\n\t}\n\tdb, err := c.DB()\n\tif err != nil {\n\t\treturn nil\n\t}\n\tif err := setupDB(db, c); err != nil {\n\t\treturn err\n\t}\n\tif c.Concurrent {\n\t\treturn e.execParallel(ctx, db, c)\n\t}\n\tfor _, payload := range c.Payloads {\n\t\t_, err := db.ExecContext(ctx, payload.Exec)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (accessor *DbAccessor) Execute(ctx context.Context, command string, args ...interface{}) (sql.Result, error) {\n\tvar r sql.Result\n\tvar err error\n\tLogOperation(ctx, \"ExecuteCommand\", func() error {\n\t\tr, err = accessor.db.ExecContext(ctx, command, args...)\n\t\treturn err\n\t})\n\n\treturn r, err\n}",
"func (db *DB) Execute(req *command.Request, xTime bool) ([]*command.ExecuteResult, error) {\n\tstats.Add(numExecutions, int64(len(req.Statements)))\n\tconn, err := db.rwDB.Conn(context.Background())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\treturn db.executeWithConn(req, xTime, conn)\n}",
"func (q *Query) Run() (int, int, error) {\n\treturn q.execute()\n}",
"func (c *RethinkDbConnection) exec(ctx context.Context, q func(ctx context.Context) (*r.Cursor, error)) (*r.Cursor, error) {\n\tctx, cancel := context.WithTimeout(ctx, c.queryTimeout)\n\tdefer cancel()\n\treturn q(ctx)\n}",
"func (si *ScanIterator) Execute() Tuple {\n\tresult := si.tuples[si.idx]\n\tsi.idx++\n\treturn result\n}",
"func Execute(db *sql.DB, query string, args ...interface{}) error {\n\treturn crdb.Execute(func() error {\n\t\t_, err := db.Exec(query, args...)\n\t\treturn err\n\t})\n}",
"func (l *Lock) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {\n\tqr, err := l.Execute(vcursor, bindVars, wantfields)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn callback(qr)\n}",
"func (stmt *Statement) Run(params ...interface{}) (res *Result, err os.Error) {\n defer stmt.db.unlockIfError(&err)\n defer catchOsError(&err)\n stmt.db.lock()\n\n if stmt.db.conn == nil {\n return nil, NOT_CONN_ERROR\n }\n if stmt.db.unreaded_rows {\n return nil, UNREADED_ROWS_ERROR\n }\n\n // Bind parameters if any\n if len(params) != 0 {\n stmt.BindParams(params...)\n }\n\n // Send EXEC command with binded parameters\n stmt.sendCmdExec()\n // Get response\n res = stmt.db.getResponse(true)\n res.binary = true\n return\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
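For context, a minimal caller of Execute; printRows is a hypothetical helper, and the Row fields it prints (Name, Tags, Values) come from the executor code in the next record:

// printRows drains the executor's output channel.
func printRows(e *Executor) error {
	rows, err := e.Execute()
	if err != nil {
		return err
	}
	// e.execute is assumed to close the channel once all rows have streamed.
	for row := range rows {
		fmt.Printf("%s %v %v\n", row.Name, row.Tags, row.Values)
	}
	return nil
}
|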
createRowValuesIfNotExists creates a new value set if one does not already exist for a given tagset and timestamp.
|
func (e *Executor) createRowValuesIfNotExists(rows map[string]*Row, name string, tagset []byte, timestamp int64) []interface{} {
// TODO: Add "name" to lookup key.
// Find row by tagset.
var row *Row
if row = rows[string(tagset)]; row == nil {
row = &Row{Name: name}
// Create tag map.
row.Tags = make(map[string]string)
for i, v := range unmarshalStrings(tagset) {
row.Tags[e.tags[i]] = v
}
// Create column names.
row.Columns = make([]string, 1, len(e.stmt.Fields)+1)
row.Columns[0] = "time"
for i, f := range e.stmt.Fields {
name := f.Name()
if name == "" {
name = fmt.Sprintf("col%d", i)
}
row.Columns = append(row.Columns, name)
}
// Save to lookup.
rows[string(tagset)] = row
}
		// If no values exist or the last value doesn't match the timestamp, create a new one.
if len(row.Values) == 0 || row.Values[len(row.Values)-1][0] != timestamp {
values := make([]interface{}, len(e.processors)+1)
values[0] = timestamp
row.Values = append(row.Values, values)
}
return row.Values[len(row.Values)-1]
}
|
[
"func newSet(txn *Transaction, key []byte) *Set {\n\tnow := Now()\n\treturn &Set{\n\t\ttxn: txn,\n\t\tkey: key,\n\t\tmeta: &SetMeta{\n\t\t\tObject: Object{\n\t\t\t\tID: UUID(),\n\t\t\t\tCreatedAt: now,\n\t\t\t\tUpdatedAt: now,\n\t\t\t\tExpireAt: 0,\n\t\t\t\tType: ObjectSet,\n\t\t\t\tEncoding: ObjectEncodingHT,\n\t\t\t},\n\t\t\tLen: 0,\n\t\t},\n\t}\n}",
"func newSet(typ reflect.Type, gm *gmap) *set {\n\t// Create set.\n\ts := &set{vtype: newValueType(typ), gmap: gm}\n\t// A set has only one value type, so insert valueTypes here.\n\tvalueTypes.Store(typ.String(), s.vtype)\n\n\treturn s\n}",
"func (d *datumSet) set(m *metricdata.Metric, ts *metricdata.TimeSeries, p metricdata.Point) {\n\tif d.previousDatumCache == nil {\n\t\td.previousDatumCache = make(map[string]metricdata.Point)\n\t}\n\td.previousDatumCache[hashMetric(m, ts)] = p\n}",
"func NewSet(values ...Value) Set {\n\tset := None\n\tfor _, value := range values {\n\t\tset = set.With(value)\n\t}\n\treturn set\n}",
"func getOrCreateMetricSet(entityIdentifier string, entityType string, m map[string]*metric.Set, i *integration.Integration) *metric.Set {\n\n\t// If the metric set already exists, return it\n\tset, ok := m[entityIdentifier]\n\tif ok {\n\t\treturn set\n\t}\n\n\t// If the metric set doesn't exist, get the entity for it and create a new metric set\n\te, _ := i.Entity(entityIdentifier, entityType) //can't error if both name and namespace are defined\n\tvar newSet *metric.Set\n\tif entityType == \"instance\" {\n\t\tnewSet = e.NewMetricSet(\"OracleDatabaseSample\", metric.Attr(\"entityName\", \"instance:\"+entityIdentifier), metric.Attr(\"displayName\", entityIdentifier))\n\t} else if entityType == \"tablespace\" {\n\t\tnewSet = e.NewMetricSet(\"OracleTablespaceSample\", metric.Attr(\"entityName\", \"tablespace:\"+entityIdentifier), metric.Attr(\"displayName\", entityIdentifier))\n\t} else {\n\t\tlog.Error(\"Unreachable code\")\n\t\tos.Exit(1)\n\t}\n\n\t// Put the new metric set the map\n\tm[entityIdentifier] = newSet\n\n\treturn newSet\n}",
"func NewSet(elements ...interface{}) Set {\n\ts := make(Set)\n\tfor _, e := range elements {\n\t\tv, err := InternalType(e)\n\t\tif err != nil {\n\t\t\tlogging.Error(\"Error initializing set: %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\ts[v] = struct{}{}\n\t}\n\n\t//logging.Debug(\"Created new set: %s\", s)\n\treturn s\n}",
"func NewSet(elements ...interface{}) Set {\n\toptions := &SetOptions{Cache: true}\n\tset := options.newThreadSafeSet()\n\tset.Add(elements...)\n\treturn &set\n}",
"func NewSetUnknown(elementType attr.Type) SetValue {\n\treturn SetValue{\n\t\telementType: elementType,\n\t\tstate: attr.ValueStateUnknown,\n\t}\n}",
"func (db *DB) CreateSeriesIfNotExists(name string, tags map[string]string) (*Measurement, *Series) {\n\t// Find or create meaurement\n\tm := db.measurements[name]\n\tif m == nil {\n\t\tm = NewMeasurement(name)\n\t\tdb.measurements[name] = m\n\t}\n\n\t// Normalize tags and try to match against existing series.\n\tif tags == nil {\n\t\ttags = make(map[string]string)\n\t}\n\tfor _, s := range m.series {\n\t\tif reflect.DeepEqual(s.tags, tags) {\n\t\t\treturn m, s\n\t\t}\n\t}\n\n\t// Create new series.\n\tdb.maxSeriesID++\n\ts := &Series{id: db.maxSeriesID, tags: tags}\n\n\t// Add series to DB and measurement.\n\tdb.series[s.id] = s\n\tm.series[s.id] = s\n\n\treturn m, s\n}",
"func NewOneTimeSetter(key string, d time.Duration) OneTimeSet {\n\tassert.NotNil(otFactory)\n\treturn otFactory(key, d)\n}",
"func NewSet[T constraints.Ordered](values ...T) *Set[T] {\n\ts := new(Set[T])\n\tfor i := range values {\n\t\ts.Add(values[i])\n\t}\n\treturn s\n}",
"func NewSet() Set {\n\tm := make(map[string]struct{})\n\treturn Set{m}\n}",
"func (ts *TagSet) Unique() {\n\tseen := make(map[string]struct{})\n\tfor i := 0; i < len(*ts); {\n\t\tt := (*ts)[i]\n\t\tif _, found := seen[t]; found {\n\t\t\t*ts = append((*ts)[:i], (*ts)[i+1:]...)\n\t\t} else {\n\t\t\tseen[t] = struct{}{}\n\t\t\ti++\n\t\t}\n\t}\n}",
"func (m *MongoDB) CreateMeteringTimeSeriesIfNotExist() error {\n\treturn m.CreateTimeSeriesIfNotExist(m.DBName, m.MeteringConn)\n}",
"func NewSet(name string, loader pongo2.TemplateLoader) *TemplateSet {\n\treturn pongo2.NewSet(name, loader)\n}",
"func NewSetValue(elementType attr.Type, elements []attr.Value) (SetValue, diag.Diagnostics) {\n\tvar diags diag.Diagnostics\n\n\t// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521\n\tctx := context.Background()\n\n\tfor idx, element := range elements {\n\t\tif !elementType.Equal(element.Type(ctx)) {\n\t\t\tdiags.AddError(\n\t\t\t\t\"Invalid Set Element Type\",\n\t\t\t\t\"While creating a Set value, an invalid element was detected. \"+\n\t\t\t\t\t\"A Set must use the single, given element type. \"+\n\t\t\t\t\t\"This is always an issue with the provider and should be reported to the provider developers.\\n\\n\"+\n\t\t\t\t\tfmt.Sprintf(\"Set Element Type: %s\\n\", elementType.String())+\n\t\t\t\t\tfmt.Sprintf(\"Set Index (%d) Element Type: %s\", idx, element.Type(ctx)),\n\t\t\t)\n\t\t}\n\t}\n\n\tif diags.HasError() {\n\t\treturn NewSetUnknown(elementType), diags\n\t}\n\n\treturn SetValue{\n\t\telementType: elementType,\n\t\telements: elements,\n\t\tstate: attr.ValueStateKnown,\n\t}, nil\n}",
"func New() Set {\n\treturn Set{}\n}",
"func (f *LogFile) TagValueSeriesIDSet(name, key, value []byte) (*tsdb.SeriesIDSet, error) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\n\ttk, ok := mm.tagSet[string(key)]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\n\ttv, ok := tk.tagValues[string(value)]\n\tif !ok {\n\t\treturn nil, nil\n\t} else if tv.cardinality() == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn tv.seriesIDSet(), nil\n}",
"func NewSet(els ...string) (s Set) {\n\treturn s.Add(els...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
dimensionKeys returns a list of tag key names for the dimensions. Each dimension must be a VarRef.
|
func dimensionKeys(dimensions Dimensions) (a []string) {
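	// Each dimension is required to wrap a *VarRef, so the type assertion below is safe.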
for _, d := range dimensions {
a = append(a, d.Expr.(*VarRef).Val)
}
return
}
|
[
"func (m *Measurement) TagKeys() []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tkeys := make([]string, 0, len(m.seriesByTagKeyValue))\n\tfor k := range m.seriesByTagKeyValue {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}",
"func (target *Target) TagKeys() []string {\n\n\tkeys := make([]string, len(target.Tags))\n\n\tfor i, tag := range target.Tags {\n\t\tkeys[i] = tag.Key\n\t\ti++\n\t}\n\n\treturn keys\n}",
"func (m *logMeasurement) keys() []string {\n\ta := make([]string, 0, len(m.tagSet))\n\tfor k := range m.tagSet {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}",
"func (o *FiltersVmGroup) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}",
"func (t Tags) Keys() (keys []string) {\n\tkeys = make([]string, 0, t.Count())\n\tfor key := range t {\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}",
"func (m *varMap) Keys() []string {\n\treturn m.keys\n}",
"func (o *FiltersVirtualGateway) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}",
"func (o *FiltersDhcpOptions) GetTagKeys() []string {\n\tif o == nil || o.TagKeys == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.TagKeys\n}",
"func (client *InstanceMetadataClient) GetTagKeys(ctx context.Context) ([]string, error) {\n\tbody, err := client.getMetadata(ctx, \"tags/instance\")\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\treturn strings.Split(body, \"\\n\"), nil\n}",
"func (d *MetadataAsDictionary) KeySet() []string {\n\tif d.metadata == nil {\n\t\td.Init()\n\t}\n\t// TODO: pre-allocate res\n\tvar res []string\n\tfor k := range d.metadata {\n\t\tres = append(res, k)\n\t}\n\treturn res\n}",
"func (c *ServiceConfigs) Keys() []string {\n\tkeys := []string{}\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tfor name := range c.M {\n\t\tkeys = append(keys, name)\n\t}\n\treturn keys\n}",
"func (g *Graph) listOfKeys() []string {\n\tkeys := make([]string, len(g.Nodes))\n\ti := 0\n\n\tfor k := range g.Nodes {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\n\treturn keys\n}",
"func (dsn DSN) Keys() []string {\n\n\tkeys := make([]string, 0)\n\n\tfor k, _ := range dsn {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\treturn keys\n}",
"func (p *Partitions) Keys() []string {\n\tvar result = make([]string, 0)\n\tfor k := range p.index {\n\t\tresult = append(result, k)\n\t}\n\treturn result\n}",
"func (doc *HclDocument) GetKeys(q string) ([]string, error) {\n\treturn doc.QueryKeys(q)\n}",
"func (m *Measurement) SeriesKeys() []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tvar keys []string\n\tfor _, s := range m.seriesByID {\n\t\tkeys = append(keys, s.Key)\n\t}\n\treturn keys\n}",
"func (self *Map) Keys(tagName ...string) []interface{} {\n\treturn Keys(self.MapNative(tagName...))\n}",
"func (a *Aliases) Keys() []string {\n\ta.mx.RLock()\n\tdefer a.mx.RUnlock()\n\n\tss := make([]string, 0, len(a.Alias))\n\tfor k := range a.Alias {\n\t\tss = append(ss, k)\n\t}\n\treturn ss\n}",
"func (v GitConfig) Keys() []string {\n\tallKeys := []string{}\n\tfor s, keys := range v {\n\t\tfor key := range keys {\n\t\t\tallKeys = append(allKeys, s+\".\"+key)\n\t\t}\n\t}\n\tsort.Strings(allKeys)\n\treturn allKeys\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
newMapper returns a new instance of mapper.
|
func newMapper(e *Executor, seriesID uint32, fieldID uint8, typ DataType) *mapper {
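	// c carries each interval's emitted values to the reducer; done coordinates shutdown via stop.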
return &mapper{
		executor: e,
		seriesID: seriesID,
		fieldID:  fieldID,
		typ:      typ,
		c:        make(chan map[string]interface{}),
		done:     make(chan chan struct{}),
}
}
|
[
"func NewMapper(config MapperConfig) *Mapper {\n\tm := Mapper{\n\t\tconfig: config,\n\t\tregexes: make([]*regexp.Regexp, len(config.Mappings)),\n\t\tvalidate: validator.New(),\n\t}\n\n\tm.Initialize()\n\n\treturn &m\n}",
"func NewMapper(isBiFlow bool) *Mapper {\n\treturn &Mapper{\n\t\tmmap: make(map[string]*[]*gopacket.Packet, 0),\n\t\tisBiFlow: isBiFlow,\n\t}\n}",
"func NewMapper(m func(srcPtr interface{}, destPtr interface{}) error) Mapper {\n return funcMapper(m)\n}",
"func NewMapper(tagName string, tagFunc TagFunc) *Mapper {\n\tif tagFunc == nil {\n\t\ttagFunc = StdTagFunc\n\t}\n\treturn &Mapper{\n\t\ttagName: tagName,\n\t\ttagFunc: tagFunc,\n\t}\n}",
"func NewMapper(port int, destAddr string) (t *Mapper, err error) {\n\tif port <= 0 || port > 65534 {\n\t\terr := fmt.Errorf(\"bind port %v out of range\", port)\n\t\treturn nil, err\n\t}\n\tdestArr := strings.Split(destAddr, \",\")\n\tif len(destArr) == 0 {\n\t\terr := errors.New(\"the dest address length is 0\")\n\t\treturn nil, err\n\t}\n\tt = &Mapper{\n\t\tstart: time.Now(),\n\t\tLogger: log.NewLogger(fmt.Sprintf(\"map:%v\", port)),\n\t\tport: port,\n\t\tdest: destArr,\n\t\ttunnels: make(map[string]*Tunnel),\n\t}\n\n\tt.Info(\"mapper created\")\n\n\treturn t, nil\n}",
"func NewMapper(content string) *Mapper {\n\tm := &Mapper{\n\t\tcontent: content,\n\t}\n\n\t// Precompute line offsets, for easy deduction of the line number for a global offset\n\tm.offsets = make([]int, 0, 32)\n\tm.offsets = append(m.offsets, 0) // First line starts at offset 0.\n\tfor offset, r := range content {\n\t\tif r == '\\n' {\n\t\t\tm.offsets = append(m.offsets, offset+1)\n\t\t}\n\t}\n\n\t// Introduce an artificial last line.\n\tm.offsets = append(m.offsets, len(content))\n\n\treturn m\n}",
"func newAzureNetworkMapper() *AzureNetworkMapper {\n\treturn &AzureNetworkMapper{}\n}",
"func New(client dynamodbiface.ClientAPI) app.Mapper {\n\treturn &mapper{\n\t\tclient: client,\n\t}\n}",
"func NewMapFunc(t mockConstructorTestingTNewMapFunc) *MapFunc {\n\tmock := &MapFunc{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewMapMapper(mapping map[string]string) *MapMapper {\n\treturn &MapMapper{mapping: mapping}\n}",
"func NewFlowMapper(args *Args) *FlowMapper {\n\treturn &FlowMapper{workdir: args.WorkDir, mapfile: args.MapFile}\n}",
"func New(log zerolog.Logger, chain Chain, feed Feeder, index index.Writer, options ...func(*MapperConfig)) (*Mapper, error) {\n\n\t// We don't use a checkpoint by default. The options can set one, in which\n\t// case we will add the checkpoint as a finalized state commitment in our\n\t// trie registry.\n\tcfg := MapperConfig{\n\t\tCheckpointFile: \"\",\n\t\tPostProcessing: PostNoop,\n\t}\n\tfor _, option := range options {\n\t\toption(&cfg)\n\t}\n\n\t// Check if the checkpoint file exists.\n\tif cfg.CheckpointFile != \"\" {\n\t\tstat, err := os.Stat(cfg.CheckpointFile)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid checkpoint file: %w\", err)\n\t\t}\n\t\tif stat.IsDir() {\n\t\t\treturn nil, fmt.Errorf(\"invalid checkpoint file: directory\")\n\t\t}\n\t}\n\n\ti := Mapper{\n\t\tlog: log,\n\t\tchain: chain,\n\t\tfeed: feed,\n\t\tindex: index,\n\t\tcheckpoint: cfg.CheckpointFile,\n\t\tpost: cfg.PostProcessing,\n\t\twg: &sync.WaitGroup{},\n\t\tstop: make(chan struct{}),\n\t}\n\n\treturn &i, nil\n}",
"func NewMapper(from interface{}, ctor func() interface{}, mapFunc MapFunc) *EntityMapper {\n\t// check nil\n\tif from == nil || ctor == nil {\n\t\tpanic(\"mapper's model could not be nil\")\n\t}\n\tto := ctor()\n\tif to == nil {\n\t\tpanic(\"mapper's model could not be nil\")\n\t}\n\n\t// check pointer\n\tfromType := reflect.TypeOf(from)\n\ttoType := reflect.TypeOf(to)\n\tif fromType.Kind() != reflect.Ptr || toType.Kind() != reflect.Ptr {\n\t\tpanic(\"mapper's model could only be pointer\")\n\t}\n\n\t// check struct\n\tfromElType := fromType.Elem()\n\ttoElType := toType.Elem()\n\tif fromElType.Kind() != reflect.Struct || toElType.Kind() != reflect.Struct {\n\t\tpanic(\"mapper's model could only be a pointer pointed to a struct\")\n\t}\n\n\t// return\n\treturn &EntityMapper{\n\t\tfrom: from,\n\t\tto: to,\n\t\tfromType: fromType,\n\t\ttoType: toType,\n\t\tctor: ctor,\n\t\tmapFunc: mapFunc,\n\t}\n}",
"func CreateMapper() *Mapper {\n\treturn &Mapper{}\n}",
"func NewResourceMapper() *ResourceMapper {\n\tm := map[schema.GroupVersionKind]NewInterfaceFunc{}\n\treturn &ResourceMapper{\n\t\tl: &sync.Mutex{},\n\t\tgvkToNew: m,\n\t}\n}",
"func NewMap(r *goja.Runtime) *Map {\n\treturn &Map{runtime: r}\n}",
"func NewCMapper() (m *CMapper) {\n\tm = new(CMapper)\n\tm.charToStr = make(map[int]string)\n\tm.strToChar = make(map[string]int)\n\treturn\n}",
"func NewSameMapper() NamespaceMapper {\n\treturn &same{}\n}",
"func New() *Map { return new(Map).Init() }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
start begins processing the iterator.
|
func (m *mapper) start() {
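	// Build an iterator over this series and field, bounded by the executor's time range and interval.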
m.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,
m.executor.min, m.executor.max, m.executor.interval)
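	// Drain the iterator on a background goroutine.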
go m.run()
}
|
[
"func (s *mongoSource) start(iter Iter) {\n\tdefer s.Stop()\n\tdefer close(s.rows)\n\tfor !s.stopped {\n\t\tr := optimus.Row{}\n\t\tif !iter.Next(&r) {\n\t\t\tbreak\n\t\t}\n\t\ts.rows <- r\n\t}\n\ts.err = iter.Err()\n}",
"func (w *Walker) startProcessing() {\n\tdoStart := false\n\tw.pipe.RLock()\n\tif w.pipe.filters == nil { // no processing up to now => start with initial node\n\t\tw.pipe.pushSync(w.initial, 0) // input is buffered, will return immediately\n\t\tdoStart = true // yes, we will have to start the pipeline\n\t}\n\tw.pipe.RUnlock()\n\tif doStart { // ok to be outside mutex as other goroutines will check pipe.empty()\n\t\tw.pipe.startProcessing() // must be outside of mutex lock\n\t}\n}",
"func (iterator *Iterator) Begin() {\n\titerator.iterator.Begin()\n}",
"func (r *reducer) start() {\n\tfor _, m := range r.mappers {\n\t\tm.start()\n\t}\n\tgo r.run()\n}",
"func (pm *parallelFileReadManager) begin(numWorkers int, reader io.Reader) {\n\tpm.workerResultManagers = make([]workerResultManager, numWorkers)\n\tfor i := 0; i < numWorkers; i++ {\n\t\tpm.workerResultManagers[i] = workerResultManager{\n\t\t\tresultChan: make(chan *recordedOpResult),\n\t\t\tavailable: make(chan struct{}, 1),\n\t\t}\n\t\tpm.workerResultManagers[i].available <- struct{}{}\n\t}\n\n\tpm.parseJobsChan = make(chan *parseJob, numWorkers)\n\tpm.stopChan = make(chan struct{})\n\n\tpm.runFileReader(numWorkers, reader)\n\tpm.runParsePool(numWorkers)\n}",
"func (graphMinion *graphMinion) start() {\n\tgo func() {\n\t\tdefer graphMinion.wg.Done()\n\t\tfor {\n\n\t\t\t// pull reads from queue until done\n\t\t\tmappingData, ok := <-graphMinion.inputChannel\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif mappingData == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// increment the nodes contained in the mapping window\n\t\t\tmisc.ErrorCheck(graphMinion.graph.IncrementSubPath(mappingData.ContainedNodes, mappingData.Freq))\n\t\t}\n\t}()\n}",
"func (s *BasecluListener) EnterIterator(ctx *IteratorContext) {}",
"func (root *mTreap) start(mask, match treapIterType) treapIter {\n\tf := treapFilter(mask, match)\n\treturn treapIter{f, root.treap.findMinimal(f)}\n}",
"func (mi *MinerIndex) start() {\n\tdefer func() { mi.finished <- struct{}{} }()\n\n\tif err := mi.updateOnChainIndex(); err != nil {\n\t\tlog.Errorf(\"error on initial updating miner index: %s\", err)\n\t}\n\tmi.chMeta <- struct{}{}\n\tfor {\n\t\tselect {\n\t\tcase <-mi.ctx.Done():\n\t\t\tlog.Info(\"graceful shutdown of background miner index\")\n\t\t\treturn\n\t\tcase <-time.After(metadataRefreshInterval):\n\t\t\tselect {\n\t\t\tcase mi.chMeta <- struct{}{}:\n\t\t\tdefault:\n\t\t\t\tlog.Info(\"skipping meta index update since it's busy\")\n\t\t\t}\n\t\tcase <-time.After(util.AvgBlockTime):\n\t\t\tif err := mi.updateOnChainIndex(); err != nil {\n\t\t\t\tlog.Errorf(\"error when updating miner index: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}",
"func (r *chanReader) start(rd io.Reader) {\n\tr.ch = make(chan chanResponse, 1)\n\tgo r.read(rd)\n}",
"func (t *tcParser) start() {\n\tt.logger.Info(\"start(): Starting the tc_reader.\")\n\tconfigTemplate := \"tc_reader configuration: tcCmdPath: %s parseInterval: %d tcQdiscStats: %s tcClassStats: %s ifaces: %s userNameClass: %v\"\n\tt.logIfDebug(fmt.Sprintf(configTemplate, t.options.tcCmdPath(), t.options.parseInterval(), t.options.tcQdiscStats(), t.options.tcClassStats(), t.options.ifaces(), t.options.userNameClass()))\n\t// One initial run of TC execution and parsing.\n\tt.parseTc()\n\n\tgo func() {\n\t\tfor range time.Tick(time.Duration(t.options.parseInterval()) * time.Second) {\n\t\t\tt.parseTc()\n\t\t}\n\t}()\n}",
"func (r *reaper) start() {\n\tgo r.runLoop()\n}",
"func (r *record) start() {\n\tr.startTime = time.Now()\n}",
"func (i *informers) start(ctx context.Context) {\n\tfor _, startable := range i.toStart {\n\t\tstartable.Start(ctx.Done())\n\t}\n}",
"func (c *Communicator) startProcessing() bool {\n\tc.runningLock.RLock()\n\tdefer c.runningLock.RUnlock()\n\tif c.running {\n\t\tc.pending.Add(1)\n\t}\n\treturn c.running\n}",
"func (w *Processor) start() {\n\tfor {\n\t\tselect {\n\t\tcase job, ok := <-w.jobQueue:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.limiter <- empty{}\n\n\t\t\t// Spawn a worker goroutine.\n\t\t\tgo func() {\n\t\t\t\tif err := job.Run(); err != nil {\n\t\t\t\t\tw.jobErrorHandler(err)\n\t\t\t\t}\n\t\t\t\t<-w.limiter\n\t\t\t}()\n\t\tcase <-w.stop:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (i *Ingester) Start(ctx context.Context) error {\n\tconcurrentProc := make(chan bool, nConcurrentProcessors)\n\tresultChan, err := i.getInputChannel(ctx)\n\tif err != nil {\n\t\treturn sklog.FmtErrorf(\"Error retrieving input channel: %s\", err)\n\t}\n\n\t// Continuously catch events from all input sources and push the data to the processor.\n\tgo func(doneCh <-chan bool) {\n\t\tvar resultFile ResultFileLocation = nil\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase resultFile = <-resultChan:\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// get a slot in line to call Process\n\t\t\tconcurrentProc <- true\n\t\t\tgo func(resultFile ResultFileLocation) {\n\t\t\t\tdefer func() { <-concurrentProc }()\n\t\t\t\ti.processResult(ctx, resultFile)\n\t\t\t}(resultFile)\n\t\t}\n\t}(i.doneCh)\n\treturn nil\n}",
"func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}",
"func (s *seriesValueGenerator) Start() error { return nil }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
stop stops the mapper.
|
func (m *mapper) stop() { syncClose(m.done) }
|
[
"func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}",
"func (m *Map) Stop(c chan<- string) {\n\tm.bus.Stop(c)\n}",
"func (p *literalProcessor) stop() { syncClose(p.done) }",
"func (r *reaper) stop() {\n\tr.stopCh <- struct{}{}\n}",
"func (s *Smr) stop() {\n\t// TODO: zq\n}",
"func (c *podChurner) stop() {\n\tc.stopCh <- struct{}{}\n}",
"func (a *Arm) stop() {\n\ta.vel = 0\n}",
"func (s *schedule) stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\ts.running = false\n\ts.stopCh <- struct{}{}\n}",
"func (w *Watcher) stop() {\n\tatomic.StoreUint32(&w.isStop, 1)\n}",
"func (t *Tracer) Stop() {}",
"func (dh *DeviceHandler) stop(ctx context.Context) {\n\tlogger.Debug(\"stopping-device-handler\")\n\tdh.exitChannel <- 1\n}",
"func (controlManager *controlManager) stop() (err error) {\n\tdefer helper.CatchPanic(&err, \"main\", \"stop\")\n\n\t// shutdown the log system\n\ttracelog.Stop()\n\n\treturn err\n}",
"func (oc *OSRMConnector) Stop() {\n\t// todo\n}",
"func (b *Blinker) Stop() {\n\tclose(b.stop)\n}",
"func (cl *chainListener) Stop() error {\n\t// notify all responders to exit\n\tcl.streamMap.Range(func(key, _ interface{}) error {\n\t\tr, ok := key.(Responder)\n\t\tif !ok {\n\t\t\tlog.L().Error(\"streamMap stores a key which is not a Responder\")\n\t\t\treturn errorKeyIsNotResponder\n\t\t}\n\t\tr.Exit()\n\t\treturn nil\n\t})\n\tcl.streamMap.Reset()\n\treturn nil\n}",
"func (s *Scanner) Stop() {\n\ts.stop <- struct{}{}\n}",
"func stop() {\n\tlog.Info(\"Maison is stopping...\")\n\n\tclosePlugins()\n\n\t// TODO: close stores\n\n\tbus.Stop()\n\n\tlog.Info(\"Maison is stopped\")\n}",
"func (f *FakeOutput) Stop() error { return nil }",
"func (w *Watch) stop() {\n\tw.done <- struct{}{}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
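Putting newMapper, start, and stop together: a minimal consumer sketch, assuming same-package access to the mapper's fields and an illustrative DataType constant named Float64:

m := newMapper(e, seriesID, fieldID, Float64) // Float64 is an assumed DataType value.
m.start()
// run() closes m.c after the final interval, ending this loop; stop()
// is only needed to shut the mapper down early.
for kv := range m.c {
	fmt.Println(kv) // kv maps an encoded timestamp key to that interval's value.
}
|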
run executes the map function against the iterator.
|
func (m *mapper) run() {
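	// Apply the map function once per time interval.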
for m.itr.NextIterval() {
m.fn(m.itr, m)
}
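	// Close the output channel to signal completion to the reducer.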
close(m.c)
}
|
[
"func (conn *db) runMap(stmt Stmt, mapper MapMapper) (rowsReturned int, err error) {\n\tif err = conn.Connect(); err != nil {\n\t\treturn\n\t}\n\n\tvar (\n\t\tstmtx *sqlx.Stmt\n\t\trows *sqlx.Rows\n\t\tt time.Time\n\t)\n\n\tif conn.hasProfiling() {\n\t\tt = time.Now()\n\t}\n\n\tstmtx, err = preparex(conn, stmt)\n\tif err == nil {\n\t\tdefer stmtx.Close()\n\t\trows, err = stmtx.Queryx(stmt.Args()...)\n\t\tif err == nil {\n\t\t\tdefer rows.Close()\n\n\t\t\trow := map[string]any{}\n\t\t\tfor rows.Next() {\n\t\t\t\terr = rows.MapScan(row)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tmapper(row)\n\t\t\t\trowsReturned++\n\t\t\t}\n\t\t} else if errors.Is(err, sql.ErrNoRows) {\n\t\t\tif !conn.env.ErrorNoRows {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil && conn.hasVerbose() {\n\t\tconn.logErr.Println(err.Error())\n\t}\n\n\tconn.profilingStmt(stmt, err, t)\n\treturn\n}",
"func Run() {\n\tfor key, val := range funcMap {\n\t\targs := funcArgs[key]\n\t\tcall(val, args)\n\t}\n}",
"func (gm *gmap) run() {\n\t// Destruct gmap before exit.\n\tdefer func() {\n\t\tgm.raft.Stop()\n\t\tclose(gm.done)\n\t}()\n\t// Start gmap raft node.\n\tgo gm.raft.run()\n\t// Apply entries and snapshot get from raft.\n\tvar gmp gmapProgress\n\tfor {\n\t\tselect {\n\t\t// New apply.\n\t\tcase ap := <-gm.raft.applyc:\n\t\t\tgm.applyAll(&gmp, &ap)\n\t\t// gmap is closed.\n\t\tcase <-gm.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (r *reducer) run() {\nloop:\n\tfor {\n\t\t// Combine all data from the mappers.\n\t\tdata := make(map[string][]interface{})\n\t\tfor _, m := range r.mappers {\n\t\t\tkv, ok := <-m.C()\n\t\t\tif !ok {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tfor k, v := range kv {\n\t\t\t\tdata[k] = append(data[k], v)\n\t\t\t}\n\t\t}\n\n\t\t// Reduce each key.\n\t\tfor k, v := range data {\n\t\t\tr.fn(k, v, r)\n\t\t}\n\t}\n\n\t// Mark the channel as complete.\n\tclose(r.c)\n}",
"func (p *MapToKeys) Run() {\n\tdefer p.CloseAllOutPorts()\n\tfor ip := range p.In().Chan {\n\t\tnewKeys := p.mapFunc(ip)\n\t\tip.AddKeys(newKeys)\n\t\tip.WriteAuditLogToFile()\n\t\tp.Out().Send(ip)\n\t}\n}",
"func (f Filter) run(node *yaml.RNode) error {\n\tfor key, value := range f.Annotations {\n\t\tif err := node.PipeE(fsslice.Filter{\n\t\t\tFsSlice: f.FsSlice,\n\t\t\tSetValue: fsslice.SetEntry(key, value),\n\t\t\tCreateKind: yaml.MappingNode, // Annotations are MappingNodes.\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func _map(fn mapfn, chunks []string, c chan dict) {\n\tfor _, chunk := range chunks {\n\t\tgo fn(chunk, c)\n\t}\n}",
"func (m *FlowMapper) Run() {\n\tm.flowMap = ReadFlowMap(m.mapfile)\n\tzips := GetZipNames(m.workdir)\n\tfor _, name := range zips {\n\t\tsourcePath := filepath.Join(m.workdir, name)\n\t\ttargetPath := filepath.Join(m.workdir, \"peflocus_\"+name)\n\t\tDeleteExisting(targetPath)\n\t\tlog.Println(\"INFO: map flows in\", sourcePath, \"to\", targetPath)\n\t\tm.doIt(sourcePath, targetPath)\n\t}\n}",
"func (sm safeMap) run() {\n\tstore := make(map[string]interface{})\n\tfor command := range sm {\n\t\tswitch command.action {\n\t\tcase INSERT:\n\t\t\tstore[command.key] = command.value\n\t\tcase REMOVE:\n\t\t\tdelete(store, command.key)\n\t\tcase FLUSH:\n\t\t\tflush(store, command.keys)\n\t\tcase FIND:\n\t\t\tvalue, found := store[command.key]\n\t\t\tcommand.result <- findResult{value, found}\n\t\tcase COUNT:\n\t\t\tcommand.result <- len(store)\n\t\tcase TRUNCATE:\n\t\t\tclearMap(store)\n\t\tcase END:\n\t\t\tclose(sm)\n\t\t\tcommand.data <- store\n\t\t}\n\t}\n}",
"func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tstream, err := ioutil.ReadFile(inFile)\n\tcheck_error(err)\n\n\tkeyVals := mapF(inFile, string(stream))\n\t\n\tresults := make(map[int][]KeyValue)\n\tfor _, kv := range keyVals {\n\t\t// Calculate R\n\t\tr := ihash(kv.Key) % nReduce\n\n\t\t// Map the results internally\n\t\tresults[r] = append(results[r], kv)\n\t}\n\n\tfor r, keyVals := range results {\n\t\toutputFileName := reduceName(jobName, mapTaskNumber, r)\n\t\tfile, err := os.Create(outputFileName)\n\t\tcheck_error(err)\n\t\tenc := json.NewEncoder(file)\n\n\t\tfor _, kv := range keyVals {\n\t\t\terr := enc.Encode(&kv)\n\t\t\tcheck_error(err)\n\t\t}\n\n\t\tfile.Close()\n\t}\n}",
"func (n *Globals) Run() {\n\tfor _, node := range n.nodeMap {\n\t\tgo RunNode(node)\n\t}\n}",
"func (conn *db) runMapRow(stmt Stmt, mapper MapMapper) (rowsReturned int, err error) {\n\tif err = conn.Connect(); err != nil {\n\t\treturn\n\t}\n\n\tvar (\n\t\tstmtx *sqlx.Stmt\n\t\tt time.Time\n\t\tvalues = map[string]any{}\n\t)\n\n\tif conn.hasProfiling() {\n\t\tt = time.Now()\n\t}\n\n\tstmtx, err = preparex(conn, stmt)\n\tif err == nil {\n\t\tdefer stmtx.Close()\n\n\t\terr = stmtx.QueryRowx(stmt.Args()...).MapScan(values)\n\t\tif err == nil {\n\t\t\tmapper(values)\n\t\t\trowsReturned = 1\n\t\t} else if errors.Is(err, sql.ErrNoRows) {\n\t\t\tif !conn.env.ErrorNoRows {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil && conn.hasVerbose() {\n\t\tconn.logErr.Println(err.Error())\n\t}\n\n\tconn.profilingStmt(stmt, err, t)\n\treturn\n}",
"func (p *literalProcessor) run() {\n\tfor {\n\t\tselect {\n\t\tcase ch := <-p.done:\n\t\t\tclose(ch)\n\t\t\treturn\n\t\tcase p.c <- map[string]interface{}{\"\": p.val}:\n\t\t}\n\t}\n}",
"func (m *mapper) start() {\n\tm.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,\n\t\tm.executor.min, m.executor.max, m.executor.interval)\n\tgo m.run()\n}",
"func (job *MapOnlyJob) Run() error {\n\tif job.NewMapperF == nil {\n\t\treturn errors.New(\"MapOnlyJob: NewMapperF undefined!\")\n\t}\n\tif job.Source == nil {\n\t\treturn errors.New(\"MapOnlyJob: Source undefined!\")\n\t}\n\ttotalPart := 0\n\tendss := make([][]chan error, 0, len(job.Source))\n\tfor i := range job.Source {\n\t\tpartCount, err := job.Source[i].PartCount()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tends := make([]chan error, 0, partCount)\n\t\tfor part := 0; part < partCount; part++ {\n\t\t\tend := make(chan error, 1)\n\t\t\tends = append(ends, end)\n\t\t\tgo func(i, part, totalPart int, end chan error) {\n\t\t\t\tend <- func() error {\n\t\t\t\t\tmapper := job.NewMapperF(i, part)\n\t\t\t\t\tkey, val := mapper.NewKey(), mapper.NewVal()\n\t\t\t\t\tcs := make([]sophie.Collector, 0, len(job.Dest))\n\t\t\t\t\tfor _, dst := range job.Dest {\n\t\t\t\t\t\tc, err := dst.Collector(totalPart)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"open collector for source %d part %d failed\", i, part)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefer c.Close()\n\t\t\t\t\t\tcs = append(cs, c)\n\t\t\t\t\t}\n\t\t\t\t\titer, err := job.Source[i].Iterator(part)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \" open source %d part %d failed\", i, part)\n\t\t\t\t\t}\n\t\t\t\t\tdefer iter.Close()\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tif err := iter.Next(key, val); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) != io.EOF {\n\t\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"next failed\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err := mapper.Map(key, val, cs); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) == EOM {\n\t\t\t\t\t\t\t\tlog.Print(\"EOM returned, exit early\")\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"mapping %v %v failed\", key, val)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn errorsp.WithStacksAndMessage(mapper.MapEnd(cs), \"map end failed\")\n\t\t\t\t}()\n\t\t\t}(i, part, totalPart, end)\n\t\t\ttotalPart++\n\t\t}\n\t\tendss = append(endss, ends)\n\t}\n\tvar errReturned error\n\tfor _, ends := range endss {\n\t\tfor part, end := range ends {\n\t\t\tlog.Printf(\"Waiting for mapper %d...\", part)\n\t\t\tif err := <-end; err != nil {\n\t\t\t\tlog.Printf(\"Error returned for part %d: %v\", part, err)\n\t\t\t\terrReturned = err\n\t\t\t}\n\t\t\tlog.Printf(\"No error for mapper %d...\", part)\n\t\t}\n\t}\n\treturn errReturned\n}",
"func Map(array []interface{}, iterator ResultIterator) []interface{} {\r\n\tvar result = make([]interface{}, len(array))\r\n\tfor index, data := range array {\r\n\t\tresult[index] = iterator(data, index)\r\n\t}\r\n\treturn result\r\n}",
"func (gd Grid) Map(fn func(Point, Cell) Cell) {\n\tif gd.Ug == nil {\n\t\treturn\n\t}\n\tw := gd.Ug.Width\n\tcells := gd.Ug.Cells\n\tyimax := gd.Rg.Max.Y * w\n\tfor y, yi := 0, gd.Rg.Min.Y*w; yi < yimax; y, yi = y+1, yi+w {\n\t\tximax := yi + gd.Rg.Max.X\n\t\tfor x, xi := 0, yi+gd.Rg.Min.X; xi < ximax; x, xi = x+1, xi+1 {\n\t\t\tc := cells[xi]\n\t\t\tp := Point{X: x, Y: y}\n\t\t\tcells[xi] = fn(p, c)\n\t\t}\n\t}\n}",
"func (_ SequentialRunner) Run(fn func()) {\n\tfn()\n}",
"func (w *funcWrapper) Run() {\n\t(*w)()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
emit sends a value to the mapper's output channel.
|
func (m *mapper) emit(key int64, value interface{}) {
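	// m.key is assumed to be a preallocated 8-byte scratch buffer reused across emits.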
// Encode the timestamp to the beginning of the key.
binary.BigEndian.PutUint64(m.key, uint64(key))
// OPTIMIZE: Collect emit calls and flush all at once.
m.c <- map[string]interface{}{string(m.key): value}
}
|
[
"func (r *reducer) emit(key string, value interface{}) {\n\tr.c <- map[string]interface{}{key: value}\n}",
"func (c channelConveyor) Emit(v interface{}) error {\n\tc.outputCh <- v\n\n\treturn nil\n}",
"func (b Broadcaster) Write(v interface{}) {\n\tutils.Debugf(\"Sending %v\\n\", v)\n\tb.Sendc <- v // write value on send channel\n}",
"func emit(e *core.NormalizedEmitter, message string) {\n\te.Emit(core.Logs, &core.LogsArgs{\n\t\tLogs: message,\n\t})\n}",
"func (c *Client) Emit(moduleID string, val interface{}) {\n\tif c.client == nil {\n\t\tlog.Println(\"core: emit failed: client not ready\")\n\t\treturn\n\t}\n\n\tif !setupModules[moduleID].registration.hasEventHandler {\n\t\tpanic(\"core: attempt to emit event without an event handler \" +\n\t\t\t\"for module: \" + moduleID)\n\t}\n\n\tif !reflect.TypeOf(val).\n\t\tAssignableTo(setupModules[moduleID].registration.eventType) {\n\t\tlog.Println(\"core: refusing to emit: event value not assignable to \" +\n\t\t\t\"module's registered event type\")\n\t\treturn\n\t}\n\n\terr := c.client.Notify(EventMsg, Event{\n\t\tModuleID: moduleID,\n\t\tValue: val,\n\t})\n\tif err != nil {\n\t\tlog.Println(\"core: emit failed:\", err)\n\t}\n}",
"func (l *lexer) emit(t tokenType) {\n\tdebugPrint(\"emit \" + string(l.buf))\n\tl.out <- token{lexeme: string(l.buf), t: t}\n\tl.buf = nil\n}",
"func (p *Parser) emit() {\n\tp.out <- *p.tag\n\tp.tag = nil\n}",
"func (p *blockParser) emit(b Block) {\n\tp.blockChan <- b\n\tp.start = p.cur\n}",
"func (c *Compiler) emitChanSend(frame *Frame, instr *ssa.Send) error {\n\tvalueType, err := c.getLLVMType(instr.Chan.Type().(*types.Chan).Elem())\n\tif err != nil {\n\t\treturn err\n\t}\n\tch, err := c.parseExpr(frame, instr.Chan)\n\tif err != nil {\n\t\treturn err\n\t}\n\tchanValue, err := c.parseExpr(frame, instr.X)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalueSize := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(chanValue.Type()), false)\n\tvalueAlloca := c.builder.CreateAlloca(valueType, \"chan.value\")\n\tc.builder.CreateStore(chanValue, valueAlloca)\n\tvalueAllocaCast := c.builder.CreateBitCast(valueAlloca, c.i8ptrType, \"chan.value.i8ptr\")\n\tc.createRuntimeCall(\"chanSendStub\", []llvm.Value{llvm.Undef(c.i8ptrType), ch, valueAllocaCast, valueSize}, \"\")\n\treturn nil\n}",
"func (bus *EventBus) Emit(msg Message) {\n\tbus.input <- msg\n}",
"func (g *GUI) Emit(name string, dat interface{}, f interface{}) error {\n\treturn g.Client.Emit(name, dat, f)\n}",
"func emitOutput(ctx context.Context, n *node) stateFn {\n\tif n == nil || n.outputC == nil { // OMIT\n\t\treturn nil // OMIT\n\t} // OMIT\n\tselect {\n\tcase <-ctx.Done():\n\t\tn.err = ctx.Err()\n\t\treturn nil\n\tcase n.outputC <- n.output:\n\t}\n\treturn nil\n}",
"func (s *Scanner) emit(tok token.Token) {\n\ts.results <- result{\n\t\tPos: s.f.Pos(s.start),\n\t\tTok: tok,\n\t\tLit: string(s.pending),\n\t}\n\ts.ignore()\n}",
"func (p *program) doWriteOutput(i *instruction) {\n if p.outChannel != nil {\n p.outChannel <- i.params[0].value\n } else {\n p.dataStack = append(p.dataStack, i.params[0].value)\n }\n p.position += i.length\n\n if p.haltOnOutput {\n p.halt = true\n }\n}",
"func (e *encoder) emit(bits, nBits uint32) {\n\tnBits += e.nBits\n\tbits <<= 32 - nBits\n\tbits |= e.bits\n\tfor nBits >= 8 {\n\t\tb := uint8(bits >> 24)\n\t\te.writeByte(b)\n\t\tif b == 0xff {\n\t\t\te.writeByte(0x00)\n\t\t}\n\t\tbits <<= 8\n\t\tnBits -= 8\n\t}\n\te.bits, e.nBits = bits, nBits\n}",
"func (a *Actor) Send(m string) { a.input <- m }",
"func (l *reader) emit(t itemType) {\n\tl.items <- item{t, l.current.String()}\n\tl.current.Reset()\n\tl.width = 0\n}",
"func (ms *metricSender) SendValue(name string, value float64, unit string) error {\n\treturn ms.eventEmitter.Emit(&events.ValueMetric{Name: &name, Value: &value, Unit: &unit})\n}",
"func (this Client) emit(message interface{}) {\n mu.Lock()\n for _, client := range clients {\n websocket.JSON.Send(client.Websocket, message)\n }\n mu.Unlock()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
mapCount computes the number of values in an iterator.
|
func mapCount(itr Iterator, m *mapper) {
n := 0
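	// A zero key marks the end of the interval's values.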
for k, _ := itr.Next(); k != 0; k, _ = itr.Next() {
n++
}
m.emit(itr.Time(), float64(n))
}
|
[
"func MapCount(itr Iterator) interface{} {\n\tn := float64(0)\n\tfor k, _ := itr.Next(); k != -1; k, _ = itr.Next() {\n\t\tn++\n\t}\n\tif n > 0 {\n\t\treturn n\n\t}\n\treturn nil\n}",
"func Count(itr Iterator) int {\n\tconst mask = ^Word(0)\n\tcount := 0\n\tfor {\n\t\tw, n := itr.Next()\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif n < bitLength-1 {\n\t\t\tw &= mask >> uint(bitLength-n)\n\t\t}\n\t\tcount += bits.OnesCount(uint(w))\n\t}\n\treturn count\n}",
"func MapCountDistinct(itr Iterator) interface{} {\n\tvar index = make(map[interface{}]struct{})\n\n\tfor time, value := itr.Next(); time != -1; time, value = itr.Next() {\n\t\tindex[value] = struct{}{}\n\t}\n\n\tif len(index) == 0 {\n\t\treturn nil\n\t}\n\n\treturn index\n}",
"func TestMapCount(t *testing.T) {\n\tm := map[Key]interface{}{}\n\ttestMapCountN(testN, m)\n}",
"func CountNumberOfValuesMap(jsonMap map[string]interface{}) int {\n\tcount := 0\n\tfor _, value := range jsonMap {\n\t\tcount += countNumberOfValueObject(value)\n\t}\n\treturn count\n}",
"func Count(it *Iterator) (int, error) {\n\tcount := 0\n\tfor {\n\t\t_, err := it.Next()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn count, err\n\t\t}\n\t\tcount += 1\n\t}\n\treturn count, nil\n}",
"func getTotalCount(a map[string]int) int {\n\tvar result int\n\tfor _, v := range a {\n\t\tresult += v\n\t}\n\treturn result\n}",
"func (m *OMap) Count() int {\n\treturn len(m.keys)\n}",
"func (j Json) IterMap(f func(key string, value Json) bool) int {\n\tm, ok := j.asMap()\n\tif !ok {\n\t\treturn 0\n\t}\n\n\tcount := 0\n\tfor k, v := range m {\n\t\tcount++\n\t\tif !f(k, Json{v, true}) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn count\n}",
"func (p *SliceOfMap) Count(key interface{}) (cnt int) {\n\tk := ToString(key)\n\tcnt = p.CountW(func(x O) bool { return ToStringMap(x).Exists(k) })\n\treturn\n}",
"func (w WaysMapping) Count() int {\n\treturn len(w)\n}",
"func testMapCountN(n int, m map[Key]interface{}) {\n\tfor i := 0; i < n; i++ {\n\t\t_ = len(m)\n\t}\n}",
"func (s *Stream) Count() int {\n\titerable := s.process()\n\n\tif iterable.Len() >= 0 {\n\t\treturn iterable.Len()\n\t}\n\n\titerator := iterable.Iterator()\n\tsize := 0\n\n\tfor ; iterator.HasNext(); iterator.Next() {\n\t\tsize++\n\t}\n\n\treturn size\n}",
"func testCountN(n int, hm HashMaper) {\n\tfor i := 0; i < n; i++ {\n\t\thm.Count()\n\t}\n}",
"func (s *CPUSet) Count() int {\n\tc := 0\n\tfor _, b := range s {\n\t\tc += onesCount64(uint64(b))\n\t}\n\treturn c\n}",
"func Count(m map[string]int, list []string) int {\n\treturn m[k(list)]\n}",
"func (set Int64Set) CountBy(predicate func(int64) bool) (result int) {\n\tfor v := range set {\n\t\tif predicate(v) {\n\t\t\tresult++\n\t\t}\n\t}\n\treturn\n}",
"func calculateCount(cMap map[string]int) int {\n\tcount := 0\n\tfor _, val := range cMap {\n\t\tif val > 0 {\n\t\t\tcount++\n\t\t}\n\t\tif count == 5 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn count\n}",
"func (db *RDBMS) QStrCountMap(query string, params ...interface{}) M.SI {\n\tres := M.SI{}\n\trows := db.QAll(query, params...)\n\tdefer rows.Close()\n\tval := ``\n\tfor rows.Scan(&val) {\n\t\tres[val] += 1\n\t}\n\treturn res\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
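The Iterator and mapper types that mapCount depends on are not part of this record, so the contract below is an assumption inferred from the calls above: Next returns a zero key once the interval is exhausted (hence the k != 0 test), and Time reports the interval start passed to emit. The helper mirrors the same drain loop without the mapper plumbing, which makes the sentinel convention easy to unit-test.

// Hypothetical reconstruction of the iterator contract used by mapCount;
// the real interface lives elsewhere in the source tree.
type Iterator interface {
	Next() (key int64, value interface{}) // key == 0 signals exhaustion
	Time() int64                          // start time of the current interval
}

// countValues drains an iterator exactly the way mapCount does, minus the
// mapper, and returns the count that mapCount would have emitted.
func countValues(itr Iterator) float64 {
	n := 0
	for k, _ := itr.Next(); k != 0; k, _ = itr.Next() {
		n++
	}
	return float64(n)
}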
mapSum computes the summation of values in an iterator.
|
func mapSum(itr Iterator, m *mapper) {
n := float64(0)
for k, v := itr.Next(); k != 0; k, v = itr.Next() {
n += v.(float64)
}
m.emit(itr.Time(), n)
}
|
[
"func MapSum(itr Iterator) interface{} {\n\tn := float64(0)\n\tcount := 0\n\tvar resultType NumberType\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tcount++\n\t\tswitch n1 := v.(type) {\n\t\tcase float64:\n\t\t\tn += n1\n\t\tcase int64:\n\t\t\tn += float64(n1)\n\t\t\tresultType = Int64Type\n\t\t}\n\t}\n\tif count > 0 {\n\t\tswitch resultType {\n\t\tcase Float64Type:\n\t\t\treturn n\n\t\tcase Int64Type:\n\t\t\treturn int64(n)\n\t\t}\n\t}\n\treturn nil\n}",
"func (ai aggregatingIterator) sum() float64 {\n\tvar sum float64\n\tfor i := range ai {\n\t\tsum = sum + ai[i].value()\n\t}\n\treturn sum\n}",
"func MapSum[T Number](slicesOfItems [][]T) []T {\n\tresult := make([]T, 0, len(slicesOfItems))\n\n\tfor _, items := range slicesOfItems {\n\t\tresult = append(result, Sum(items))\n\t}\n\treturn result\n}",
"func (m *Monoid[T]) Sum(it Iterator[T]) T {\n\treturn m.SumWithInit(m.Empty(), it)\n}",
"func SumInts(m map[string]int64) int64 {\r\n\tvar s int64\r\n\tfor _, v := range m {\r\n\t\ts += v\r\n\t}\r\n\treturn s\r\n}",
"func (q Query) SumInts() (r int64) {\n\tnext := q.Iterate()\n\titem, ok := next()\n\tif !ok {\n\t\treturn 0\n\t}\n\n\tconv := getIntConverter(item)\n\tr = conv(item)\n\n\tfor item, ok = next(); ok; item, ok = next() {\n\t\tr += conv(item)\n\t}\n\n\treturn\n}",
"func Sum(items []Value) (op int) {\n\tfor _, item := range items {\n\t\top += item.Value()\n\t}\n\treturn\n}",
"func reduceSum(key string, values []interface{}, r *reducer) {\n\tvar n float64\n\tfor _, v := range values {\n\t\tn += v.(float64)\n\t}\n\tr.emit(key, n)\n}",
"func (s MathableStream[T]) Sum() T {\n\tif s.stream == nil {\n\t\tpanic(PanicMissingChannel)\n\t}\n\n\tsum, ok := <-s.stream\n\tif !ok {\n\t\tpanic(PanicNoSuchElement)\n\t}\n\n\tfor val := range s.stream {\n\t\tsum = Sum(sum, val)\n\t}\n\n\treturn sum\n}",
"func (m *Map) ReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tresult := 0\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceInt(reduce)\n\t}\n\treturn result\n}",
"func (m mathUtil) Sum(values ...float64) float64 {\n\tvar total float64\n\tfor _, v := range values {\n\t\ttotal += v\n\t}\n\treturn total\n}",
"func (s sample) Sum() (sum int64) {\n\tfor _, v := range s {\n\t\tsum += int64(v)\n\t}\n\treturn\n}",
"func sum(b *bolt.Bucket, fn func([]byte) int) (int, error) {\n\tsum := 0\n\terr := b.ForEach(func(_, v []byte) error {\n\t\tsum += fn(v)\n\t\treturn nil\n\t})\n\treturn sum, err\n}",
"func (a Aggregate) Sum() float64 {\n\tvar total float64\n\tfor _, v := range a.values {\n\t\ttotal += v\n\t}\n\treturn total\n}",
"func (set Int64Set) Sum() int64 {\n\tsum := int64(0)\n\tfor v, _ := range set {\n\t\tsum = sum + v\n\t}\n\treturn sum\n}",
"func (fs fundListing) Sum() int64 {\n\tsum := int64(0)\n\tfor _, o := range fs.Outputs {\n\t\tsum += o.Value\n\t}\n\treturn sum\n}",
"func Sum(by []string, input []*oproto.ValueStream) []*oproto.ValueStream {\n\toutput := []*oproto.ValueStream{{Variable: input[0].Variable}}\n\tiPos := make([]int, len(input))\n\tfor {\n\t\tvalues := []float64{}\n\t\ttimestamps := []uint64{}\n\t\tfor i := 0; i < len(input); i++ {\n\t\t\tif iPos[i] >= len(input[i].Value) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif input[i] != nil {\n\t\t\t\tvalues = append(values, input[i].Value[iPos[i]].GetDouble())\n\t\t\t\ttimestamps = append(timestamps, input[i].Value[iPos[i]].Timestamp)\n\t\t\t}\n\t\t\tiPos[i]++\n\t\t}\n\t\tif len(values) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tvar total float64\n\t\tfor _, i := range values {\n\t\t\ttotal += i\n\t\t}\n\t\tvar tsTotal uint64\n\t\tfor _, i := range timestamps {\n\t\t\ttsTotal += i\n\t\t}\n\t\toutput[0].Value = append(output[0].Value, value.NewDouble(tsTotal/uint64(len(timestamps)), total))\n\t}\n\treturn output\n}",
"func (m Multiples) Sum() int {\n\tvar output int\n\tfor _, i := range m {\n\t\toutput += i\n\t}\n\n\treturn output\n}",
"func Sum(xi ...int) int {\n\ts := 0\n\tfor _, v := range xi {\n\t\ts += v\n\t}\n\treturn s\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
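One sharp edge worth noting: mapSum's v.(float64) assertion panics if the iterator ever yields an int64, whereas the MapSum negative above tolerates both numeric types. A defensive variant of the accumulation step might look like the sketch below; it is illustrative, not taken from the source.

// sumValue adds one iterator value to a running total, accepting the two
// numeric types seen in this corpus instead of asserting float64 directly.
func sumValue(total float64, v interface{}) float64 {
	switch x := v.(type) {
	case float64:
		return total + x
	case int64:
		return total + float64(x)
	default:
		return total // silently skip unsupported types in this sketch
	}
}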
newReducer returns a new instance of reducer.
|
func newReducer(e *Executor) *reducer {
return &reducer{
executor: e,
c: make(chan map[string]interface{}, 0),
done: make(chan chan struct{}, 0),
}
}
|
[
"func NewReducer(f func(*Ctx) *State) *ReduceFunc {\n\treturn &ReduceFunc{Func: f}\n}",
"func NewReducer(reduce ReduceFn, options ...func(*RFunc)) *RFunc {\n\trf := &RFunc{Func: reduce}\n\tfor _, o := range options {\n\t\to(rf)\n\t}\n\treturn rf\n}",
"func New(reducers ...interface{}) *Store {\n\tnewStore := &Store{\n\t\treducers: make(map[uintptr]reflect.Value),\n\t\tstate: make(map[uintptr]reflect.Value),\n\t}\n\tfor _, reducer := range reducers {\n\t\tr := reflect.ValueOf(reducer)\n\t\t// If fail any checking, it will panic, so don't try to recover or handling the error\n\t\tcheckReducer(r)\n\n\t\tif _, ok := newStore.state[r.Pointer()]; ok {\n\t\t\tpanic(\"You can't put duplicated reducer into the same store!\")\n\t\t}\n\n\t\tactualReducer, initState := getReducerAndInitState(r)\n\n\t\tnewStore.reducers[r.Pointer()] = actualReducer\n\t\tnewStore.state[r.Pointer()] = initState\n\t}\n\treturn newStore\n}",
"func newCompactionState(maxFileSize uint32, snapshot version.Snapshot, compaction *version.Compaction) *compactionState {\n\treturn &compactionState{\n\t\tmaxFileSize: maxFileSize,\n\t\tsnapshot: snapshot,\n\t\tcompaction: compaction,\n\t}\n}",
"func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &Reconciler{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}",
"func (s *Store) ReplaceReducer(r Reducer) {\n\ts.reducer = r\n\ts.Dispatch(INITAction())\n}",
"func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileChe{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}",
"func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileStack{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}",
"func NewStoreWithFakeReducer() (st *Store, getActions func() []Action) {\n\tvar mu sync.Mutex\n\tactions := []Action{}\n\treducer := Reducer(func(ctx context.Context, s *EngineState, action Action) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tactions = append(actions, action)\n\n\t\terrorAction, isErrorAction := action.(ErrorAction)\n\t\tif isErrorAction {\n\t\t\ts.FatalError = errorAction.Error\n\t\t}\n\t})\n\n\tgetActions = func() []Action {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\treturn append([]Action{}, actions...)\n\t}\n\treturn NewStore(reducer, false), getActions\n}",
"func newActionParser(r io.Reader) *actionParser {\n\treturn &actionParser{\n\t\tr: bufio.NewReader(r),\n\t\ts: make(stack, 0),\n\t}\n}",
"func New() Action {\n\treturn &action{}\n}",
"func newRuleIndexState() *RuleIndexState {\n\treturn &RuleIndexState{newRuleIndexID(), make([]*Rule, 0),\n\t\tmake(map[string]*RuleMatcherKey)}\n}",
"func newLexer(r io.Reader) *lexer {\n\treturn &lexer{\n\t\tscanner: newScanner(r),\n\t}\n}",
"func New(grammarString string) (*grammar.Grammar, tree.Reducer, error) {\n\tparseTree, err := runner.Run(grammarString)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tg, r := evalGrammar(parseTree.(*tree.PN))\n\treturn g, r, nil\n}",
"func NewDirectoryDiffMapReducer(dirs int, split []byte) Reducer {\n\treturn &DirectoryDiffMapReducer{\n\t\tDirs: dirs,\n\t\tSplit: split,\n\t}\n}",
"func newRedactingWriter(target *os.File) *redactingWriter {\n\treturn &redactingWriter{\n\t\ttarget: target,\n\t\tr: replacer{},\n\t}\n}",
"func newLexer(src string) *lexer {\n\tl := &lexer{src: src,\n\t\ttokenChan: make(chan token),\n\t}\n\tgo l.run()\n\treturn l\n}",
"func NewReconciler(cfg ReconcilerConfig) (*Reconciler, error) {\n\tawsClient, err := aws.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treconciler := Reconciler{\n\t\tcfg: cfg,\n\t\tlog: logutil.WithRollout(cfg.Rollout).WithField(logutil.IngressKey, cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.ALB.Ingress),\n\t\taws: awsClient,\n\t}\n\treturn &reconciler, nil\n}",
"func New() *Controller {\n\treturn &Controller{\n\t\tValidatePayload: ValidatePayload,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
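The reducer struct declaration is not included in this record. Piecing the constructor together with the start, stop, run, and emit methods in the records that follow, a plausible reconstruction is sketched below; the field names and the fn signature are assumptions. Note also that make(chan T, 0) is equivalent to make(chan T): both channels are unbuffered, so every send in emit blocks until run's consumer is ready.

// Assumed shape of the reducer; only c and done are confirmed by the
// constructor above, the rest is inferred from the methods in this file.
type reducer struct {
	executor *Executor                                          // owning query executor
	mappers  []*mapper                                          // upstream mappers drained by run
	fn       func(key string, values []interface{}, r *reducer) // reduce function, e.g. reduceSum
	c        chan map[string]interface{}                        // output channel, closed by run
	done     chan chan struct{}                                 // shutdown handshake used by stop
}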
start begins streaming values from the mappers and reducing them.
|
func (r *reducer) start() {
for _, m := range r.mappers {
m.start()
}
go r.run()
}
|
[
"func (m *mapper) start() {\n\tm.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,\n\t\tm.executor.min, m.executor.max, m.executor.interval)\n\tgo m.run()\n}",
"func (w *SimpleMapReduce) Start() *SimpleMapReduce {\n if (w.hasStarted) {\n return w\n }\n\n w.hasStarted = true\n\n for i := 0; i < w.mappers; i++ {\n mapFn := w.mapFn\n mapperFinished := make(chan bool)\n w.mappersFinished[i] = mapperFinished\n\n // Parallel function which performs the map and adds the result to the reduction queue\n go func() {\n for item := range w.workQueue {\n res := mapFn(item)\n w.reduceQueue <- res\n }\n close(mapperFinished)\n }()\n }\n\n // If a reduction function is specified, start it. Otherwise, simply close the reducedFinish\n // channel.\n if (w.reduceFn != nil) {\n go func() {\n w.reduceFn(w.reduceQueue)\n close(w.reducedFinished)\n }()\n } else {\n close(w.reducedFinished)\n }\n\n return w\n}",
"func (r *reducer) run() {\nloop:\n\tfor {\n\t\t// Combine all data from the mappers.\n\t\tdata := make(map[string][]interface{})\n\t\tfor _, m := range r.mappers {\n\t\t\tkv, ok := <-m.C()\n\t\t\tif !ok {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tfor k, v := range kv {\n\t\t\t\tdata[k] = append(data[k], v)\n\t\t\t}\n\t\t}\n\n\t\t// Reduce each key.\n\t\tfor k, v := range data {\n\t\t\tr.fn(k, v, r)\n\t\t}\n\t}\n\n\t// Mark the channel as complete.\n\tclose(r.c)\n}",
"func (m *mapper) run() {\n\tfor m.itr.NextIterval() {\n\t\tm.fn(m.itr, m)\n\t}\n\tclose(m.c)\n}",
"func (l *LocalMapper) Begin(c *influxql.Call, startingTime int64, chunkSize int) error {\n\t// set up the buffers. These ensure that we return data in time order\n\tmapFunc, err := influxql.InitializeMapFunc(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.mapFunc = mapFunc\n\tl.keyBuffer = make([]int64, len(l.cursors))\n\tl.valueBuffer = make([][]byte, len(l.cursors))\n\tl.chunkSize = chunkSize\n\tl.tmin = startingTime\n\n\tvar isCountDistinct bool\n\n\t// determine if this is a raw data query with a single field, multiple fields, or an aggregate\n\tvar fieldName string\n\tif c == nil { // its a raw data query\n\t\tl.isRaw = true\n\t\tif len(l.selectFields) == 1 {\n\t\t\tfieldName = l.selectFields[0]\n\t\t}\n\n\t\t// if they haven't set a limit, just set it to the max int size\n\t\tif l.limit == 0 {\n\t\t\tl.limit = math.MaxUint64\n\t\t}\n\t} else {\n\t\t// Check for calls like `derivative(mean(value), 1d)`\n\t\tvar nested *influxql.Call = c\n\t\tif fn, ok := c.Args[0].(*influxql.Call); ok {\n\t\t\tnested = fn\n\t\t}\n\n\t\tswitch lit := nested.Args[0].(type) {\n\t\tcase *influxql.VarRef:\n\t\t\tfieldName = lit.Val\n\t\tcase *influxql.Distinct:\n\t\t\tif c.Name != \"count\" {\n\t\t\t\treturn fmt.Errorf(\"aggregate call didn't contain a field %s\", c.String())\n\t\t\t}\n\t\t\tisCountDistinct = true\n\t\t\tfieldName = lit.Val\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"aggregate call didn't contain a field %s\", c.String())\n\t\t}\n\n\t\tisCountDistinct = isCountDistinct || (c.Name == \"count\" && nested.Name == \"distinct\")\n\t}\n\n\t// set up the field info if a specific field was set for this mapper\n\tif fieldName != \"\" {\n\t\tfid, err := l.decoder.FieldIDByName(fieldName)\n\t\tif err != nil {\n\t\t\tswitch {\n\t\t\tcase c != nil && c.Name == \"distinct\":\n\t\t\t\treturn fmt.Errorf(`%s isn't a field on measurement %s; to query the unique values for a tag use SHOW TAG VALUES FROM %[2]s WITH KEY = \"%[1]s`, fieldName, l.job.MeasurementName)\n\t\t\tcase isCountDistinct:\n\t\t\t\treturn fmt.Errorf(\"%s isn't a field on measurement %s; count(distinct) on tags isn't yet supported\", fieldName, l.job.MeasurementName)\n\t\t\t}\n\t\t}\n\t\tl.fieldID = fid\n\t\tl.fieldName = fieldName\n\t}\n\n\t// seek the bolt cursors and fill the buffers\n\tfor i, c := range l.cursors {\n\t\t// this series may have never been written in this shard group (time range) so the cursor would be nil\n\t\tif c == nil {\n\t\t\tl.keyBuffer[i] = 0\n\t\t\tl.valueBuffer[i] = nil\n\t\t\tcontinue\n\t\t}\n\t\tk, v := c.Seek(u64tob(uint64(l.job.TMin)))\n\t\tif k == nil {\n\t\t\tl.keyBuffer[i] = 0\n\t\t\tl.valueBuffer[i] = nil\n\t\t\tcontinue\n\t\t}\n\t\tl.cursorsEmpty = false\n\t\tt := int64(btou64(k))\n\t\tl.keyBuffer[i] = t\n\t\tl.valueBuffer[i] = v\n\t}\n\treturn nil\n}",
"func (s *JsonEntryCounter) Mapper(r io.Reader, w io.Writer) error {\n\tlog.Printf(\"map_input_file %s\", os.Getenv(\"map_input_file\"))\n\twg, out := mrproto.JsonInternalOutputProtocol(w)\n\n\t// for efficient counting, use an in-memory counter that flushes the least recently used item\n\t// less Mapper output makes for faster sorting and reducing.\n\tcounter := lru.NewLRUCounter(func(k interface{}, v int64) {\n\t\tout <- mrproto.KeyValue{k, v}\n\t}, 100)\n\n\tfor line := range mrproto.RawInputProtocol(r) {\n\t\tvar record map[string]json.RawMessage\n\t\tif err := json.Unmarshal(line, &record); err != nil {\n\t\t\tgomrjob.Counter(\"example_mr\", \"Unmarshal Error\", 1)\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgomrjob.Counter(\"example_mr\", \"Map Lines Read\", 1)\n\t\tcounter.Incr(\"lines_read\", 1)\n\t\tfor k, _ := range record {\n\t\t\tcounter.Incr(k, 1)\n\t\t}\n\t}\n\tcounter.Flush()\n\tclose(out)\n\twg.Wait()\n\treturn nil\n}",
"func (s *mongoSource) start(iter Iter) {\n\tdefer s.Stop()\n\tdefer close(s.rows)\n\tfor !s.stopped {\n\t\tr := optimus.Row{}\n\t\tif !iter.Next(&r) {\n\t\t\tbreak\n\t\t}\n\t\ts.rows <- r\n\t}\n\ts.err = iter.Err()\n}",
"func (graphMinion *graphMinion) start() {\n\tgo func() {\n\t\tdefer graphMinion.wg.Done()\n\t\tfor {\n\n\t\t\t// pull reads from queue until done\n\t\t\tmappingData, ok := <-graphMinion.inputChannel\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif mappingData == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// increment the nodes contained in the mapping window\n\t\t\tmisc.ErrorCheck(graphMinion.graph.IncrementSubPath(mappingData.ContainedNodes, mappingData.Freq))\n\t\t}\n\t}()\n}",
"func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}",
"func (srvc *MapReduceService) Reduce(ctx *RequestContext, s *Status) error {\n\t// Initialize return status\n\t*s = FAILED\n\n\t// Provision data\n\tp := makeDataProvisioner(ctx, srvc)\n\tpaths, err := p.provisionData()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// Open files\n\tits := []io.Reader{}\n\tfor _, path := range paths {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tits = append(its, f)\n\t}\n\n\t// Create key-values iterator\n\tkvIt, err := utils.MakeKeyValueIterator(its...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treducer := roles.Reducer{}\n\tfor {\n\t\t// Check if all data has been processed\n\t\tif kvIt.HasNext() == false {\n\t\t\tbreak\n\t\t}\n\t\tkey, vIt := kvIt.Next()\n\n\t\t// Reduce values\n\t\tres, err := reducer.Reduce(key, vIt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Print values. TODO: remove and store to file\n\t\tfmt.Printf(\"%s: %s\\n\", key, res)\n\t}\n\t*s = SUCCESS\n\treturn nil\n}",
"func (job *MapOnlyJob) Run() error {\n\tif job.NewMapperF == nil {\n\t\treturn errors.New(\"MapOnlyJob: NewMapperF undefined!\")\n\t}\n\tif job.Source == nil {\n\t\treturn errors.New(\"MapOnlyJob: Source undefined!\")\n\t}\n\ttotalPart := 0\n\tendss := make([][]chan error, 0, len(job.Source))\n\tfor i := range job.Source {\n\t\tpartCount, err := job.Source[i].PartCount()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tends := make([]chan error, 0, partCount)\n\t\tfor part := 0; part < partCount; part++ {\n\t\t\tend := make(chan error, 1)\n\t\t\tends = append(ends, end)\n\t\t\tgo func(i, part, totalPart int, end chan error) {\n\t\t\t\tend <- func() error {\n\t\t\t\t\tmapper := job.NewMapperF(i, part)\n\t\t\t\t\tkey, val := mapper.NewKey(), mapper.NewVal()\n\t\t\t\t\tcs := make([]sophie.Collector, 0, len(job.Dest))\n\t\t\t\t\tfor _, dst := range job.Dest {\n\t\t\t\t\t\tc, err := dst.Collector(totalPart)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"open collector for source %d part %d failed\", i, part)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefer c.Close()\n\t\t\t\t\t\tcs = append(cs, c)\n\t\t\t\t\t}\n\t\t\t\t\titer, err := job.Source[i].Iterator(part)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \" open source %d part %d failed\", i, part)\n\t\t\t\t\t}\n\t\t\t\t\tdefer iter.Close()\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tif err := iter.Next(key, val); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) != io.EOF {\n\t\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"next failed\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err := mapper.Map(key, val, cs); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) == EOM {\n\t\t\t\t\t\t\t\tlog.Print(\"EOM returned, exit early\")\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"mapping %v %v failed\", key, val)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn errorsp.WithStacksAndMessage(mapper.MapEnd(cs), \"map end failed\")\n\t\t\t\t}()\n\t\t\t}(i, part, totalPart, end)\n\t\t\ttotalPart++\n\t\t}\n\t\tendss = append(endss, ends)\n\t}\n\tvar errReturned error\n\tfor _, ends := range endss {\n\t\tfor part, end := range ends {\n\t\t\tlog.Printf(\"Waiting for mapper %d...\", part)\n\t\t\tif err := <-end; err != nil {\n\t\t\t\tlog.Printf(\"Error returned for part %d: %v\", part, err)\n\t\t\t\terrReturned = err\n\t\t\t}\n\t\t\tlog.Printf(\"No error for mapper %d...\", part)\n\t\t}\n\t}\n\treturn errReturned\n}",
"func (w *noAggregationStreamWorker) run() {\n\tlog.Debugf(\"Starting streaming routine for the no-aggregation pipeline\")\n\n\tticker := time.NewTicker(noAggWorkerStreamCheckFrequency)\n\tdefer ticker.Stop()\n\tlogPayloads := config.Datadog.GetBool(\"log_payloads\")\n\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\n\tstopped := false\n\tvar stopBlockChan chan struct{}\n\tvar lastStream time.Time\n\n\tfor !stopped {\n\t\tstart := time.Now()\n\t\tserializedSamples := 0\n\n\t\tmetrics.Serialize(\n\t\t\tw.seriesSink,\n\t\t\tw.sketchesSink,\n\t\t\tfunc(seriesSink metrics.SerieSink, sketchesSink metrics.SketchesSink) {\n\t\t\tmainloop:\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\n\t\t\t\t\t// stop signal\n\t\t\t\t\tcase trigger := <-w.stopChan:\n\t\t\t\t\t\tstopped = true\n\t\t\t\t\t\tstopBlockChan = trigger.blockChan\n\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tn := time.Now()\n\t\t\t\t\t\tif serializedSamples > 0 && lastStream.Before(n.Add(-time.Second*1)) {\n\t\t\t\t\t\t\tlog.Debug(\"noAggregationStreamWorker: triggering an automatic payloads flush to the forwarder (no traffic since 1s)\")\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\n\t\t\t\t\t// receiving samples\n\t\t\t\t\tcase samples := <-w.samplesChan:\n\t\t\t\t\t\tlog.Debugf(\"Streaming %d metrics from the no-aggregation pipeline\", len(samples))\n\t\t\t\t\t\tfor _, sample := range samples {\n\t\t\t\t\t\t\t// enrich metric sample tags\n\t\t\t\t\t\t\tsample.GetTags(w.taggerBuffer, w.metricBuffer)\n\t\t\t\t\t\t\tw.metricBuffer.AppendHashlessAccumulator(w.taggerBuffer)\n\n\t\t\t\t\t\t\t// turns this metric sample into a serie\n\t\t\t\t\t\t\tvar serie metrics.Serie\n\t\t\t\t\t\t\tserie.Name = sample.Name\n\t\t\t\t\t\t\tserie.Points = []metrics.Point{{Ts: sample.Timestamp, Value: sample.Value}}\n\t\t\t\t\t\t\tserie.Tags = tagset.CompositeTagsFromSlice(w.metricBuffer.Copy())\n\t\t\t\t\t\t\tserie.Host = sample.Host\n\t\t\t\t\t\t\t// ignored when late but mimic dogstatsd traffic here anyway\n\t\t\t\t\t\t\tserie.Interval = 10\n\t\t\t\t\t\t\tw.seriesSink.Append(&serie)\n\n\t\t\t\t\t\t\tw.taggerBuffer.Reset()\n\t\t\t\t\t\t\tw.metricBuffer.Reset()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlastStream = time.Now()\n\n\t\t\t\t\t\tserializedSamples += len(samples)\n\t\t\t\t\t\tif serializedSamples > w.maxMetricsPerPayload {\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, func(serieSource metrics.SerieSource) {\n\t\t\t\tsendIterableSeries(w.serializer, start, serieSource)\n\t\t\t}, func(sketches metrics.SketchesSource) {\n\t\t\t\t// noop: we do not support sketches in the no-agg pipeline.\n\t\t\t})\n\n\t\tif stopped {\n\t\t\tbreak\n\t\t}\n\n\t\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\t}\n\n\tif stopBlockChan != nil {\n\t\tclose(stopBlockChan)\n\t}\n}",
"func (s *seriesValueGenerator) Start() error { return nil }",
"func (theBoss *theBoss) mapReads() error {\n\ttheBoss.alignments = make(chan *sam.Record, BUFFERSIZE)\n\n\t// set up the BAM if exact alignment is requested\n\tif !theBoss.info.Sketch.NoExactAlign {\n\t\tif err := theBoss.setupBAM(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// setup the waitgroups for the sketching and graphing minions\n\tvar wg1 sync.WaitGroup\n\tvar wg2 sync.WaitGroup\n\n\t// launch the graph minions (one minion per graph in the index)\n\ttheBoss.graphMinionRegister = make([]*graphMinion, len(theBoss.info.Store))\n\tfor _, graph := range theBoss.info.Store {\n\n\t\t// create, start and register the graph minion\n\t\tminion := newGraphMinion(theBoss, graph)\n\t\twg2.Add(1)\n\t\tminion.start(&wg2)\n\t\ttheBoss.graphMinionRegister[graph.GraphID] = minion\n\t}\n\n\t// launch the sketching minions (one per CPU)\n\tfor i := 0; i < theBoss.info.NumProc; i++ {\n\t\twg1.Add(1)\n\t\tgo func(workerNum int) {\n\t\t\tdefer wg1.Done()\n\n\t\t\t// keep a track of what this minion does\n\t\t\treceivedReads := 0\n\t\t\tmappedCount := 0\n\t\t\tmultimappedCount := 0\n\n\t\t\t// start the main processing loop\n\t\t\tfor {\n\n\t\t\t\t// pull reads from queue until done\n\t\t\t\tread, ok := <-theBoss.reads\n\t\t\t\tif !ok {\n\n\t\t\t\t\t// update the counts\n\t\t\t\t\ttheBoss.Lock()\n\t\t\t\t\ttheBoss.receivedReadCount += receivedReads\n\t\t\t\t\ttheBoss.mappedCount += mappedCount\n\t\t\t\t\ttheBoss.multimappedCount += multimappedCount\n\t\t\t\t\ttheBoss.Unlock()\n\n\t\t\t\t\t// end the sketching minion\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// get sketch for read\n\t\t\t\treadSketch, err := read.RunMinHash(theBoss.info.KmerSize, theBoss.info.SketchSize, false, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// get the number of k-mers in the sequence\n\t\t\t\tkmerCount := (len(read.Seq) - theBoss.info.KmerSize) + 1\n\n\t\t\t\t// query the LSH ensemble\n\t\t\t\tresults, err := theBoss.info.db.Query(readSketch, kmerCount, theBoss.info.ContainmentThreshold)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// if multiple graphs are returned, we need to deep copy the read\n\t\t\t\tdeepCopy := false\n\t\t\t\tif len(results) > 1 {\n\t\t\t\t\tdeepCopy = true\n\t\t\t\t}\n\n\t\t\t\t// augment graphs and optionally perform exact alignment\n\t\t\t\tfor graphID, hits := range results {\n\t\t\t\t\tif deepCopy {\n\t\t\t\t\t\treadCopy := *read.DeepCopy()\n\t\t\t\t\t\ttheBoss.graphMinionRegister[graphID].inputChannel <- &graphMinionPair{hits, readCopy}\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttheBoss.graphMinionRegister[graphID].inputChannel <- &graphMinionPair{hits, *read}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// update counts\n\t\t\t\treceivedReads++\n\t\t\t\tif len(results) > 0 {\n\t\t\t\t\tmappedCount++\n\t\t\t\t}\n\t\t\t\tif len(results) > 1 {\n\t\t\t\t\tmultimappedCount++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// control the channels\n\tgo func() {\n\n\t\t// wait for the sketching minions to finish\n\t\twg1.Wait()\n\n\t\t// shut down the graph minions input channels\n\t\tfor _, minion := range theBoss.graphMinionRegister {\n\t\t\tclose(minion.inputChannel)\n\t\t}\n\n\t\t// wait for the graph minions to finish\n\t\twg2.Wait()\n\n\t\t// end the alignment writer\n\t\tclose(theBoss.alignments)\n\n\t}()\n\n\t// collect the alignments and write them\n\tfor record := range theBoss.alignments {\n\t\t// check the record is valid\n\t\t//if sam.IsValidRecord(record) == false {\n\t\t//\tos.Exit(1)\n\t\t//}\n\t\ttheBoss.alignmentCount++\n\t\tif err := 
theBoss.bamwriter.Write(record); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// close the bam writer and return to the completed boss to the pipeline\n\tvar err error\n\tif !theBoss.info.Sketch.NoExactAlign {\n\t\terr = theBoss.bamwriter.Close()\n\t}\n\treturn err\n}",
"func (s Stream) Map(mapper Function) Stream {\n\toutstream := make(chan Entry, cap(s.stream))\n\n\tgo func() { // TODO: introduce a cut-off to prevent the go func from straying\n\t\tdefer close(outstream)\n\t\tif s.stream == nil {\n\t\t\treturn\n\t\t}\n\t\tfor val := range s.stream {\n\t\t\toutstream <- mapper(val)\n\t\t}\n\t}()\n\n\treturn Stream{\n\t\tstream: outstream,\n\t}\n}",
"func (s *Stream) FlatMap(name string, mapper FlatMapper) *Stream {\n\tp := NewFlatMapProcessor(mapper)\n\tn := s.tp.AddProcessor(name, p, s.parents)\n\n\treturn newStream(s.tp, []Node{n})\n}",
"func mapSum(itr Iterator, m *mapper) {\n\tn := float64(0)\n\tfor k, v := itr.Next(); k != 0; k, v = itr.Next() {\n\t\tn += v.(float64)\n\t}\n\tm.emit(itr.Time(), n)\n}",
"func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}",
"func (w *SimpleMapReduce) Reduce (reduceFn ReduceFn) *SimpleMapReduce {\n w.reduceFn = reduceFn\n return w\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
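Taken together with run and stop in the following records, start implies a simple lifecycle for callers: start everything, then range over the reducer's output channel until run closes it. The helper below is a usage sketch under that assumption; the real calling code is not part of this record, and stop (next record) appears intended for early cancellation rather than normal completion.

// collect drives one reducer to completion and gathers its emitted rows.
func collect(r *reducer) []map[string]interface{} {
	r.start() // starts every mapper, then launches r.run in a goroutine
	var out []map[string]interface{}
	for kv := range r.c { // run closes r.c once any mapper channel closes
		out = append(out, kv)
	}
	return out
}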
stop stops the reducer.
|
func (r *reducer) stop() {
for _, m := range r.mappers {
m.stop()
}
syncClose(r.done)
}
|
[
"func (r *reaper) stop() {\n\tr.stopCh <- struct{}{}\n}",
"func (p *literalProcessor) stop() { syncClose(p.done) }",
"func (m *mapper) stop() { syncClose(m.done) }",
"func (c *podChurner) stop() {\n\tc.stopCh <- struct{}{}\n}",
"func (s *Smr) stop() {\n\t// TODO: zq\n}",
"func (w *worker) stop() {\n\tatomic.StoreInt32(&w.running, 0)\n}",
"func (w *Watcher) stop() {\n\tatomic.StoreUint32(&w.isStop, 1)\n}",
"func (w *Watch) stop() {\n\tw.done <- struct{}{}\n}",
"func (rc *raftNode) stop() {\n\trc.stopHTTP()\n\tclose(rc.commitC)\n\tclose(rc.errorC)\n\trc.node.Stop()\n}",
"func (a *Arm) stop() {\n\ta.vel = 0\n}",
"func (s *schedule) stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\ts.running = false\n\ts.stopCh <- struct{}{}\n}",
"func (hc *HealthChecker) stop() {\n\thc.closec <- struct{}{}\n}",
"func (w *Washer) Stop() { close(w.done) }",
"func (s *Scanner) Stop() {\n\ts.stop <- struct{}{}\n}",
"func (c *Cluster) Stop() {\n\tclose(c.actionc)\n}",
"func (c *stoppableContext) stop() {\n\tc.stopOnce.Do(func() {\n\t\tclose(c.stopped)\n\t})\n\n\tc.stopWg.Wait()\n}",
"func (m *Metrics) Stop() {\n\tif m.stop != nil {\n\t\tclose(m.stop)\n\t\tm.stop = nil\n\t}\n}",
"func (hb *heartbeat) stop() {\n\tselect {\n\tcase hb.stopChan <- struct{}{}:\n\tdefault:\n\t}\n}",
"func (it *messageIterator) stop() {\n\tit.cancel()\n\tit.mu.Lock()\n\tit.checkDrained()\n\tit.mu.Unlock()\n\tit.wg.Wait()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
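syncClose itself is not shown in this record. Its probable shape can be inferred from the done channel's type (chan chan struct{}) and from the consumer side visible in the literalProcessor.run negative further down, which receives a reply channel from p.done and closes it. The reconstruction below is therefore an educated guess, not the original.

// syncClose asks a goroutine to shut down and blocks until it acknowledges.
func syncClose(done chan chan struct{}) {
	ch := make(chan struct{})
	done <- ch // deliver a reply channel to the running goroutine
	<-ch       // wait until the goroutine closes it before returning
}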
run runs the reducer loop to read mapper output and reduce it.
|
func (r *reducer) run() {
loop:
for {
// Combine all data from the mappers.
data := make(map[string][]interface{})
for _, m := range r.mappers {
kv, ok := <-m.C()
if !ok {
break loop
}
for k, v := range kv {
data[k] = append(data[k], v)
}
}
// Reduce each key.
for k, v := range data {
r.fn(k, v, r)
}
}
// Mark the channel as complete.
close(r.c)
}
|
[
"func (m *mapper) run() {\n\tfor m.itr.NextIterval() {\n\t\tm.fn(m.itr, m)\n\t}\n\tclose(m.c)\n}",
"func (s *JsonEntryCounter) Mapper(r io.Reader, w io.Writer) error {\n\tlog.Printf(\"map_input_file %s\", os.Getenv(\"map_input_file\"))\n\twg, out := mrproto.JsonInternalOutputProtocol(w)\n\n\t// for efficient counting, use an in-memory counter that flushes the least recently used item\n\t// less Mapper output makes for faster sorting and reducing.\n\tcounter := lru.NewLRUCounter(func(k interface{}, v int64) {\n\t\tout <- mrproto.KeyValue{k, v}\n\t}, 100)\n\n\tfor line := range mrproto.RawInputProtocol(r) {\n\t\tvar record map[string]json.RawMessage\n\t\tif err := json.Unmarshal(line, &record); err != nil {\n\t\t\tgomrjob.Counter(\"example_mr\", \"Unmarshal Error\", 1)\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgomrjob.Counter(\"example_mr\", \"Map Lines Read\", 1)\n\t\tcounter.Incr(\"lines_read\", 1)\n\t\tfor k, _ := range record {\n\t\t\tcounter.Incr(k, 1)\n\t\t}\n\t}\n\tcounter.Flush()\n\tclose(out)\n\twg.Wait()\n\treturn nil\n}",
"func (r *reducer) start() {\n\tfor _, m := range r.mappers {\n\t\tm.start()\n\t}\n\tgo r.run()\n}",
"func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\toutFile string, // write the output here\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\t//\n\t// You will need to write this function.\n\t//\n\t// You'll need to read one intermediate file from each map task;\n\t// reduceName(jobName, m, reduceTaskNumber) yields the file\n\t// name from map task m.\n\t//\n\t// Your doMap() encoded the key/value pairs in the intermediate\n\t// files, so you will need to decode them. If you used JSON, you can\n\t// read and decode by creating a decoder and repeatedly calling\n\t// .Decode(&kv) on it until it returns an error.\n\t//\n\t// You may find the first example in the golang sort package\n\t// documentation useful.\n\t//\n\t// reduceF() is the application's reduce function. You should\n\t// call it once per distinct key, with a slice of all the values\n\t// for that key. reduceF() returns the reduced value for that key.\n\t//\n\t// You should write the reduce output as JSON encoded KeyValue\n\t// objects to the file named outFile. We require you to use JSON\n\t// because that is what the merger than combines the output\n\t// from all the reduce tasks expects. There is nothing special about\n\t// JSON -- it is just the marshalling format we chose to use. Your\n\t// output code will look something like this:\n\t//\n\t// enc := json.NewEncoder(file)\n\t// for key := ... {\n\t// \tenc.Encode(KeyValue{key, reduceF(...)})\n\t// }\n\t// file.Close()\n\t//\n\n\t// CUTSOM\n\t// get all kv pairs from tmp data\n\t// Dataflow\n\t// 1. read each tmp file which map task generated\n\t// 2. decode file json data to kv\n\t// 3. sorted kv list by k\n\t// 4. group the kv pairs to k-(v list) which k is same\n\t// 5. get reduce(k, v-list) result and write the k-result to output file\n\n\tfmt.Println(\"doReduce1\")\n\tkvList := make([]KeyValue, 0)\n\tfor i := 0; i < nMap; i++ {\n\n\t\treadFileName := reduceName(jobName, i, reduceTaskNumber)\n\t\treadFile, readFileErr := os.Open(readFileName)\n\t\tif readFileErr != nil {\n\t\t\treturn\n\t\t}\n\t\tdec := json.NewDecoder(readFile)\n\t\tfor dec.More() {\n\t\t\tvar kv KeyValue\n\t\t\tdecErr := dec.Decode(&kv)\n\t\t\tif decErr != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tkvList = append(kvList, kv)\n\t\t}\n\t}\n\n\t//close(kvChan)\n\t//fmt.Println(\"doReduce chan finish\")\n\n\t// sorted or not\n\t// we can skip sort procedure daze!\n\tkvsMap := make(map[string][]string)\n\tfor _, kv := range kvList {\n\t\tif _, ok := kvsMap[kv.Key]; ok {\n\t\t\t// found key in the kvList\n\t\t\tkvsMap[kv.Key] = append(kvsMap[kv.Key], kv.Value)\n\t\t} else {\n\t\t\tkvsMap[kv.Key] = make([]string, 1)\n\t\t\tkvsMap[kv.Key] = append(kvsMap[kv.Key], kv.Value)\n\t\t}\n\t}\n\n\twriteFile, writeFileErr := os.Create(outFile)\n\tif writeFileErr != nil {\n\t\treturn\n\t}\n\tdefer writeFile.Close()\n\n\toutEnc := json.NewEncoder(writeFile)\n\tfor key, vlist := range kvsMap {\n\t\toutEnc.Encode(KeyValue{key, reduceF(key, vlist)})\n\t}\n\n}",
"func (p *AsmParser) run() {\n\tdefer close(p.Output)\n\n\tvar errs errorList\n\n\tif p.Error != nil {\n\t\treturn\n\t}\n\n\tvar i asm // instruction, reset to 0 after every write\n\tvar err error\n\tvar d, c, j asm // dest, comp, jump, OR together for final instruction\n\n\twriteResult := func() {\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\n\t\tif errs == nil {\n\t\t\tp.Output <- fmt.Sprintf(\"%.16b\", i)\n\t\t}\n\n\t\ti = 0\n\t}\n\n\tfor index, lex := range p.lexemes {\n\n\t\tswitch lex.instruction {\n\n\t\t// possible edge case, hitting EOF before an EOL\n\t\tcase asmEOF:\n\t\t\tfallthrough\n\n\t\tcase asmEOL:\n\t\t\tprev := p.previousInstruction(index)\n\n\t\t\tif prev.instruction != asmLABEL {\n\t\t\t\twriteResult()\n\t\t\t}\n\n\t\tcase asmAINSTRUCT:\n\t\t\tprev := p.previousInstruction(index)\n\n\t\t\tif prev.instruction == asmAINSTRUCT {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"WARNING - redundant loading of A-Register on line %d\\n\", prev.lineNum)\n\t\t\t}\n\n\t\t\ti, err = p.mapToA(lex)\n\n\t\tcase asmLABEL:\n\t\t\tindex += 2 // skip label and EOL\n\t\t\tcontinue\n\n\t\tcase asmJUMP:\n\t\t\tj, err = mapJmp(lex.value)\n\t\t\ti = i | j\n\n\t\tcase asmCOMP:\n\t\t\tc, err = mapCmp(lex.value)\n\t\t\ti = i | c\n\n\t\tcase asmDEST:\n\t\t\td, err = mapDest(lex.value)\n\t\t\ti = i | d\n\t\t}\n\n\t\tindex++\n\t}\n\n\tp.Error = errs.asError()\n}",
"func (m *FlowMapper) Run() {\n\tm.flowMap = ReadFlowMap(m.mapfile)\n\tzips := GetZipNames(m.workdir)\n\tfor _, name := range zips {\n\t\tsourcePath := filepath.Join(m.workdir, name)\n\t\ttargetPath := filepath.Join(m.workdir, \"peflocus_\"+name)\n\t\tDeleteExisting(targetPath)\n\t\tlog.Println(\"INFO: map flows in\", sourcePath, \"to\", targetPath)\n\t\tm.doIt(sourcePath, targetPath)\n\t}\n}",
"func (job *MapOnlyJob) Run() error {\n\tif job.NewMapperF == nil {\n\t\treturn errors.New(\"MapOnlyJob: NewMapperF undefined!\")\n\t}\n\tif job.Source == nil {\n\t\treturn errors.New(\"MapOnlyJob: Source undefined!\")\n\t}\n\ttotalPart := 0\n\tendss := make([][]chan error, 0, len(job.Source))\n\tfor i := range job.Source {\n\t\tpartCount, err := job.Source[i].PartCount()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tends := make([]chan error, 0, partCount)\n\t\tfor part := 0; part < partCount; part++ {\n\t\t\tend := make(chan error, 1)\n\t\t\tends = append(ends, end)\n\t\t\tgo func(i, part, totalPart int, end chan error) {\n\t\t\t\tend <- func() error {\n\t\t\t\t\tmapper := job.NewMapperF(i, part)\n\t\t\t\t\tkey, val := mapper.NewKey(), mapper.NewVal()\n\t\t\t\t\tcs := make([]sophie.Collector, 0, len(job.Dest))\n\t\t\t\t\tfor _, dst := range job.Dest {\n\t\t\t\t\t\tc, err := dst.Collector(totalPart)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"open collector for source %d part %d failed\", i, part)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefer c.Close()\n\t\t\t\t\t\tcs = append(cs, c)\n\t\t\t\t\t}\n\t\t\t\t\titer, err := job.Source[i].Iterator(part)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \" open source %d part %d failed\", i, part)\n\t\t\t\t\t}\n\t\t\t\t\tdefer iter.Close()\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tif err := iter.Next(key, val); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) != io.EOF {\n\t\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"next failed\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err := mapper.Map(key, val, cs); err != nil {\n\t\t\t\t\t\t\tif errorsp.Cause(err) == EOM {\n\t\t\t\t\t\t\t\tlog.Print(\"EOM returned, exit early\")\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn errorsp.WithStacksAndMessage(err, \"mapping %v %v failed\", key, val)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn errorsp.WithStacksAndMessage(mapper.MapEnd(cs), \"map end failed\")\n\t\t\t\t}()\n\t\t\t}(i, part, totalPart, end)\n\t\t\ttotalPart++\n\t\t}\n\t\tendss = append(endss, ends)\n\t}\n\tvar errReturned error\n\tfor _, ends := range endss {\n\t\tfor part, end := range ends {\n\t\t\tlog.Printf(\"Waiting for mapper %d...\", part)\n\t\t\tif err := <-end; err != nil {\n\t\t\t\tlog.Printf(\"Error returned for part %d: %v\", part, err)\n\t\t\t\terrReturned = err\n\t\t\t}\n\t\t\tlog.Printf(\"No error for mapper %d...\", part)\n\t\t}\n\t}\n\treturn errReturned\n}",
"func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tstream, err := ioutil.ReadFile(inFile)\n\tcheck_error(err)\n\n\tkeyVals := mapF(inFile, string(stream))\n\t\n\tresults := make(map[int][]KeyValue)\n\tfor _, kv := range keyVals {\n\t\t// Calculate R\n\t\tr := ihash(kv.Key) % nReduce\n\n\t\t// Map the results internally\n\t\tresults[r] = append(results[r], kv)\n\t}\n\n\tfor r, keyVals := range results {\n\t\toutputFileName := reduceName(jobName, mapTaskNumber, r)\n\t\tfile, err := os.Create(outputFileName)\n\t\tcheck_error(err)\n\t\tenc := json.NewEncoder(file)\n\n\t\tfor _, kv := range keyVals {\n\t\t\terr := enc.Encode(&kv)\n\t\t\tcheck_error(err)\n\t\t}\n\n\t\tfile.Close()\n\t}\n}",
"func doReduce(\n\tjobName string, // the name of the whole MapReduce job\n\treduceTaskNumber int, // which reduce task this is\n\tnMap int, // the number of map tasks that were run (\"M\" in the paper)\n\treduceF func(key string, values []string) string,\n) {\n\n\t//Read the keyValues of reducer on data structure\n\tjsonMap := make(map[string][]KeyValue)\n\tfor m := 0; m < nMap; m++ {\n\n\t\tfilename := reduceName(jobName, m, reduceTaskNumber)\n\n\t\tfile, err := os.Open(filename)\n\t\tcheckError(err)\n\n\t\tvar kv []KeyValue\n\t\tencoder := json.NewDecoder(file)\n\t\terr = encoder.Decode(&kv)\n\t\tcheckError(err)\n\n\t\tjsonMap[filename] = kv\n\t}\n\n\t//Create the encode data\n\treduceMap := make(map[string][]string)\n\tfor _, kvs := range jsonMap {\n\t\tfor _, kv := range kvs {\n\t\t\treduceMap[kv.Key] = append(reduceMap[kv.Key], kv.Value)\n\t\t}\n\t}\n\n\t//Create the output file\n\toutputFile := mergeName(jobName, reduceTaskNumber)\n\tfile, err := os.OpenFile(outputFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)\n\tcheckError(err)\n\tencoder := json.NewEncoder(file)\n\n\t//Call the reducer with the actual data\n\tfor key, values := range reduceMap {\n\t\tres := reduceF(key, values)\n\t\tencoder.Encode(KeyValue{key, res})\n\t}\n\n\tfile.Close()\n}",
"func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\tcontent, err := ioutil.ReadFile(inFile)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tkeyValues := mapF(inFile, string(content))\n\treduceFiles := make(map[string]*os.File)\n\n\tfor _, kv := range keyValues {\n\t\treduceTaskNumber := ihash(kv.Key) % nReduce\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\treduceFileName := reduceName(jobName, mapTaskNumber, reduceTaskNumber)\n\n\t\tif reduceFiles[reduceFileName] == nil {\n\t\t\tf, err := os.OpenFile(reduceFileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\treduceFiles[reduceFileName] = f\n\t\t}\n\n\t\tf := reduceFiles[reduceFileName]\n\t\tenc := json.NewEncoder(f)\n\t\tenc.Encode(&kv)\n\t}\n\n\tfor _, f := range reduceFiles {\n\t\tf.Close()\n\t}\n}",
"func (r *ride) run(ctx context.Context, outc chan<- pipeline.Event) error {\n\tpositions, errc := pipeline.Generate(ctx, r.positions)\n\tsegments, errc1 := pipeline.Reduce(ctx, positions, r.segments)\n\ttotal, err := r.fare(ctx, segments)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrm := pipeline.MergeErrors(ctx, errc, errc1)\n\tfor err := range errm {\n\t\tswitch {\n\t\tcase err == ErrLinesEmpty:\n\t\tcase err != nil:\n\t\t\treturn err\n\t\t}\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\tcase outc <- total:\n\t}\n\n\treturn nil\n}",
"func doMap(\n\tjobName string, // the name of the MapReduce job\n\tmapTaskNumber int, // which map task this is\n\tinFile string,\n\tnReduce int, // the number of reduce task that will be run (\"R\" in the paper)\n\tmapF func(file string, contents string) []KeyValue,\n) {\n\t//\n\t// You will need to write this function.\n\t//\n\t// The intermediate output of a map task is stored as multiple\n\t// files, one per destination reduce task. The file name includes\n\t// both the map task number and the reduce task number. Use the\n\t// filename generated by reduceName(jobName, mapTaskNumber, r) as\n\t// the intermediate file for reduce task r. Call ihash() (see below)\n\t// on each key, mod nReduce, to pick r for a key/value pair.\n\t//\n\t// mapF() is the map function provided by the application. The first\n\t// argument should be the input file name, though the map function\n\t// typically ignores it. The second argument should be the entire\n\t// input file contents. mapF() returns a slice containing the\n\t// key/value pairs for reduce; see common.go for the definition of\n\t// KeyValue.\n\t//\n\t// Look at Go's ioutil and os packages for functions to read\n\t// and write files.\n\t//\n\t// Coming up with a scheme for how to format the key/value pairs on\n\t// disk can be tricky, especially when taking into account that both\n\t// keys and values could contain newlines, quotes, and any other\n\t// character you can think of.\n\t//\n\t// One format often used for serializing data to a byte stream that the\n\t// other end can correctly reconstruct is JSON. You are not required to\n\t// use JSON, but as the output of the reduce tasks *must* be JSON,\n\t// familiarizing yourself with it here may prove useful. You can write\n\t// out a data structure as a JSON string to a file using the commented\n\t// code below. The corresponding decoding functions can be found in\n\t// common_reduce.go.\n\t//\n\t// enc := json.NewEncoder(file)\n\t// for _, kv := ... {\n\t// err := enc.Encode(&kv)\n\t//\n\t// Remember to close the file after you have written all the values!\n\t//\n\n\n\t//have a look about the parameters\n \t//fmt.Println(\"Some informations ->>>>> Map: job name = %s, input file = %s, map task id = %d, nReduce = %d\\n\", jobName, inFile, mapTaskNumber, nReduce)\n\n\t/*\n\tfull CODE\n\t*/\n\t// first read the inFile and trasfer the results to the String\n\tbytes, err := ioutil.ReadFile(inFile)\n \tif err != nil {\n \t\tlog.Fatalf(\"err: %s\", err)\n \t}\n\t// in this part mapF() in the test_test.go. 
It Split the (inFile, bytes) in words, and get []KeyValue\n\t// so we use a new Key structure to store the []KeyValue\n\tkvs := mapF(inFile, string(bytes))\n\t// This is standard use that can transfer the structure into JSON\n\tencoders := make([]*json.Encoder, nReduce);\n\t// for one input file, we need to generate *nReduce* output files for the reduce job\n\t// so in this part we create *nReduce* Encoders, which are stored in the encoders, the\n\t// file name is also generated by the reduceName provided by the MIT\n\tfor reduceTaskNumber := 0; reduceTaskNumber < nReduce; reduceTaskNumber++ {\n\t\tfilename := reduceName(jobName, mapTaskNumber, reduceTaskNumber)\n\t\t// Create() 默认权限 0666\n\t\tfile_ptr, err := os.Create(filename)\n\t\tif (err != nil) {\n\t\t\tlog.Fatal(\"Unable to create file: \", filename)\n\t\t}\n\t\tdefer file_ptr.Close()\n\t\tencoders[reduceTaskNumber] = json.NewEncoder(file_ptr);\n\t}\n\t// now what we are going to do is to encode the kv structure into JSON,\n\t// Please Remember the use of it, here we use *&kv* -- *the pointer* to get the structure \n\tfor _, kv := range kvs {\n\t\tkey := kv.Key\n\t\tHashedKey := int(ihash(key) % nReduce)\n\t\terr := encoders[HashedKey].Encode(&kv)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n\n\t// 这个版本会跑太久*** Test killed: ran too long (10m0s).\n/*\n\tfor _, kv := range kvs {\n\t\tkey := kv.Key\n\t\tHashedKey := int(ihash(key) % nReduce)\n\t\tfilename := reduceName(jobName, mapTaskNumber, HashedKey)\n\t\toutputFile, _ := os.OpenFile(filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)\n\t\tenc := json.NewEncoder(outputFile)\n\t\t//err := encoders[HashedKey].Encode(&kv)\n\t\terr := enc.Encode(&kv)\n\t\tfmt.Println(\"writing \", key)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\toutputFile.Close()\n\t}\n\n*/\n\t//outputFile, _ := os.OpenFile(\"output.txt\", os.O_WRONLY|os.O_CREATE, 0666)\n //enc2 := json.NewEncoder(outputFile)\n\t//var filename = reduceName(jobName, mapTaskNumber, nReduce)\n\t//var enc =\n\t//err2 := enc2.Encode(&movie2)\n\t//if err2 != nil {\n\t\t//fmt.Println(err2)\n\t//}\n\n\n\n\n\t//d1 := []byte(jobName)\n\t//ioutil.WriteFile(\"output.txt\", d1, 0644)\n\t /*\n\tcontents, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tfmt.Print(err)\n\t}\n\t//string(data)\n\t*/\n\n/*******\n\tbytes, err := ioutil.ReadFile(inFile)\n\tif (err != nil) {\n\t\t// log.Fatal() 打印输出并调用 exit(1)\n\t\tlog.Fatal(\"Unable to read file: \", inFile)\n\t}\n\n\t// 解析输入文件为 {key,val} 数组\n\tkv_pairs := mapF(inFile, string(bytes))\n\n\t// 生成一组 encoder 用来将 {key,val} 保存至对应文件\n\tencoders := make([]*json.Encoder, nReduce);\n\tfor reduceTaskNumber := 0; reduceTaskNumber < nReduce; reduceTaskNumber++ {\n\t\tfilename := reduceName(jobName, mapTaskNumber, reduceTaskNumber)\n\t\t// Create() 默认权限 0666\n\t\tfile_ptr, err := os.Create(filename)\n\t\tif (err != nil) {\n\t\t\tlog.Fatal(\"Unable to create file: \", filename)\n\t\t}\n\t\t// defer 后不能用括号\n\t\tdefer file_ptr.Close()\n\t\tencoders[reduceTaskNumber] = json.NewEncoder(file_ptr);\n\t}\n\n\t// 利用 encoder 将 {key,val} 写入对应的文件\n\tfor _, key_val := range kv_pairs {\n\t\tkey := key_val.Key\n\t\treduce_idx := ihash(key) % nReduce\n\t\terr := encoders[reduce_idx].Encode(key_val)\n\t\tif (err != nil) {\n\t\t\tlog.Fatal(\"Unable to write to file\")\n\t\t}\n\t}\n**********/\n}",
"func (w *Worker) DoMap(inputFileName string, mapperNum, numReducers uint) {\n\tfmt.Printf(\"MAP[%s:%d]: Processing '%s' for %d reducers.\\n\", w.jobName,\n\t\tmapperNum, inputFileName, numReducers)\n\n\tdata, err := ioutil.ReadFile(inputFileName)\n\tcheckErr(err, \"DoMap: Read Err.\\n\");\n\n\tm := w.mapF(inputFileName, string(data))\n\tp := make(map[uint][]KeyValue)\n\n\t// create a map of target partition to KeyValue list\n\tfor _, pair := range m {\n\t\tindex := uint(ihash(pair.Key)) % numReducers\n\t\tp[index] = append(p[index], pair)\n\t}\n\n\t// serialize KeyValue list and write to json file\n\tfor k, v := range p {\n\t\tblob, _ := json.Marshal(v)\n\t\terr := ioutil.WriteFile(reduceInputName(w.jobName, mapperNum, k), blob, 0644)\n\t\tcheckErr(err, \"DoMap: Failed to write partition.\\n\")\n\t}\n}",
"func (gm *gmap) run() {\n\t// Destruct gmap before exit.\n\tdefer func() {\n\t\tgm.raft.Stop()\n\t\tclose(gm.done)\n\t}()\n\t// Start gmap raft node.\n\tgo gm.raft.run()\n\t// Apply entries and snapshot get from raft.\n\tvar gmp gmapProgress\n\tfor {\n\t\tselect {\n\t\t// New apply.\n\t\tcase ap := <-gm.raft.applyc:\n\t\t\tgm.applyAll(&gmp, &ap)\n\t\t// gmap is closed.\n\t\tcase <-gm.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (e *executor) mapReduce(ctx context.Context, index string, shards []uint64, c *pql.Call, opt *ExecOptions, mapFn mapFunc, reduceFn reduceFunc) (result interface{}, err error) {\n\tspan, ctx := tracing.StartSpanFromContext(ctx, \"executor.mapReduce\")\n\tdefer span.Finish()\n\n\tch := make(chan mapResponse)\n\n\t// Wrap context with a cancel to kill goroutines on exit.\n\tctx, cancel := context.WithCancel(ctx)\n\t// Create an errgroup so we can wait for all the goroutines to exit\n\teg, ctx := errgroup.WithContext(ctx)\n\n\t// After we're done processing, we have to wait for any outstanding\n\t// functions in the ErrGroup to complete. If we didn't have an error\n\t// already at that point, we'll report any errors from the ErrGroup\n\t// instead.\n\tdefer func() {\n\t\tcancel()\n\t\terrWait := eg.Wait()\n\t\tif err == nil {\n\t\t\terr = errWait\n\t\t}\n\t}()\n\t// If this is the coordinating node then start with all nodes in the cluster.\n\t//\n\t// However, if this request is being sent from the primary then all\n\t// processing should be done locally so we start with just the local node.\n\tvar nodes []*disco.Node\n\tif !opt.Remote {\n\t\tnodes = disco.Nodes(e.Cluster.Nodes()).Clone()\n\t} else {\n\t\tnodes = []*disco.Node{e.Cluster.nodeByID(e.Node.ID)}\n\t}\n\n\t// Start mapping across all primary owners.\n\tif err = e.mapper(ctx, eg, ch, nodes, index, shards, c, opt, e.Cluster.ReplicaN == 1, mapFn, reduceFn); err != nil {\n\t\treturn nil, errors.Wrap(err, \"starting mapper\")\n\t}\n\n\t// Iterate over all map responses and reduce.\n\texpected := len(shards)\n\tdone := ctx.Done()\n\tfor expected > 0 {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn nil, ctx.Err()\n\t\tcase resp := <-ch:\n\t\t\t// On error retry against remaining nodes. If an error returns then\n\t\t\t// the context will cancel and cause all open goroutines to return.\n\n\t\t\t// We distinguish here between an error which indicates that the\n\t\t\t// node is not available (and therefore we need to failover to a\n\t\t\t// replica) and a valid error from a healthy node. In the case of\n\t\t\t// the latter, there's no need to retry a replica, we should trust\n\t\t\t// the error from the healthy node and return that immediately.\n\t\t\tif resp.err != nil && strings.Contains(resp.err.Error(), errConnectionRefused) {\n\t\t\t\t// Filter out unavailable nodes.\n\t\t\t\tnodes = disco.Nodes(nodes).FilterID(resp.node.ID)\n\n\t\t\t\t// Begin mapper against secondary nodes.\n\t\t\t\tif err := e.mapper(ctx, eg, ch, nodes, index, resp.shards, c, opt, true, mapFn, reduceFn); errors.Cause(err) == errShardUnavailable {\n\t\t\t\t\treturn nil, resp.err\n\t\t\t\t} else if err != nil {\n\t\t\t\t\treturn nil, errors.Wrap(err, \"mapping on secondary node\")\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t} else if resp.err != nil {\n\t\t\t\treturn nil, errors.Wrap(resp.err, \"mapping on primary node\")\n\t\t\t}\n\t\t\t// if we got a response that we aren't discarding\n\t\t\t// because it's an error, subtract it from our count...\n\t\t\texpected -= len(resp.shards)\n\n\t\t\t// Reduce value.\n\t\t\tresult = reduceFn(ctx, result, resp.result)\n\t\t\tvar ok bool\n\t\t\t// note *not* shadowed.\n\t\t\tif err, ok = result.(error); ok {\n\t\t\t\tcancel()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\t// note the deferred Wait above which might override this nil.\n\treturn result, nil\n}",
"func (p *literalProcessor) run() {\n\tfor {\n\t\tselect {\n\t\tcase ch := <-p.done:\n\t\t\tclose(ch)\n\t\t\treturn\n\t\tcase p.c <- map[string]interface{}{\"\": p.val}:\n\t\t}\n\t}\n}",
"func (l *Lexer) run() {\n\tdefer close(l.items)\n\teor := len(l.rec.States) - 1\n\tfor {\n\t\tfor i, state := range l.rec.States {\n\t\t\tif !state.StateFn(l, state.ItemType, state.Emit) {\n\t\t\t\tl.rec.ErrorFn(l)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif i == eor || l.eof {\n\t\t\t\tl.Emit(ItemEOR)\n\t\t\t}\n\t\t}\n\t\tif l.Peek() == EOF {\n\t\t\tl.Emit(ItemEOF)\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func (srvc *MapReduceService) Reduce(ctx *RequestContext, s *Status) error {\n\t// Initialize return status\n\t*s = FAILED\n\n\t// Provision data\n\tp := makeDataProvisioner(ctx, srvc)\n\tpaths, err := p.provisionData()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// Open files\n\tits := []io.Reader{}\n\tfor _, path := range paths {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tits = append(its, f)\n\t}\n\n\t// Create key-values iterator\n\tkvIt, err := utils.MakeKeyValueIterator(its...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treducer := roles.Reducer{}\n\tfor {\n\t\t// Check if all data has been processed\n\t\tif kvIt.HasNext() == false {\n\t\t\tbreak\n\t\t}\n\t\tkey, vIt := kvIt.Next()\n\n\t\t// Reduce values\n\t\tres, err := reducer.Reduce(key, vIt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Print values. TODO: remove and store to file\n\t\tfmt.Printf(\"%s: %s\\n\", key, res)\n\t}\n\t*s = SUCCESS\n\treturn nil\n}",
"func (theBoss *theBoss) mapReads() error {\n\ttheBoss.alignments = make(chan *sam.Record, BUFFERSIZE)\n\n\t// set up the BAM if exact alignment is requested\n\tif !theBoss.info.Sketch.NoExactAlign {\n\t\tif err := theBoss.setupBAM(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// setup the waitgroups for the sketching and graphing minions\n\tvar wg1 sync.WaitGroup\n\tvar wg2 sync.WaitGroup\n\n\t// launch the graph minions (one minion per graph in the index)\n\ttheBoss.graphMinionRegister = make([]*graphMinion, len(theBoss.info.Store))\n\tfor _, graph := range theBoss.info.Store {\n\n\t\t// create, start and register the graph minion\n\t\tminion := newGraphMinion(theBoss, graph)\n\t\twg2.Add(1)\n\t\tminion.start(&wg2)\n\t\ttheBoss.graphMinionRegister[graph.GraphID] = minion\n\t}\n\n\t// launch the sketching minions (one per CPU)\n\tfor i := 0; i < theBoss.info.NumProc; i++ {\n\t\twg1.Add(1)\n\t\tgo func(workerNum int) {\n\t\t\tdefer wg1.Done()\n\n\t\t\t// keep a track of what this minion does\n\t\t\treceivedReads := 0\n\t\t\tmappedCount := 0\n\t\t\tmultimappedCount := 0\n\n\t\t\t// start the main processing loop\n\t\t\tfor {\n\n\t\t\t\t// pull reads from queue until done\n\t\t\t\tread, ok := <-theBoss.reads\n\t\t\t\tif !ok {\n\n\t\t\t\t\t// update the counts\n\t\t\t\t\ttheBoss.Lock()\n\t\t\t\t\ttheBoss.receivedReadCount += receivedReads\n\t\t\t\t\ttheBoss.mappedCount += mappedCount\n\t\t\t\t\ttheBoss.multimappedCount += multimappedCount\n\t\t\t\t\ttheBoss.Unlock()\n\n\t\t\t\t\t// end the sketching minion\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// get sketch for read\n\t\t\t\treadSketch, err := read.RunMinHash(theBoss.info.KmerSize, theBoss.info.SketchSize, false, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// get the number of k-mers in the sequence\n\t\t\t\tkmerCount := (len(read.Seq) - theBoss.info.KmerSize) + 1\n\n\t\t\t\t// query the LSH ensemble\n\t\t\t\tresults, err := theBoss.info.db.Query(readSketch, kmerCount, theBoss.info.ContainmentThreshold)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// if multiple graphs are returned, we need to deep copy the read\n\t\t\t\tdeepCopy := false\n\t\t\t\tif len(results) > 1 {\n\t\t\t\t\tdeepCopy = true\n\t\t\t\t}\n\n\t\t\t\t// augment graphs and optionally perform exact alignment\n\t\t\t\tfor graphID, hits := range results {\n\t\t\t\t\tif deepCopy {\n\t\t\t\t\t\treadCopy := *read.DeepCopy()\n\t\t\t\t\t\ttheBoss.graphMinionRegister[graphID].inputChannel <- &graphMinionPair{hits, readCopy}\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttheBoss.graphMinionRegister[graphID].inputChannel <- &graphMinionPair{hits, *read}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// update counts\n\t\t\t\treceivedReads++\n\t\t\t\tif len(results) > 0 {\n\t\t\t\t\tmappedCount++\n\t\t\t\t}\n\t\t\t\tif len(results) > 1 {\n\t\t\t\t\tmultimappedCount++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// control the channels\n\tgo func() {\n\n\t\t// wait for the sketching minions to finish\n\t\twg1.Wait()\n\n\t\t// shut down the graph minions input channels\n\t\tfor _, minion := range theBoss.graphMinionRegister {\n\t\t\tclose(minion.inputChannel)\n\t\t}\n\n\t\t// wait for the graph minions to finish\n\t\twg2.Wait()\n\n\t\t// end the alignment writer\n\t\tclose(theBoss.alignments)\n\n\t}()\n\n\t// collect the alignments and write them\n\tfor record := range theBoss.alignments {\n\t\t// check the record is valid\n\t\t//if sam.IsValidRecord(record) == false {\n\t\t//\tos.Exit(1)\n\t\t//}\n\t\ttheBoss.alignmentCount++\n\t\tif err := 
theBoss.bamwriter.Write(record); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// close the bam writer and return to the completed boss to the pipeline\n\tvar err error\n\tif !theBoss.info.Sketch.NoExactAlign {\n\t\terr = theBoss.bamwriter.Close()\n\t}\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
emit sends a value to the reducer's output channel.
|
func (r *reducer) emit(key string, value interface{}) {
r.c <- map[string]interface{}{key: value}
}
|
[
"func (c channelConveyor) Emit(v interface{}) error {\n\tc.outputCh <- v\n\n\treturn nil\n}",
"func (m *mapper) emit(key int64, value interface{}) {\n\t// Encode the timestamp to the beginning of the key.\n\tbinary.BigEndian.PutUint64(m.key, uint64(key))\n\n\t// OPTIMIZE: Collect emit calls and flush all at once.\n\tm.c <- map[string]interface{}{string(m.key): value}\n}",
"func (b Broadcaster) Write(v interface{}) {\n\tutils.Debugf(\"Sending %v\\n\", v)\n\tb.Sendc <- v // write value on send channel\n}",
"func (c *Compiler) emitChanSend(frame *Frame, instr *ssa.Send) error {\n\tvalueType, err := c.getLLVMType(instr.Chan.Type().(*types.Chan).Elem())\n\tif err != nil {\n\t\treturn err\n\t}\n\tch, err := c.parseExpr(frame, instr.Chan)\n\tif err != nil {\n\t\treturn err\n\t}\n\tchanValue, err := c.parseExpr(frame, instr.X)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalueSize := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(chanValue.Type()), false)\n\tvalueAlloca := c.builder.CreateAlloca(valueType, \"chan.value\")\n\tc.builder.CreateStore(chanValue, valueAlloca)\n\tvalueAllocaCast := c.builder.CreateBitCast(valueAlloca, c.i8ptrType, \"chan.value.i8ptr\")\n\tc.createRuntimeCall(\"chanSendStub\", []llvm.Value{llvm.Undef(c.i8ptrType), ch, valueAllocaCast, valueSize}, \"\")\n\treturn nil\n}",
"func (l *lexer) emit(t tokenType) {\n\tdebugPrint(\"emit \" + string(l.buf))\n\tl.out <- token{lexeme: string(l.buf), t: t}\n\tl.buf = nil\n}",
"func (c *Client) Emit(moduleID string, val interface{}) {\n\tif c.client == nil {\n\t\tlog.Println(\"core: emit failed: client not ready\")\n\t\treturn\n\t}\n\n\tif !setupModules[moduleID].registration.hasEventHandler {\n\t\tpanic(\"core: attempt to emit event without an event handler \" +\n\t\t\t\"for module: \" + moduleID)\n\t}\n\n\tif !reflect.TypeOf(val).\n\t\tAssignableTo(setupModules[moduleID].registration.eventType) {\n\t\tlog.Println(\"core: refusing to emit: event value not assignable to \" +\n\t\t\t\"module's registered event type\")\n\t\treturn\n\t}\n\n\terr := c.client.Notify(EventMsg, Event{\n\t\tModuleID: moduleID,\n\t\tValue: val,\n\t})\n\tif err != nil {\n\t\tlog.Println(\"core: emit failed:\", err)\n\t}\n}",
"func (p *blockParser) emit(b Block) {\n\tp.blockChan <- b\n\tp.start = p.cur\n}",
"func (p *Parser) emit() {\n\tp.out <- *p.tag\n\tp.tag = nil\n}",
"func (s *Scanner) emit(tok token.Token) {\n\ts.results <- result{\n\t\tPos: s.f.Pos(s.start),\n\t\tTok: tok,\n\t\tLit: string(s.pending),\n\t}\n\ts.ignore()\n}",
"func (bus *EventBus) Emit(msg Message) {\n\tbus.input <- msg\n}",
"func emit(e *core.NormalizedEmitter, message string) {\n\te.Emit(core.Logs, &core.LogsArgs{\n\t\tLogs: message,\n\t})\n}",
"func emitOutput(ctx context.Context, n *node) stateFn {\n\tif n == nil || n.outputC == nil { // OMIT\n\t\treturn nil // OMIT\n\t} // OMIT\n\tselect {\n\tcase <-ctx.Done():\n\t\tn.err = ctx.Err()\n\t\treturn nil\n\tcase n.outputC <- n.output:\n\t}\n\treturn nil\n}",
"func (l *reader) emit(t itemType) {\n\tl.items <- item{t, l.current.String()}\n\tl.current.Reset()\n\tl.width = 0\n}",
"func emit(ch chan<- spec.Measurement, elapsed time.Duration, numBytes int64) {\n\tch <- spec.Measurement{\n\t\tAppInfo: &spec.AppInfo{\n\t\t\tElapsedTime: int64(elapsed) / int64(time.Microsecond),\n\t\t\tNumBytes: numBytes,\n\t\t},\n\t\tTest: spec.TestUpload,\n\t\tOrigin: spec.OriginClient,\n\t}\n}",
"func (p *spanParser) emit(s Span) {\n\tp.spanChan <- s\n\tp.start = p.cur\n}",
"func (e *encoder) emit(bits, nBits uint32) {\n\tnBits += e.nBits\n\tbits <<= 32 - nBits\n\tbits |= e.bits\n\tfor nBits >= 8 {\n\t\tb := uint8(bits >> 24)\n\t\te.writeByte(b)\n\t\tif b == 0xff {\n\t\t\te.writeByte(0x00)\n\t\t}\n\t\tbits <<= 8\n\t\tnBits -= 8\n\t}\n\te.bits, e.nBits = bits, nBits\n}",
"func (a *Actor) Send(m string) { a.input <- m }",
"func (g *GUI) Emit(name string, dat interface{}, f interface{}) error {\n\treturn g.Client.Emit(name, dat, f)\n}",
"func (o Output) Value() *core.Thunk {\n\treturn o.value\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
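A minimal, self-contained sketch of the emit pattern in the row above, assuming a hypothetical reducer stand-in that keeps only the output channel (the real type is not shown in full here):

package main

import "fmt"

// reducer is a hypothetical stand-in; only the output channel matters
// for this sketch.
type reducer struct {
	c chan map[string]interface{}
}

// emit sends one key/value pair downstream, as in the document above.
func (r *reducer) emit(key string, value interface{}) {
	r.c <- map[string]interface{}{key: value}
}

func main() {
	// Buffer of 1 so the send inside emit does not block this goroutine.
	r := &reducer{c: make(chan map[string]interface{}, 1)}
	r.emit("count", 42)
	fmt.Println(<-r.c) // map[count:42]
}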
reduceSum computes the sum of values for each key.
|
func reduceSum(key string, values []interface{}, r *reducer) {
var n float64
for _, v := range values {
n += v.(float64)
}
r.emit(key, n)
}
|
[
"func (m *Map) ReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tresult := 0\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceInt(reduce)\n\t}\n\treturn result\n}",
"func (m *Map) ReduceStringSum(reduce func(map[interface{}]interface{}) string) string {\n\tresult := \"\"\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceString(reduce)\n\t}\n\treturn result\n}",
"func (m *Map) ReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tresult := float64(0)\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceFloat64(reduce)\n\t}\n\treturn result\n}",
"func reduceFunc(input []mapreduce.KeyValue) (result []mapreduce.KeyValue) {\r\n\t// \tMaybe it's easier if we have an auxiliary structure? Which one?\r\n\t//\r\n\t// \tYou can check if a map have a key as following:\r\n\t// \t\tif _, ok := myMap[myKey]; !ok {\r\n\t//\t\t\t// Don't have the key\r\n\t//\t\t}\r\n\t//\r\n\t// \tReduce will receive KeyValue pairs that have string values, you may need\r\n\t// \tconvert those values to int before being able to use it in operations.\r\n\t// \tpackage strconv: func Atoi(s string) (int, error)\r\n\t//\r\n\t// \tIt's also possible to receive a non-numeric value (i.e. \"+\"). You can check the\r\n\t// \terror returned by Atoi and if it's not 'nil', use 1 as the value.\r\n\r\n\t/////////////////////////\r\n\t// YOUR CODE GOES HERE //\r\n\t/////////////////////////\r\n\r\n\tresult = make([]mapreduce.KeyValue, 0)\r\n\r\n\t// This auxiliary map is used to count the number of occurrences of a certain key\r\n\tauxiliaryMap := make(map[string]int)\r\n\r\n\tfor _, keyValuePair := range input {\r\n\r\n\t\t// If the value in the input key-value pair is numeric, then parse the value and update\r\n\t\t// the corresponding value in the auxiliary map.\r\n\t\t// If a key is not in the auxiliary map, 0 is returned as the corresponding value.\r\n\t\t// Using that fact, it's not necessary to check for the key.\r\n\t\tif value, err := strconv.Atoi(keyValuePair.Value); err == nil {\r\n\t\t\tauxiliaryMap[keyValuePair.Key] += value\r\n\r\n\t\t// If it's a non-numeric value, count as 1 occurrence\r\n\t\t// This considers that all possible non-numeric values are equivalent \r\n\t\t// (e.g. \"-\" or \"+\" have the same meaning when used as values)\r\n\t\t} else {\r\n\t\t\tauxiliaryMap[keyValuePair.Key] += 1\r\n\t\t}\r\n\t}\r\n\r\n\t// Convert the key-value pairs in auxiliary map to the output format (array of mapreduce.KeyValue structs)\r\n\tfor key, value := range auxiliaryMap {\r\n\t\tresult = append(result, mapreduce.KeyValue{key, strconv.Itoa(value)})\r\n\t}\r\n\treturn result\r\n}",
"func Sum(keys ...Vkey) Vkey {\n\tsum := Vkey{}\n\tfor _, v := range keys {\n\t\tsum.X += v.X\n\t\tsum.Y += v.Y\n\t}\n\treturn sum\n}",
"func (mr MrImpl) Reduce(key string, values []string) string {\n\tcounter := 0\n\tfor _, v := range values {\n\t\tval, err := strconv.Atoi(v)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tcounter += val\n\t}\n\n\treturn fmt.Sprintf(\"%d\", counter)\n}",
"func (m *Map) ParallelReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tvar recur func(splits []Split) int\n\trecur = func(splits []Split) int {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceInt(reduce)\n\t\t}\n\t\tvar left, right int\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceInt(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceInt(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}",
"func Sum(items []Value) (op int) {\n\tfor _, item := range items {\n\t\top += item.Value()\n\t}\n\treturn\n}",
"func (a Aggregate) Sum() float64 {\n\tvar total float64\n\tfor _, v := range a.values {\n\t\ttotal += v\n\t}\n\treturn total\n}",
"func (m *Map) ParallelReduceStringSum(reduce func(map[interface{}]interface{}) string) string {\n\tvar recur func(splits []Split) string\n\trecur = func(splits []Split) string {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceString(reduce)\n\t\t}\n\t\tvar left, right string\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceString(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceString(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}",
"func (m *Map) ParallelReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tvar recur func(splits []Split) float64\n\trecur = func(splits []Split) float64 {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceFloat64(reduce)\n\t\t}\n\t\tvar left, right float64\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceFloat64(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceFloat64(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}",
"func (ai aggregatingIterator) sum() float64 {\n\tvar sum float64\n\tfor i := range ai {\n\t\tsum = sum + ai[i].value()\n\t}\n\treturn sum\n}",
"func (m mathUtil) Sum(values ...float64) float64 {\n\tvar total float64\n\tfor _, v := range values {\n\t\ttotal += v\n\t}\n\treturn total\n}",
"func (r *HashReader) Sum(b []byte) []byte {\n\tr.drain()\n\treturn r.hasher.Sum(b)\n}",
"func SumFloats(m map[string]float64) float64 {\r\n\tvar s float64\r\n\tfor _, v := range m {\r\n\t\ts += v\r\n\t}\r\n\treturn s\r\n}",
"func SumInts(m map[string]int64) int64 {\r\n\tvar s int64\r\n\tfor _, v := range m {\r\n\t\ts += v\r\n\t}\r\n\treturn s\r\n}",
"func mapSum(itr Iterator, m *mapper) {\n\tn := float64(0)\n\tfor k, v := itr.Next(); k != 0; k, v = itr.Next() {\n\t\tn += v.(float64)\n\t}\n\tm.emit(itr.Time(), n)\n}",
"func (h *Hash) Sum(b []byte) []byte {\n\tx := h.Sum64()\n\treturn append(b,\n\t\tbyte(x>>0),\n\t\tbyte(x>>8),\n\t\tbyte(x>>16),\n\t\tbyte(x>>24),\n\t\tbyte(x>>32),\n\t\tbyte(x>>40),\n\t\tbyte(x>>48),\n\t\tbyte(x>>56))\n}",
"func MergeSum(vals []uint64) uint64 {\n\trv := vals[0]\n\tfor _, v := range vals[1:] {\n\t\trv += v\n\t}\n\treturn rv\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
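The reduceSum document above type-asserts each value to float64 and emits the running total under the same key. A hedged, runnable sketch of that reduction driven end to end, again with a minimal hypothetical reducer:

package main

import "fmt"

type reducer struct {
	c chan map[string]interface{}
}

func (r *reducer) emit(key string, value interface{}) {
	r.c <- map[string]interface{}{key: value}
}

// reduceSum mirrors the row's document: sum all float64 values for a key.
func reduceSum(key string, values []interface{}, r *reducer) {
	var n float64
	for _, v := range values {
		n += v.(float64)
	}
	r.emit(key, n)
}

func main() {
	r := &reducer{c: make(chan map[string]interface{}, 1)}
	reduceSum("cpu", []interface{}{1.5, 2.5, 4.0}, r)
	fmt.Println(<-r.c) // map[cpu:8]
}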
newBinaryExprEvaluator returns a new instance of binaryExprEvaluator.
|
func newBinaryExprEvaluator(e *Executor, op Token, lhs, rhs processor) *binaryExprEvaluator {
return &binaryExprEvaluator{
executor: e,
op: op,
lhs: lhs,
rhs: rhs,
c: make(chan map[string]interface{}, 0),
done: make(chan chan struct{}, 0),
}
}
|
[
"func NewBinary(left Expr, op *token.T, right Expr) *Binary {\n\treturn &Binary{\n\t\tLeft: left,\n\t\tOperator: op,\n\t\tRight: right,\n\t}\n}",
"func NewBinaryBooleanExpression(op OP, lE, rE Evaluator) (Evaluator, error) {\n\tswitch op {\n\tcase AND, OR:\n\t\treturn &booleanNode{\n\t\t\top: op,\n\t\t\tlS: true,\n\t\t\tlE: lE,\n\t\t\trS: true,\n\t\t\trE: rE,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, errors.New(\"binary boolean expressions require the operation to be one for the follwing 'and', 'or'\")\n\t}\n}",
"func NewBinaryExpr(op BinaryOp, lhs, rhs Expr) Expr {\n\t// assert(ExprWidth(lhs) == ExprWidth(rhs), \"binary expr width mismatch: op=%s (%T) %d != (%T) %d\", op, lhs, ExprWidth(lhs), rhs, ExprWidth(rhs))\n\n\tswitch op {\n\t// Arithmetic operators\n\tcase ADD:\n\t\treturn newAddExpr(lhs, rhs)\n\tcase SUB:\n\t\treturn newSubExpr(lhs, rhs)\n\tcase MUL:\n\t\treturn newMulExpr(lhs, rhs)\n\tcase UDIV, SDIV:\n\t\treturn newDivExpr(op, lhs, rhs)\n\tcase UREM, SREM:\n\t\treturn newRemExpr(op, lhs, rhs)\n\tcase AND:\n\t\treturn newAndExpr(lhs, rhs)\n\tcase OR:\n\t\treturn newOrExpr(lhs, rhs)\n\tcase XOR:\n\t\treturn newXorExpr(lhs, rhs)\n\tcase SHL:\n\t\treturn newShlExpr(lhs, rhs)\n\tcase LSHR:\n\t\treturn newLShrExpr(lhs, rhs)\n\tcase ASHR:\n\t\treturn newAShrExpr(lhs, rhs)\n\n\t// Comparison operators\n\tcase EQ:\n\t\treturn newEqExpr(lhs, rhs)\n\tcase NE:\n\t\treturn NewBinaryExpr(EQ, NewConstantExpr(0, WidthBool), NewBinaryExpr(EQ, lhs, rhs))\n\tcase ULT:\n\t\treturn newUltExpr(lhs, rhs)\n\tcase UGT:\n\t\treturn newUltExpr(rhs, lhs) // reverse\n\tcase ULE:\n\t\treturn newUleExpr(lhs, rhs)\n\tcase UGE:\n\t\treturn newUleExpr(rhs, lhs) // reverse\n\tcase SLT:\n\t\treturn newSltExpr(lhs, rhs)\n\tcase SGT:\n\t\treturn newSltExpr(rhs, lhs) // reverse\n\tcase SLE:\n\t\treturn newSleExpr(lhs, rhs)\n\tcase SGE:\n\t\treturn newSleExpr(rhs, lhs) // reverse\n\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n}",
"func NewBinaryNode(operator lex.Token, lhArg, rhArg Node) *BinaryNode {\n\t//u.Debugf(\"NewBinaryNode: %v %v %v\", lhArg, operator, rhArg)\n\treturn &BinaryNode{Args: []Node{lhArg, rhArg}, Operator: operator}\n}",
"func NewBinaryExpression(left Expression, op TokenType, right Expression) *BinaryExpression {\n\treturn &BinaryExpression{\n\t\tleft: left,\n\t\top: op,\n\t\tright: right,\n\t}\n}",
"func newBinaryOp(op string, expr1, expr2 Expression) Expression {\n\tswitch {\n\tcase expr1 != nil && expr2 != nil:\n\t\treturn &BinaryOp{\n\t\t\tOp: op,\n\t\t\tExpr1: expr1,\n\t\t\tExpr2: expr2,\n\t\t}\n\tcase expr1 != nil && expr2 == nil:\n\t\treturn expr1\n\tcase expr1 == nil && expr2 != nil:\n\t\treturn expr2\n\tcase expr1 == nil && expr2 == nil:\n\t\treturn nil\n\t}\n\tpanic(\"unreachable\")\n}",
"func NewBinary(l, r string) Element {\n\treturn Element{leftNonTerminal: l, rightNonTerminal: r}\n}",
"func NewBinaryOperation(t token.Token, binOpString string, l Expression, r Expression) (*BinaryOperation, error) {\n\tvar k BinaryOpType\n\n\tswitch binOpString {\n\tcase \"+\":\n\t\tk = AdditionBinaryOp\n\n\tcase \"-\":\n\t\tk = SubtractionBinaryOp\n\n\tcase \"*\":\n\t\tk = MultiplicationBinaryOp\n\n\tcase \"/\":\n\t\tk = DivisionBinaryOp\n\n\tdefault:\n\t\treturn nil, errors.Errorf(\"Could not decifer operation from supplied operand: %s\", binOpString)\n\t}\n\n\treturn &BinaryOperation{\n\t\tToken: t,\n\t\tOp: k,\n\t\tLeftNode: l,\n\t\tRightNode: r,\n\t}, nil\n}",
"func NewBinary(op circuit.Operation, a, b, o *Wire) *Gate {\n\tgate := &Gate{\n\t\tOp: op,\n\t\tA: a,\n\t\tB: b,\n\t\tO: o,\n\t}\n\ta.AddOutput(gate)\n\tb.AddOutput(gate)\n\to.SetInput(gate)\n\n\treturn gate\n}",
"func AsBinaryExpr(node ast.Node) *ast.BinaryExpr {\n\texpr, ok := node.(*ast.BinaryExpr)\n\tif !ok {\n\t\tpanic(\"expected *ast.BinaryExpr\")\n\t}\n\treturn expr\n}",
"func BinaryExp(ex1 Exp, ex2 Exp, op byte) *BinaryExpType {\n\tbe := BinaryExpType{ex1, ex2, op}\n\treturn &be\n}",
"func NewBinaryExpression(left Expression, op string, right Expression) Expression {\n\tlType := left.Type().GetKind()\n\trType := right.Type().GetKind()\n\n\tif lType != rType {\n\t\tpanic(\"mismatching types\")\n\t}\n\n\tvar retVal Expression\n\tswitch op {\n\tcase \"+\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerAddition{left: left, right: right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realAddition{left: left, right: right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"-\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerSubstraction{left: left, right: right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realSubstraction{left: left, right: right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"*\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerMultiplication{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realMultiplication{left: left, right: right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"/\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerDivision{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realDivision{left: left, right: right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"%\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerModulo{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"|\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerOr{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"^\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerXor{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"&\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerAnd{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"or\":\n\t\tif lType == BOOLEAN {\n\t\t\tretVal = &booleanOr{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"and\":\n\t\tif lType == BOOLEAN {\n\t\t\tretVal = &booleanAnd{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"==\":\n\t\tif lType == BOOLEAN {\n\t\t\tretVal = &booleanEqual{left, right}\n\t\t} else if lType == INTEGER {\n\t\t\tretVal = &integerEqual{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realEqual{left, right}\n\t\t} else if lType == FUNCTION {\n\t\t\tretVal = &funcEqual{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"!=\":\n\t\tif lType == BOOLEAN {\n\t\t\tretVal = &booleanNotEqual{left, right}\n\t\t} else if lType == INTEGER {\n\t\t\tretVal = &integerNotEqual{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realNotEqual{left, right}\n\t\t} else if lType == FUNCTION {\n\t\t\tretVal = &funcNotEqual{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"<\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerLessThan{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realLessThan{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \">\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerGreaterThan{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realGreaterThan{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \"<=\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = &integerLessOrEqual{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realLessOrEqual{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tcase \">=\":\n\t\tif lType == INTEGER {\n\t\t\tretVal = 
&integerGreaterOrEqual{left, right}\n\t\t} else if lType == REAL {\n\t\t\tretVal = &realGreaterOrEqual{left, right}\n\t\t} else {\n\t\t\tpanic(\"unsupported type\")\n\t\t}\n\tdefault:\n\t\tpanic(\"unsupported operand\")\n\t}\n\n\t// fmt.Println(retVal.String())\n\treturn retVal\n}",
"func binary(typ int, od1 *expr, op string, od2 *expr) *expr {\n\treturn &expr{\n\t\tsexp: append(exprlist{atomic(typ, op)}, od1, od2),\n\t}\n}",
"func (p *Planner) planBinaryExpr(e *Executor, expr *BinaryExpr) (processor, error) {\n\t// Create processor for LHS.\n\tlhs, err := p.planExpr(e, expr.LHS)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"lhs: %s\", err)\n\t}\n\n\t// Create processor for RHS.\n\trhs, err := p.planExpr(e, expr.RHS)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"rhs: %s\", err)\n\t}\n\n\t// Combine processors.\n\treturn newBinaryExprEvaluator(e, expr.Op, lhs, rhs), nil\n}",
"func NewBinary(d []byte) *Binary {\n\treturn &Binary{d, -1}\n}",
"func (er *expressionRewriter) constructBinaryOpFunction(l expression.Expression, r expression.Expression, op string) (expression.Expression, error) {\n\tlLen, rLen := expression.GetRowLen(l), expression.GetRowLen(r)\n\tif lLen == 1 && rLen == 1 {\n\t\treturn er.newFunction(op, types.NewFieldType(mysql.TypeTiny), l, r)\n\t} else if rLen != lLen {\n\t\treturn nil, expression.ErrOperandColumns.GenWithStackByArgs(lLen)\n\t}\n\tswitch op {\n\tcase ast.EQ, ast.NE:\n\t\tfuncs := make([]expression.Expression, lLen)\n\t\tfor i := 0; i < lLen; i++ {\n\t\t\tvar err error\n\t\t\tfuncs[i], err = er.constructBinaryOpFunction(expression.GetFuncArg(l, i), expression.GetFuncArg(r, i), op)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif op == ast.NE {\n\t\t\treturn expression.ComposeDNFCondition(er.sctx, funcs...), nil\n\t\t}\n\t\treturn expression.ComposeCNFCondition(er.sctx, funcs...), nil\n\tdefault:\n\t\tlarg0, rarg0 := expression.GetFuncArg(l, 0), expression.GetFuncArg(r, 0)\n\t\tvar expr1, expr2, expr3, expr4, expr5 expression.Expression\n\t\texpr1 = expression.NewFunctionInternal(er.sctx, ast.NE, types.NewFieldType(mysql.TypeTiny), larg0, rarg0)\n\t\texpr2 = expression.NewFunctionInternal(er.sctx, op, types.NewFieldType(mysql.TypeTiny), larg0, rarg0)\n\t\texpr3 = expression.NewFunctionInternal(er.sctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), expr1)\n\t\tvar err error\n\t\tl, err = expression.PopRowFirstArg(er.sctx, l)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr, err = expression.PopRowFirstArg(er.sctx, r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texpr4, err = er.constructBinaryOpFunction(l, r, op)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texpr5, err = er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr3, expression.Null, expr4)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr1, expr2, expr5)\n\t}\n}",
"func ToBinaryExpr(x ast.Node) *ast.BinaryExpr {\n\tif x, ok := x.(*ast.BinaryExpr); ok {\n\t\treturn x\n\t}\n\treturn NilBinaryExpr\n}",
"func NewEvaluationExpression(op OP, lB, rB string) (Evaluator, error) {\n\tl, r := strings.TrimSpace(lB), strings.TrimSpace(rB)\n\tif l == \"\" || r == \"\" {\n\t\treturn nil, fmt.Errorf(\"bindings cannot be empty; got %q, %q\", l, r)\n\t}\n\tswitch op {\n\tcase EQ, LT, GT:\n\t\treturn &evaluationNode{\n\t\t\top: op,\n\t\t\tlB: lB,\n\t\t\trB: rB,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, errors.New(\"evaluation expressions require the operation to be one for the follwing '=', '<', '>'\")\n\t}\n}",
"func evalBinaryOp(op string, a, b interface{}) (r interface{}, err error) {\n\n\tswitch op {\n\tcase \"+\":\n\t\treturn add(a, b)\n\tcase \"-\":\n\t\treturn sub(a, b)\n\tcase \"*\":\n\t\treturn mul(a, b)\n\tcase \"/\":\n\t\treturn quo(a, b)\n\tcase \"^\":\n\t\treturn exp(a, b)\n\tcase \"&\":\n\t\treturn and(a, b)\n\tcase \"|\":\n\t\treturn or(a, b)\n\tcase \"<\":\n\t\treturn lt(a, b)\n\tcase \">\":\n\t\treturn gt(a, b)\n\tcase \"=\":\n\t\treturn eql(a, b)\n\tcase \"<=\":\n\t\treturn lte(a, b)\n\tcase \">=\":\n\t\treturn gte(a, b)\n\tcase \"<<\":\n\t\treturn lsh(a, b)\n\tcase \">>\":\n\t\treturn rsh(a, b)\n\t}\n\n\treturn nil, fmt.Errorf(\"Unsupported operation %v\", op)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
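The constructor above wires two child processors and allocates unbuffered control channels. A reduced sketch of the same construction pattern; the Executor and Token types from the document are omitted, and the processor interface is a hypothetical minimal stand-in:

package main

import "fmt"

// processor is a hypothetical minimal stand-in for the interface implied above.
type processor interface {
	start()
}

// binaryExprEvaluator keeps only the fields needed to show the wiring.
type binaryExprEvaluator struct {
	op       string
	lhs, rhs processor
	c        chan map[string]interface{}
	done     chan chan struct{}
}

func newBinaryExprEvaluator(op string, lhs, rhs processor) *binaryExprEvaluator {
	return &binaryExprEvaluator{
		op:   op,
		lhs:  lhs,
		rhs:  rhs,
		c:    make(chan map[string]interface{}), // unbuffered, like make(chan ..., 0)
		done: make(chan chan struct{}),
	}
}

func main() {
	e := newBinaryExprEvaluator("+", nil, nil)
	fmt.Printf("op=%s unbuffered=%v\n", e.op, cap(e.c) == 0) // op=+ unbuffered=true
}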
start begins streaming values from the lhs/rhs processors.
|
func (e *binaryExprEvaluator) start() {
e.lhs.start()
e.rhs.start()
go e.run()
}
|
[
"func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}",
"func (w *Walker) startProcessing() {\n\tdoStart := false\n\tw.pipe.RLock()\n\tif w.pipe.filters == nil { // no processing up to now => start with initial node\n\t\tw.pipe.pushSync(w.initial, 0) // input is buffered, will return immediately\n\t\tdoStart = true // yes, we will have to start the pipeline\n\t}\n\tw.pipe.RUnlock()\n\tif doStart { // ok to be outside mutex as other goroutines will check pipe.empty()\n\t\tw.pipe.startProcessing() // must be outside of mutex lock\n\t}\n}",
"func (r *reducer) start() {\n\tfor _, m := range r.mappers {\n\t\tm.start()\n\t}\n\tgo r.run()\n}",
"func (s *seriesValueGenerator) Start() error { return nil }",
"func (m *mapper) start() {\n\tm.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,\n\t\tm.executor.min, m.executor.max, m.executor.interval)\n\tgo m.run()\n}",
"func (graphMinion *graphMinion) start() {\n\tgo func() {\n\t\tdefer graphMinion.wg.Done()\n\t\tfor {\n\n\t\t\t// pull reads from queue until done\n\t\t\tmappingData, ok := <-graphMinion.inputChannel\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif mappingData == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// increment the nodes contained in the mapping window\n\t\t\tmisc.ErrorCheck(graphMinion.graph.IncrementSubPath(mappingData.ContainedNodes, mappingData.Freq))\n\t\t}\n\t}()\n}",
"func main() {\n\tconst n = 100000\n\tstart := make(chan int)\n\tleft := start\n\tright := left\n\tfor i := 0; i < n; i++ {\n\t\tright = make(chan int)\n\t\tgo stage(left, right)\n\t\tleft = right\n\t}\n // make the last one a sink channel\n\tsink := left\n\t// inject the starting value into the daisy chain\n\tstart <- 0\n\tfmt.Println(<-sink)\n}",
"func (s *mongoSource) start(iter Iter) {\n\tdefer s.Stop()\n\tdefer close(s.rows)\n\tfor !s.stopped {\n\t\tr := optimus.Row{}\n\t\tif !iter.Next(&r) {\n\t\t\tbreak\n\t\t}\n\t\ts.rows <- r\n\t}\n\ts.err = iter.Err()\n}",
"func (s *Stream) initGraph() error {\n\ts.log.Print(\"Preparing stream operator graph\")\n\tif s.source == nil {\n\t\treturn fmt.Errorf(\"Operator graph failed, missing source\")\n\t}\n\n\t// if there are no ops, link source to sink\n\tif len(s.ops) == 0 && s.sink != nil {\n\t\ts.log.Print(\"No operator nodes found, binding source to sink directly\")\n\t\ts.sink.SetInput(s.source.GetOutput())\n\t\treturn nil\n\t}\n\n\t// link ops\n\ts.bindOps()\n\n\t// link last op to sink\n\tif s.sink != nil {\n\t\ts.sink.SetInput(s.ops[len(s.ops)-1].GetOutput())\n\t}\n\n\treturn nil\n}",
"func (w *noAggregationStreamWorker) run() {\n\tlog.Debugf(\"Starting streaming routine for the no-aggregation pipeline\")\n\n\tticker := time.NewTicker(noAggWorkerStreamCheckFrequency)\n\tdefer ticker.Stop()\n\tlogPayloads := config.Datadog.GetBool(\"log_payloads\")\n\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\n\tstopped := false\n\tvar stopBlockChan chan struct{}\n\tvar lastStream time.Time\n\n\tfor !stopped {\n\t\tstart := time.Now()\n\t\tserializedSamples := 0\n\n\t\tmetrics.Serialize(\n\t\t\tw.seriesSink,\n\t\t\tw.sketchesSink,\n\t\t\tfunc(seriesSink metrics.SerieSink, sketchesSink metrics.SketchesSink) {\n\t\t\tmainloop:\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\n\t\t\t\t\t// stop signal\n\t\t\t\t\tcase trigger := <-w.stopChan:\n\t\t\t\t\t\tstopped = true\n\t\t\t\t\t\tstopBlockChan = trigger.blockChan\n\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tn := time.Now()\n\t\t\t\t\t\tif serializedSamples > 0 && lastStream.Before(n.Add(-time.Second*1)) {\n\t\t\t\t\t\t\tlog.Debug(\"noAggregationStreamWorker: triggering an automatic payloads flush to the forwarder (no traffic since 1s)\")\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\n\t\t\t\t\t// receiving samples\n\t\t\t\t\tcase samples := <-w.samplesChan:\n\t\t\t\t\t\tlog.Debugf(\"Streaming %d metrics from the no-aggregation pipeline\", len(samples))\n\t\t\t\t\t\tfor _, sample := range samples {\n\t\t\t\t\t\t\t// enrich metric sample tags\n\t\t\t\t\t\t\tsample.GetTags(w.taggerBuffer, w.metricBuffer)\n\t\t\t\t\t\t\tw.metricBuffer.AppendHashlessAccumulator(w.taggerBuffer)\n\n\t\t\t\t\t\t\t// turns this metric sample into a serie\n\t\t\t\t\t\t\tvar serie metrics.Serie\n\t\t\t\t\t\t\tserie.Name = sample.Name\n\t\t\t\t\t\t\tserie.Points = []metrics.Point{{Ts: sample.Timestamp, Value: sample.Value}}\n\t\t\t\t\t\t\tserie.Tags = tagset.CompositeTagsFromSlice(w.metricBuffer.Copy())\n\t\t\t\t\t\t\tserie.Host = sample.Host\n\t\t\t\t\t\t\t// ignored when late but mimic dogstatsd traffic here anyway\n\t\t\t\t\t\t\tserie.Interval = 10\n\t\t\t\t\t\t\tw.seriesSink.Append(&serie)\n\n\t\t\t\t\t\t\tw.taggerBuffer.Reset()\n\t\t\t\t\t\t\tw.metricBuffer.Reset()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlastStream = time.Now()\n\n\t\t\t\t\t\tserializedSamples += len(samples)\n\t\t\t\t\t\tif serializedSamples > w.maxMetricsPerPayload {\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, func(serieSource metrics.SerieSource) {\n\t\t\t\tsendIterableSeries(w.serializer, start, serieSource)\n\t\t\t}, func(sketches metrics.SketchesSource) {\n\t\t\t\t// noop: we do not support sketches in the no-agg pipeline.\n\t\t\t})\n\n\t\tif stopped {\n\t\t\tbreak\n\t\t}\n\n\t\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\t}\n\n\tif stopBlockChan != nil {\n\t\tclose(stopBlockChan)\n\t}\n}",
"func (l *Learner) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase lrn := <-l.learnIn:\n\t\t\t\tval, out := l.handleLearn(lrn)\n\t\t\t\tif out {\n\t\t\t\t\tl.valOut <- val\n\t\t\t\t\tl.learned = map[int]Learn{}\n\t\t\t\t}\n\t\t\tcase <-l.stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (p *Processor) Forward(xs ...ag.Node) []ag.Node {\n\tg := p.Graph\n\tys := make([]ag.Node, len(xs))\n\tfor i, x := range xs {\n\t\tmean := g.ReduceMean(x)\n\t\tdev := g.SubScalar(x, mean)\n\t\tstdDev := g.Sqrt(g.Add(g.ReduceMean(g.Square(dev)), p.eps))\n\t\tys[i] = g.Add(g.Prod(g.DivScalar(dev, stdDev), p.w), p.b)\n\t}\n\treturn ys\n}",
"func (px *Paxos) Start(seq int, v interface{}) {\n // Your code here.\n if seq < px.Min() {\n return\n }\n go func() {\n instance := px.getInstance(seq)\n instance.mu.Lock()\n defer instance.mu.Unlock()\n for !px.dead {\n if instance.decidedValue != nil {\n break\n }\n instance.proposer.highestSeenProposedNumber++\n instance.proposer.proposedNumber = instance.proposer.highestSeenProposedNumber\n ok, value := px.propose(instance, seq)\n if !ok {\n continue\n }\n if value != nil {\n v = value\n }\n if !px.requestAccept(instance, seq, v) {\n continue\n }\n px.decide(seq, v)\n break\n }\n }()\n}",
"func (sp *StreamPool) Start() {\n\tfor {\n\t\tselect {\n\t\tcase <-sp.quitCh:\n\t\t\tsp.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}",
"func startPipelineFunction(numbers chan<- int) {\n\tfor i := 1; i <= 10; i++ {\n\t\tnumbers <- i\n\t}\n\tclose(numbers)\n}",
"func (g *Gosmonaut) Start(\n\ttypes OSMTypeSet,\n\tfuncEntityNeeded func(OSMType, OSMTags) bool,\n) {\n\t// Block until previous run finished\n\tg.lock.Lock()\n\tg.stream = make(chan osmPair, entitiesPerPrimitiveBlock)\n\n\t// Init vars\n\tg.funcEntityNeeded = funcEntityNeeded\n\tg.types = types\n\n\tgo func() {\n\t\t// Decode\n\t\tg.decode()\n\n\t\t// Finish\n\t\tclose(g.stream)\n\t\tg.lock.Unlock()\n\t}()\n}",
"func (pm *parallelFileReadManager) begin(numWorkers int, reader io.Reader) {\n\tpm.workerResultManagers = make([]workerResultManager, numWorkers)\n\tfor i := 0; i < numWorkers; i++ {\n\t\tpm.workerResultManagers[i] = workerResultManager{\n\t\t\tresultChan: make(chan *recordedOpResult),\n\t\t\tavailable: make(chan struct{}, 1),\n\t\t}\n\t\tpm.workerResultManagers[i].available <- struct{}{}\n\t}\n\n\tpm.parseJobsChan = make(chan *parseJob, numWorkers)\n\tpm.stopChan = make(chan struct{})\n\n\tpm.runFileReader(numWorkers, reader)\n\tpm.runParsePool(numWorkers)\n}",
"func (m *DistSQLMetrics) FlowStart() {\n\tm.FlowsActive.Inc(1)\n\tm.FlowsTotal.Inc(1)\n}",
"func (a *Agent) startProcessors(\n\tdst chan<- telegraf.Metric,\n\tprocessors models.RunningProcessors,\n) (chan<- telegraf.Metric, []*processorUnit, error) {\n\tvar units []*processorUnit\n\n\t// Sort from last to first\n\tsort.SliceStable(processors, func(i, j int) bool {\n\t\treturn processors[i].Config.Order > processors[j].Config.Order\n\t})\n\n\tvar src chan telegraf.Metric\n\tfor _, processor := range processors {\n\t\tsrc = make(chan telegraf.Metric, 100)\n\t\tacc := NewAccumulator(processor, dst)\n\n\t\terr := processor.Start(acc)\n\t\tif err != nil {\n\t\t\tfor _, u := range units {\n\t\t\t\tu.processor.Stop()\n\t\t\t\tclose(u.dst)\n\t\t\t}\n\t\t\treturn nil, nil, fmt.Errorf(\"starting processor %s: %w\", processor.LogName(), err)\n\t\t}\n\n\t\tunits = append(units, &processorUnit{\n\t\t\tsrc: src,\n\t\t\tdst: dst,\n\t\t\tprocessor: processor,\n\t\t})\n\n\t\tdst = src\n\t}\n\n\treturn src, units, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
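The ordering in the start method above matters: both children are started synchronously before the evaluator's own loop is launched in a goroutine. A tiny sketch of that ordering with hypothetical leaf processors:

package main

import "fmt"

// leaf is a hypothetical child processor that just reports its startup.
type leaf struct{ name string }

func (l *leaf) start() { fmt.Println("started", l.name) }

// evaluator mirrors the document: start children first, then run in a goroutine.
type evaluator struct{ lhs, rhs *leaf }

func (e *evaluator) start() {
	e.lhs.start()
	e.rhs.start()
	go e.run()
}

func (e *evaluator) run() {} // loop body elided in this sketch

func main() {
	e := &evaluator{lhs: &leaf{"lhs"}, rhs: &leaf{"rhs"}}
	e.start() // prints: started lhs, then started rhs
}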
eval evaluates two values using the evaluator's operation.
|
func (e *binaryExprEvaluator) eval(lhs, rhs interface{}) interface{} {
switch e.op {
case ADD:
return lhs.(float64) + rhs.(float64)
case SUB:
return lhs.(float64) - rhs.(float64)
case MUL:
return lhs.(float64) * rhs.(float64)
case DIV:
rhs := rhs.(float64)
if rhs == 0 {
return float64(0)
}
return lhs.(float64) / rhs
default:
// TODO: Validate operation & data types.
panic("invalid operation: " + e.op.String())
}
}
|
[
"func evaluate(arg1 *vector.Vector, oper *vector.Vector, arg2 *vector.Vector) *vector.Vector {\n\t//Store the operator in a temp string, to save typing it out\n\tvar operS string\n\toperS = oper.At(0).(string)\n\tvar val1, val2 int \n\tvar err1, err2 os.Error\n\tval1, err1 = strconv.Atoi(arg1.At(0).(string))\n\tval2, err2 = strconv.Atoi(arg2.At(0).(string))\n\t//screens for consecutive operators\n\tif(err1 != nil || err2 != nil){\n\t\tfmt.Println(\"expr: syntax error\")\n\t\tos.Exit(-2)\n\t}\n\tvar result int = -1\n\t//Evaluate based on the operator\n\tif operS == \"+\" {\n\t\tresult = val1 + val2\n\t} else if operS == \"-\" {\n\t\tresult = val1 - val2\n\t} else if operS == \"/\" {\n\t\tresult = val1 / val2\n\t} else if operS == \"*\" {\n\t\tresult = val1 * val2\n\t} else if operS == \"%\" {\n\t\tresult = val1 % val2\n\t}\n\t//Clear the arg1 vector and add the result to it, then return\n\t//(saves memory by not creating a new vector)\n\targ1.Cut(0, arg1.Len())\n\targ1.Push(strconv.Itoa(result))\n\treturn arg1\n}",
"func ExampleEval() {\n\tfmt.Println(Eval(\"5\"))\n\tfmt.Println(Eval(\"1 + 2\"))\n\tfmt.Println(Eval(\"1 - 2 + 3\"))\n\tfmt.Println(Eval(\"3 * ( 3 + 1 * 3 ) / 2\"))\n\tfmt.Println(Eval(\"3 * ( ( 3 + 1 ) * 3 ) / 2\"))\n\t//OutPut:\n\t//5\n\t//3\n\t//2\n\t//9\n\t//18\n}",
"func evalBinaryOp(op string, a, b interface{}) (r interface{}, err error) {\n\n\tswitch op {\n\tcase \"+\":\n\t\treturn add(a, b)\n\tcase \"-\":\n\t\treturn sub(a, b)\n\tcase \"*\":\n\t\treturn mul(a, b)\n\tcase \"/\":\n\t\treturn quo(a, b)\n\tcase \"^\":\n\t\treturn exp(a, b)\n\tcase \"&\":\n\t\treturn and(a, b)\n\tcase \"|\":\n\t\treturn or(a, b)\n\tcase \"<\":\n\t\treturn lt(a, b)\n\tcase \">\":\n\t\treturn gt(a, b)\n\tcase \"=\":\n\t\treturn eql(a, b)\n\tcase \"<=\":\n\t\treturn lte(a, b)\n\tcase \">=\":\n\t\treturn gte(a, b)\n\tcase \"<<\":\n\t\treturn lsh(a, b)\n\tcase \">>\":\n\t\treturn rsh(a, b)\n\t}\n\n\treturn nil, fmt.Errorf(\"Unsupported operation %v\", op)\n}",
"func (bp *BinaryPlus) Eval() float64 {\n\treturn bp.left.(Eval).Eval() + bp.right.(Eval).Eval()\n}",
"func Evaluate(expr string) (float64, error) {\n\tvar a float64\n\tvar b float64\n\tvar op string\n\t_, err := fmt.Sscanf(expr, \"%f%s%f\\n\", &a, &op, &b)\n\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"%s Unexpected error %s\", expr, err)\n\t}\n\n\tswitch operation := op; operation {\n\tcase \"+\":\n\t\treturn Add(a, b), nil\n\tcase \"-\":\n\t\treturn Substract(a, b), nil\n\tcase \"*\":\n\t\treturn Multiply(a, b), nil\n\tcase \"/\":\n\t\tresult, err := Divide(a, b)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"%s Invalid operator %s\", expr, op)\n\t}\n}",
"func TestEvaluatorRelational(t *testing.T) {\n\tvar values = make(map[string]int)\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tname: \"less than\",\n\t\t\texpression: \"1 < 2\",\n\t\t\texpectedValue: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"greater or equal\",\n\t\t\texpression: \"1 >= 2\",\n\t\t\texpectedValue: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"long expression\",\n\t\t\texpression: \"1 < 2 && 1 > 2 && 1 <= 2 && 1 >= 2 && 1==2 && 1 != 2\",\n\t\t\texpectedValue: 0,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult, err := evaluator.Evaluate(tc.expression, values)\n\t\t\tassert.NoError(t, err, \"unexpected error\")\n\t\t\tassert.Equal(t, tc.expectedValue, result)\n\t\t})\n\t}\n}",
"func TestEvaluatorArithmetic(t *testing.T) {\n\tvar values = make(map[string]int)\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tname: \"short expression\",\n\t\t\texpression: \"1+2*3\",\n\t\t\texpectedValue: 7,\n\t\t},\n\t\t{\n\t\t\tname: \"long expression\",\n\t\t\texpression: \"4/2-1+5%2\",\n\t\t\texpectedValue: 2,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult, err := evaluator.Evaluate(tc.expression, values)\n\t\t\tassert.NoError(t, err, \"unexpected error\")\n\t\t\tassert.Equal(t, tc.expectedValue, result)\n\t\t})\n\t}\n}",
"func TestEvaluatorValues(t *testing.T) {\n\tvar values = make(map[string]int)\n\tvalues[\"x\"] = 1\n\tvalues[\"y\"] = 2\n\texpression := \"x+y*2\"\n\n\tresult, err := evaluator.Evaluate(expression, values)\n\n\tassert.Nil(t, err, \"unexpected error\")\n\tassert.Equal(t, 5, result)\n}",
"func (op *OpPlus) Eval(x, y float32) float32 {\n\treturn op.LeftChild.Eval(x, y) + op.RightChild.Eval(x, y)\n}",
"func (e *EqualOp) Evaluate(left, right EvalResult) (EvalResult, error) {\n\tif out, err := e.IsTrue(left, right); err != nil || !out {\n\t\treturn resultFalse, err\n\t}\n\treturn resultTrue, nil\n}",
"func Eval(resolver Resolver, expr string) (interface{}, error) {\n\treturn EvalProgram(resolver, ParseString(expr))\n}",
"func (op *OpConstant) Eval(x, y float32) float32 {\n\treturn op.value\n}",
"func (op *OpX) Eval(x, y float32) float32 {\n\treturn x\n}",
"func (e *Exp) Eval() float64 {\n\te.init()\n\tresult, _ := e.eval(e.opTree)\n\treturn result\n}",
"func (ast *Binary) Eval(env *Env, ctx *Codegen, gen *ssa.Generator) (\n\tssa.Value, bool, error) {\n\tl, ok, err := ast.Left.Eval(env, ctx, gen)\n\tif err != nil || !ok {\n\t\treturn ssa.Undefined, ok, err\n\t}\n\tr, ok, err := ast.Right.Eval(env, ctx, gen)\n\tif err != nil || !ok {\n\t\treturn ssa.Undefined, ok, err\n\t}\n\n\tswitch lval := l.ConstValue.(type) {\n\tcase bool:\n\t\trval, ok := r.ConstValue.(bool)\n\t\tif !ok {\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"invalid types: %s %s %s\", l, ast.Op, r)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryAnd:\n\t\t\treturn gen.Constant(lval && rval, types.Bool), true, nil\n\t\tcase BinaryOr:\n\t\t\treturn gen.Constant(lval || rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %v %v' not supported\", l, ast.Op, r)\n\t\t}\n\n\tcase int32:\n\t\tvar rval int32\n\t\tswitch rv := r.ConstValue.(type) {\n\t\tcase int32:\n\t\t\trval = rv\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"invalid r-value %T %s %T\", lval, ast.Op, rv)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryMult:\n\t\t\treturn gen.Constant(lval*rval, types.Int32), true, nil\n\t\tcase BinaryDiv:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval/rval, types.Int32), true, nil\n\t\tcase BinaryMod:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval%rval, types.Int32), true, nil\n\t\tcase BinaryLshift:\n\t\t\treturn gen.Constant(lval<<rval, types.Int32), true, nil\n\t\tcase BinaryRshift:\n\t\t\treturn gen.Constant(lval>>rval, types.Int32), true, nil\n\t\tcase BinaryBand:\n\t\t\treturn gen.Constant(lval&rval, types.Int32), true, nil\n\t\tcase BinaryBclear:\n\t\t\treturn gen.Constant(lval&^rval, types.Int32), true, nil\n\t\tcase BinaryBor:\n\t\t\treturn gen.Constant(lval|rval, types.Int32), true, nil\n\t\tcase BinaryBxor:\n\t\t\treturn gen.Constant(lval^rval, types.Int32), true, nil\n\n\t\tcase BinaryPlus:\n\t\t\treturn gen.Constant(lval+rval, types.Int32), true, nil\n\t\tcase BinaryMinus:\n\t\t\treturn gen.Constant(lval-rval, types.Int32), true, nil\n\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryLt:\n\t\t\treturn gen.Constant(lval < rval, types.Bool), true, nil\n\t\tcase BinaryLe:\n\t\t\treturn gen.Constant(lval <= rval, types.Bool), true, nil\n\t\tcase BinaryGt:\n\t\t\treturn gen.Constant(lval > rval, types.Bool), true, nil\n\t\tcase BinaryGe:\n\t\t\treturn gen.Constant(lval >= rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %s %v' not implemented yet\", l, ast.Op, r)\n\t\t}\n\n\tcase uint64:\n\t\tvar rval uint64\n\t\tswitch rv := r.ConstValue.(type) {\n\t\tcase uint64:\n\t\t\trval = rv\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"%T: invalid r-value %v (%T)\", lval, rv, rv)\n\t\t}\n\t\tswitch ast.Op {\n\t\tcase BinaryMult:\n\t\t\treturn gen.Constant(lval*rval, types.Uint64), true, nil\n\t\tcase 
BinaryDiv:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval/rval, types.Uint64), true, nil\n\t\tcase BinaryMod:\n\t\t\tif rval == 0 {\n\t\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\t\"integer divide by zero\")\n\t\t\t}\n\t\t\treturn gen.Constant(lval%rval, types.Uint64), true, nil\n\t\tcase BinaryLshift:\n\t\t\treturn gen.Constant(lval<<rval, types.Uint64), true, nil\n\t\tcase BinaryRshift:\n\t\t\treturn gen.Constant(lval>>rval, types.Uint64), true, nil\n\n\t\tcase BinaryPlus:\n\t\t\treturn gen.Constant(lval+rval, types.Uint64), true, nil\n\t\tcase BinaryMinus:\n\t\t\treturn gen.Constant(lval-rval, types.Uint64), true, nil\n\n\t\tcase BinaryEq:\n\t\t\treturn gen.Constant(lval == rval, types.Bool), true, nil\n\t\tcase BinaryNeq:\n\t\t\treturn gen.Constant(lval != rval, types.Bool), true, nil\n\t\tcase BinaryLt:\n\t\t\treturn gen.Constant(lval < rval, types.Bool), true, nil\n\t\tcase BinaryLe:\n\t\t\treturn gen.Constant(lval <= rval, types.Bool), true, nil\n\t\tcase BinaryGt:\n\t\t\treturn gen.Constant(lval > rval, types.Bool), true, nil\n\t\tcase BinaryGe:\n\t\t\treturn gen.Constant(lval >= rval, types.Bool), true, nil\n\t\tdefault:\n\t\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Right,\n\t\t\t\t\"Binary.Eval: '%v %s %v' not implemented yet\", l, ast.Op, r)\n\t\t}\n\n\tdefault:\n\t\treturn ssa.Undefined, false, ctx.Errorf(ast.Left,\n\t\t\t\"%s %v %s: invalid l-value %v (%T)\", l, ast.Op, r, lval, lval)\n\t}\n}",
"func (m *rewrite) eval(arg expr.Node) (value.Value, bool, bool) {\n\tswitch arg := arg.(type) {\n\tcase *expr.NumberNode, *expr.StringNode:\n\t\tval, ok := vm.Eval(nil, arg)\n\t\treturn val, ok, false\n\tcase *expr.IdentityNode:\n\t\tif arg.IsBooleanIdentity() {\n\t\t\treturn value.NewBoolValue(arg.Bool()), true, false\n\t\t}\n\t\treturn value.NewStringValue(arg.Text), true, true\n\tcase *expr.ArrayNode:\n\t\tval, ok := vm.Eval(nil, arg)\n\t\treturn val, ok, false\n\t}\n\treturn nil, false, false\n}",
"func EvaluateInt(left Integer, right Integer, operator Token) Object {\n\tswitch operator.Type {\n\tcase PLUS:\n\t\treturn Integer{left.Value + right.Value}\n\tcase MINUS:\n\t\treturn Integer{left.Value - right.Value}\n\tcase DIV:\n\t\tif right.Value == 0 {\n\t\t\tRuntimeError(\"Divide by zero error\")\n\t\t} else if left.Value == 0 {\n\t\t\treturn Integer{0}\n\t\t}\n\t\treturn Integer{left.Value / right.Value}\n\tcase MOD:\n\t\tif right.Value == 0 {\n\t\t\tRuntimeError(\"Module by zero error\")\n\t\t}\n\t\treturn Integer{left.Value % right.Value}\n\tcase MULT:\n\t\tif left.Value == 0 || right.Value == 0 {\n\t\t\treturn Integer{0}\n\t\t}\n\t\treturn Integer{left.Value * right.Value}\n\tcase EXP:\n\t\tres := math.Pow(float64(left.Value), float64(right.Value))\n\t\treturn Integer{int(res)}\n\tcase EQUALEQUAL:\n\t\treturn Boolean{left.Value == right.Value}\n\tcase BANGEQUAL:\n\t\treturn Boolean{left.Value != right.Value}\n\tcase GREATER:\n\t\treturn Boolean{left.Value > right.Value}\n\tcase GREATEREQUAL:\n\t\treturn Boolean{left.Value >= right.Value}\n\tcase LESS:\n\t\treturn Boolean{left.Value < right.Value}\n\tcase LESSEQUAL:\n\t\treturn Boolean{left.Value <= right.Value}\n\tdefault:\n\t\tRuntimeError(fmt.Sprintf(\"Unsupported operation (%s) on values of type 'INTEGER'\", operator.Type.String()))\n\t\treturn NIL\n\t}\n}",
"func (p *onPredicate) eval(leftRow parser.DTuple, rightRow parser.DTuple) (bool, error) {\n\tp.qvals.populateQVals(p.leftInfo, leftRow)\n\tp.qvals.populateQVals(p.rightInfo, rightRow)\n\treturn sqlbase.RunFilter(p.filter, &p.p.evalCtx)\n}",
"func (op *OpDiv) Eval(x, y float32) float32 {\n\treturn op.LeftChild.Eval(x, y) / op.RightChild.Eval(x, y)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
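The eval method above dispatches on the operator and silently maps division by zero to zero rather than producing +Inf or panicking. A free-function sketch of the same arithmetic, with the Token operator replaced by a hypothetical plain string:

package main

import "fmt"

// evalOp is a hypothetical free-function version of the method above.
func evalOp(op string, lhs, rhs float64) float64 {
	switch op {
	case "+":
		return lhs + rhs
	case "-":
		return lhs - rhs
	case "*":
		return lhs * rhs
	case "/":
		// Mirrors the document: division by zero yields zero, not +Inf.
		if rhs == 0 {
			return 0
		}
		return lhs / rhs
	default:
		panic("invalid operation: " + op)
	}
}

func main() {
	fmt.Println(evalOp("+", 3, 4)) // 7
	fmt.Println(evalOp("/", 1, 0)) // 0
}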
newLiteralProcessor returns a literalProcessor for a given value.
|
func newLiteralProcessor(val interface{}) *literalProcessor {
return &literalProcessor{
val: val,
c: make(chan map[string]interface{}, 0),
done: make(chan chan struct{}, 0),
}
}
|
[
"func NewLiteral(value Object) Expression {\n\treturn &literal{value: value}\n}",
"func NewLiteral(expr string, flags ...CompileFlag) *Literal {\n\tvar v CompileFlag\n\tfor _, f := range flags {\n\t\tv |= f\n\t}\n\treturn &Literal{Expression: expr, Flags: v}\n}",
"func NewProcessor(d resources.Decoder) *Processor {\n\treturn &Processor{\n\t\tDecoder: d,\n\t}\n}",
"func NewLiteral(value string) *Spec {\n\treturn LiteralSpec(&Literal{\n\t\tValue: value,\n\t})\n}",
"func (p *parser) literal(r rune) {\n\tre := p.newRegexp(OpLiteral)\n\tre.Flags = p.flags\n\tif p.flags&FoldCase != 0 {\n\t\tr = minFoldRune(r)\n\t}\n\tre.Rune0[0] = r\n\tre.Rune = re.Rune0[:1]\n\tp.push(re)\n}",
"func createLiteral(tl TokenList) Literal {\n\tliteral := Literal{negated: false}\n\tif tl[0] != \"(\" {\n\t\tlog.Fatal(folError{\"error parsing literal, must start with '('\" +\n\t\t\t\" but started with: \" + tl[0]})\n\t}\n\ti := 1\n\tif tl[i] == \"not\" {\n\t\tliteral.negated = true\n\t\ti++\n\t}\n\tif len(tl) <= 2 {\n\t\tliteral.predicate = &Predicate{}\n\t\tliteral.args = make([]*Object, 0)\n\t} else {\n\t\tliteral.predicate = NewPredicate(tl[i:])\n\t\tliteral.args = make([]*Object, len(literal.predicate.parameters))\n\t}\n\treturn literal\n}",
"func NewProcessor() *Processor {\n\tp := &Processor{}\n\tp.Nodes = make(map[string]*Node)\n\treturn p\n}",
"func (e *exprHelper) NewLiteral(value ref.Val) ast.Expr {\n\treturn e.exprFactory.NewLiteral(e.nextMacroID(), value)\n}",
"func NewProcessor(numWorkers int, fn MappingFunction, handler OutputHandler) *Processor {\n\tproc := &Processor{\n\t\tnumWorkers: numWorkers,\n\t\tfn: fn,\n\t\thandler: handler,\n\t\tInput: make(chan bobstore.Ref),\n\t\toutput: make(chan *Output, numWorkers),\n\t}\n\n\tproc.wg.Add(numWorkers)\n\tfor i := 0; i < numWorkers; i++ {\n\t\tgo runWorker(proc)\n\t}\n\n\tproc.hwg.Add(1)\n\tgo runHandler(proc)\n\n\treturn proc\n}",
"func newSpanProcessor(config Config) (*spanProcessor, error) {\n\tskipExpr, err := filterspan.NewSkipExpr(&config.MatchConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsp := &spanProcessor{\n\t\tconfig: config,\n\t\tskipExpr: skipExpr,\n\t}\n\n\t// Compile ToAttributes regexp and extract attributes names.\n\tif config.Rename.ToAttributes != nil {\n\t\tfor _, pattern := range config.Rename.ToAttributes.Rules {\n\t\t\tre, err := regexp.Compile(pattern)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid regexp pattern %s\", pattern)\n\t\t\t}\n\n\t\t\trule := toAttributeRule{\n\t\t\t\tre: re,\n\t\t\t\t// Subexpression names will become attribute names during extraction.\n\t\t\t\tattrNames: re.SubexpNames(),\n\t\t\t}\n\n\t\t\tsp.toAttributeRules = append(sp.toAttributeRules, rule)\n\t\t}\n\t}\n\n\treturn sp, nil\n}",
"func NewLiteral(typ LiteralType, value interface{}) *Literal {\n\treturn &Literal{\n\t\tType: typ,\n\t\tValue: value,\n\t}\n}",
"func NewProcessor(name, version string, cf ConfigFunc, pf ProcessorFunc) ReceiveSendComponent {\n\treturn &processor{\n\t\tname: fmt.Sprintf(\"processor-%s\", name),\n\t\tversion: version,\n\t\tcf: cf,\n\t\tpf: pf,\n\t}\n}",
"func newPerfProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig, client *http.Client) (ingestion.Processor, error) {\n\treturn &perfProcessor{\n\t\tstore: ptracestore.Default,\n\t\tvcs: vcs,\n\t}, nil\n}",
"func (p *Program) GetLiteral() Token {\n\tmeta := parseMetaLiteral{}\n\n\tfor {\n\t\tswitch {\n\t\tcase string(p.GetNextChar()) == \";\" || string(p.GetNextChar()) == \" \":\n\t\t\tp.ShiftChar()\n\n\t\t\tt := Token{\n\t\t\t\tID: -6,\n\t\t\t\tType: \"literal\",\n\t\t\t\tString: p.GetAccumulator(),\n\t\t\t\t// Location: ,\n\t\t\t\t// TODO: this has to be based on the last token\n\t\t\t\t//Expect\n\t\t\t}\n\n\t\t\tswitch {\n\t\t\t// TODO: check enclosers before this\n\t\t\tcase meta.Period:\n\t\t\t\t// FIXME: default to 64 for now\n\t\t\t\ttrueValue, err := strconv.ParseFloat(p.GetAccumulator(), 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// TODO: idk handle this who cares\n\t\t\t\t\tfmt.Println(\"got a fuckin err brah\", err)\n\t\t\t\t}\n\n\t\t\t\tt.True = trueValue\n\t\t\t\treturn t\n\n\t\t\t\t//TODO: we would look up the value before the default in current then global scope\n\t\t\tdefault:\n\t\t\t\ttrueValue, err := strconv.Atoi(p.GetAccumulator())\n\t\t\t\tif err != nil {\n\t\t\t\t\t// TODO: this is where we would look up the value in the current variable scope and change the token\n\t\t\t\t}\n\t\t\t\tt.True = trueValue\n\t\t\t\treturn t\n\t\t\t}\n\n\t\tcase string(p.GetCurrentChar()) != \" \":\n\t\t\tswitch p.GetCurrentChar() {\n\t\t\tcase '.':\n\t\t\t\tif meta.Period {\n\t\t\t\t\tos.Exit(666)\n\t\t\t\t\t//TODO: just exit for now if there is two periods\n\t\t\t\t}\n\t\t\t\tmeta.Period = true\n\t\t\t\t// TODO: forget about these for now\n\t\t\t\t// case '\\'':\n\t\t\t\t// \tmeta.Tick = true\n\t\t\t\t// case \"[\":\n\t\t\t\t// \tmeta.Enclosed.Value = \"bracket\"\n\t\t\t\t// \tmeta.\n\t\t\t\t// }\n\n\t\t\t}\n\t\t\tp.ShiftChar()\n\n\t\t\t// TODO: need to implement someway to tell if we need to include the space in our literal (string) or not\n\t\t\t// TODO: we also need to do this for plus, minus, basically anything else that should separate a literal (i.e, 6 + 5, 6+5)\n\t\t}\n\t}\n}",
"func NewLiteral(arg interface{}) Expression {\n\treturn &Literal{Literal: NewDatum(arg)}\n}",
"func NewLiteral(v interface{}) Literal {\n\tswitch t := v.(type) {\n\tcase bool:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatBool(t),\n\t\t\tdatatype: XSDboolean,\n\t\t}\n\tcase int:\n\t\tif strconv.IntSize == 32 {\n\t\t\treturn Literal{\n\t\t\t\tvalue: strconv.FormatInt(int64(t), 10),\n\t\t\t\tdatatype: XSDint,\n\t\t\t}\n\t\t}\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatInt(int64(t), 10),\n\t\t\tdatatype: XSDlong,\n\t\t}\n\tcase int8:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatInt(int64(t), 10),\n\t\t\tdatatype: XSDbyte,\n\t\t}\n\tcase int16:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatInt(int64(t), 10),\n\t\t\tdatatype: XSDshort,\n\t\t}\n\tcase int32:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatInt(int64(t), 10),\n\t\t\tdatatype: XSDint,\n\t\t}\n\tcase int64:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatInt(int64(t), 10),\n\t\t\tdatatype: XSDlong,\n\t\t}\n\tcase uint:\n\t\tif strconv.IntSize == 32 {\n\t\t\treturn Literal{\n\t\t\t\tvalue: strconv.FormatUint(uint64(t), 10),\n\t\t\t\tdatatype: XSDunsignedInt,\n\t\t\t}\n\t\t}\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatUint(uint64(t), 10),\n\t\t\tdatatype: XSDunsignedLong,\n\t\t}\n\tcase uint8:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatUint(uint64(t), 10),\n\t\t\tdatatype: XSDunsignedByte,\n\t\t}\n\tcase uint16:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatUint(uint64(t), 10),\n\t\t\tdatatype: XSDunsignedShort,\n\t\t}\n\tcase uint32:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatUint(uint64(t), 10),\n\t\t\tdatatype: XSDunsignedInt,\n\t\t}\n\tcase uint64:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatUint(uint64(t), 10),\n\t\t\tdatatype: XSDunsignedLong,\n\t\t}\n\tcase float32:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatFloat(float64(t), 'E', -1, 32),\n\t\t\tdatatype: XSDfloat,\n\t\t}\n\tcase float64:\n\t\treturn Literal{\n\t\t\tvalue: strconv.FormatFloat(float64(t), 'E', -1, 64),\n\t\t\tdatatype: XSDdouble,\n\t\t}\n\tcase string:\n\t\treturn Literal{value: t, datatype: XSDstring}\n\tcase time.Time:\n\t\treturn Literal{\n\t\t\tvalue: t.UTC().Format(time.RFC3339Nano),\n\t\t\tdatatype: XSDdateTimeStamp,\n\t\t}\n\tdefault:\n\t\treturn Literal{\n\t\t\tvalue: fmt.Sprintf(\"%#v\", t),\n\t\t\tdatatype: XSDstring,\n\t\t}\n\t}\n}",
"func New(name string, store OperationStoreClient, pc protocol.Client) *OperationProcessor {\n\treturn &OperationProcessor{name: name, store: store, pc: pc}\n}",
"func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload *types.Any) (StreamProcessor, error) {\n\tcmd := exec.CommandContext(ctx, name, args...)\n\tcmd.Env = os.Environ()\n\tcmd.Env = append(cmd.Env, env...)\n\n\tvar payloadC io.Closer\n\tif payload != nil {\n\t\tdata, err := proto.Marshal(payload)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr, w, err := os.Pipe()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgo func() {\n\t\t\tio.Copy(w, bytes.NewReader(data))\n\t\t\tw.Close()\n\t\t}()\n\n\t\tcmd.ExtraFiles = append(cmd.ExtraFiles, r)\n\t\tpayloadC = r\n\t}\n\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"%s=%s\", mediaTypeEnvVar, imt))\n\tvar (\n\t\tstdin io.Reader\n\t\tcloser func() error\n\t\terr error\n\t)\n\tif f, ok := stream.(RawProcessor); ok {\n\t\tstdin = f.File()\n\t\tcloser = f.File().Close\n\t} else {\n\t\tstdin = stream\n\t}\n\tcmd.Stdin = stdin\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd.Stdout = w\n\n\tstderr := bytes.NewBuffer(nil)\n\tcmd.Stderr = stderr\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\tp := &binaryProcessor{\n\t\tcmd: cmd,\n\t\tr: r,\n\t\tmt: rmt,\n\t\tstderr: stderr,\n\t}\n\tgo p.wait()\n\n\t// close after start and dup\n\tw.Close()\n\tif closer != nil {\n\t\tcloser()\n\t}\n\tif payloadC != nil {\n\t\tpayloadC.Close()\n\t}\n\treturn p, nil\n}",
"func NewProcessor(\n\tname string,\n\tlog logr.Logger,\n\tcc ClientCommandRunner,\n\tmeterDefStore *MeterDefinitionStore,\n\tprocessor ObjectResourceMessageProcessor,\n) Processor {\n\treturn &processorImpl{\n\t\tname: name,\n\t\tlog: log,\n\t\tcc: cc,\n\t\tmeterDefStore: meterDefStore,\n\t\tprocessor: processor,\n\t\tdigestersSize: 1,\n\t\tretryCount: 3,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
run executes the processor loop.
|
func (p *literalProcessor) run() {
for {
select {
case ch := <-p.done:
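			// Acknowledge the shutdown request by closing the reply channel the caller sent.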
close(ch)
return
case p.c <- map[string]interface{}{"": p.val}:
}
}
}
|
[
"func (e *Executor) Run() { e.loop() }",
"func (r *Runner) run() {\n\tfor {\n\t\ttask := r.rq.Pop()\n\t\tr.process(task)\n\t}\n}",
"func (p *SingleLineParser) run() {\n\tfor input := range p.inputChan {\n\t\tp.process(input)\n\t}\n\tp.lineHandler.Stop()\n}",
"func (alg *Algorand) run() {\n\ttime.Sleep(100 * time.Millisecond)\n\n\tgo alg.forkLoop()\n\n\t// propose block\n\tfor {\n\t\tselect {\n\t\tcase <-alg.quitCh:\n\t\t\treturn\n\t\tdefault:\n\t\t\talg.processMain()\n\t\t}\n\t}\n\n}",
"func (tfm *trxFlowMonitor) run() {\n\t// make sure we are orchestrated\n\tif tfm.mgr == nil {\n\t\tpanic(fmt.Errorf(\"no svc manager set on %s\", tfm.name()))\n\t}\n\n\t// start go routine for processing\n\ttfm.mgr.started(tfm)\n\tgo tfm.execute()\n}",
"func (tfu *txFlowUpdater) run() {\n\t// start go routine for processing\n\ttfu.wg.Add(1)\n\tgo tfu.schedule()\n}",
"func (bf *brainfog) run() {\n\tfor bf.ip < len(bf.program) {\n\t\terr := bf.doInstruction()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tclose(bf.outCh)\n}",
"func (c *Cyclone) run() {\n\nrunloop:\n\tfor {\n\t\tselect {\n\t\tcase <-c.Shutdown:\n\t\t\t// received shutdown, drain input channel which will be\n\t\t\t// closed by main\n\t\t\tgoto drainloop\n\t\tcase msg := <-c.Input:\n\t\t\tif msg == nil {\n\t\t\t\t// this can happen if we read the closed Input channel\n\t\t\t\t// before the closed Shutdown channel\n\t\t\t\tcontinue runloop\n\t\t\t}\n\t\t\tif err := c.process(msg); err != nil {\n\t\t\t\tc.Death <- err\n\t\t\t\t<-c.Shutdown\n\t\t\t\tbreak runloop\n\t\t\t}\n\t\t}\n\t}\n\ndrainloop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-c.Input:\n\t\t\tif msg == nil {\n\t\t\t\t// channel is closed\n\t\t\t\tbreak drainloop\n\t\t\t}\n\t\t\tc.process(msg)\n\t\t}\n\t}\n}",
"func (ftm *FtmBridge) run() {\n\tftm.wg.Add(1)\n\tgo ftm.observeBlocks()\n}",
"func (bft *BftService) Run() {\n\tbft.subscribeMessageNotifiee()\n\tbft.proc.Go(bft.loop)\n\t// bft.proc.Go(bft.update)\n}",
"func (m *Monitor) run(ctx context.Context) {\n\tdefer close(m.stopCh)\n\tdefer close(m.events)\n\tdefer m.log.Info(\"event loop stopped\")\n\tf := filters.NewArgs()\n\tf.Add(\"event\", \"start\")\n\tf.Add(\"event\", \"die\")\n\toptions := types.EventsOptions{Filters: f}\n\tfor {\n\t\terr := func() error {\n\t\t\tm.log.Info(\"processing existing containers\")\n\t\t\tif err := m.processContainers(ctx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.log.Info(\"starting event loop\")\n\t\t\tmsgChan, errChan := m.client.Events(ctx, options)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase msg := <-msgChan:\n\t\t\t\t\tif err := m.processMessage(ctx, msg); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tcase err := <-errChan:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tif err == context.Canceled {\n\t\t\treturn\n\t\t}\n\t\tm.log.Error(err)\n\t\tm.log.Info(\"reconnecting in 30 seconds\")\n\t\tselect {\n\t\tcase <-time.After(30 * time.Second):\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (this *Connection) run() {\n\tgo this.routineMain()\n}",
"func (gb *GameBoy) Run() {\n\t// Number of extra clocks consumed in the last tick.\n\tclockDebt := 0\n\n\tfor {\n\t\tselect {\n\n\t\tcase <-gb.clk.C:\n\t\t\tclockDebt = gb.RunClocks(CPUClock/BaseClock - clockDebt)\n\n\t\tcase event := <-gb.events:\n\t\t\tgb.jp.Handle(event)\n\n\t\tcase frame := <-gb.ppu.F:\n\t\t\tselect {\n\t\t\tcase gb.F <- frame:\n\t\t\tdefault:\n\t\t\t}\n\n\t\t}\n\t}\n}",
"func (r *reaper) runLoop() {\n\tfor {\n\t\tselect {\n\t\tcase <-r.sigs:\n\t\t\tprocs, err := ps.Processes()\n\t\t\tif err != nil {\n\t\t\t\tklog.Warningf(\"reaper: failed to get all procs: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, p := range procs {\n\t\t\t\t\treaped := waitIfZombieStunnel(p)\n\t\t\t\t\tif reaped {\n\t\t\t\t\t\t// wait for only one process per SIGCHLD received over channel. It\n\t\t\t\t\t\t// doesn't have to be the same process that triggered the\n\t\t\t\t\t\t// particular SIGCHLD (there's no way to tell anyway), the\n\t\t\t\t\t\t// intention is to reap zombies as they come.\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-r.stopCh:\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func (h *AutoscalersController) RunControllerLoop(stopCh <-chan struct{}) {\n\th.processingLoop(stopCh)\n}",
"func (s *Server) loopRun(pc *PeerConn, handler Handler) error {\n\tfor {\n\t\tmsg, err := pc.ReadMsg()\n\t\tswitch err {\n\t\tcase nil:\n\t\tcase io.EOF:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\ts := err.Error()\n\t\t\tif strings.Contains(s, \"closed\") {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"fail to decode the message from '%s': %s\",\n\t\t\t\tpc.RemoteAddr().String(), s)\n\t\t}\n\n\t\tif err = s.Config.HandleMessage(pc, msg, handler); err != nil {\n\t\t\treturn fmt.Errorf(\"fail to handle peer message from '%s': %s\",\n\t\t\t\tpc.RemoteAddr().String(), err)\n\t\t}\n\t}\n}",
"func (v *V2RayPoint) RunLoop() {\n\tgo v.pointloop()\n}",
"func (ip *ImageProcessor) Run() {\n\tfor {\n\t\tpc := <-ip.Chan\n\t\t// Set R, G, and B\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)] = pc.Red\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)+1] = pc.Green\n\t\tip.Image.Pix[(uint32(pc.Y)*800*4)+(uint32(pc.X)*4)+2] = pc.Blue\n\t\twriteImage(ip.OutFile, &ip.Image)\n\t}\n}",
"func (trd *trxDispatcher) run() {\n\t// make sure we are orchestrated\n\tif trd.mgr == nil {\n\t\tpanic(fmt.Errorf(\"no svc manager set on %s\", trd.name()))\n\t}\n\n\t// start the block observer ticker\n\ttrd.bot = time.NewTicker(trxDispatchBlockUpdateTicker)\n\n\t// signal orchestrator we started and go\n\ttrd.mgr.started(trd)\n\tgo trd.execute()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
stop stops the processor from sending values.
|
func (p *literalProcessor) stop() { syncClose(p.done) }
|
[
"func (r *reaper) stop() {\n\tr.stopCh <- struct{}{}\n}",
"func (s *Smr) stop() {\n\t// TODO: zq\n}",
"func (c *podChurner) stop() {\n\tc.stopCh <- struct{}{}\n}",
"func (w *Watcher) stop() {\n\tatomic.StoreUint32(&w.isStop, 1)\n}",
"func (dh *DeviceHandler) stop(ctx context.Context) {\n\tlogger.Debug(\"stopping-device-handler\")\n\tdh.exitChannel <- 1\n}",
"func (t *transport) Stop() { t.stop <- true }",
"func (i2c I2C) stop() {\n\t// Send stop condition.\n\tavr.TWCR.Set(avr.TWCR_TWEN | avr.TWCR_TWINT | avr.TWCR_TWSTO)\n\n\t// Wait for stop condition to be executed on bus.\n\tfor !avr.TWCR.HasBits(avr.TWCR_TWSTO) {\n\t}\n}",
"func (s *schedule) stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\ts.running = false\n\ts.stopCh <- struct{}{}\n}",
"func (w *Watch) stop() {\n\tw.done <- struct{}{}\n}",
"func (p *RemotePeer) stop() {\n\tp.stopChan <- struct{}{}\n}",
"func (c *stoppableContext) stop() {\n\tc.stopOnce.Do(func() {\n\t\tclose(c.stopped)\n\t})\n\n\tc.stopWg.Wait()\n}",
"func (w *worker) stop() {\n\tatomic.StoreInt32(&w.running, 0)\n}",
"func (hb *heartbeat) stop() {\n\tselect {\n\tcase hb.stopChan <- struct{}{}:\n\tdefault:\n\t}\n}",
"func (s *ContinuousScanner) Stop() {\n\ts.stop <- struct{}{}\n}",
"func (hc *HealthChecker) stop() {\n\thc.closec <- struct{}{}\n}",
"func (t *BitMexGoroutine) Stop() {\n\t// 受信のループのチャンネルを終了\n\tclose(t.done)\n}",
"func (b *batchSender) Stop() {\n\tclose(b.ch)\n\n\t// If the buffer can be finished off with 2 or less calls, remove the sleep interval\n\t// so it processes whatever is left without any sleeping.\n\tif len(b.ch) < (b.maxBatch * 2) { //nolint:gomnd // magic number `2` is explained in comment above.\n\t\tb.interval = 0\n\t}\n}",
"func (c *Processor) Stop() (err error) {\n\tc.runState = RunStateStopped\n\treturn\n}",
"func (s *Scanner) Stop() {\n\ts.stop <- struct{}{}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
syncClose signals shutdown over a "done" channel and waits for the receiver to close the reply channel.
|
func syncClose(done chan chan struct{}) {
	ch := make(chan struct{})
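	// Hand the responder a reply channel, then block until it is closed back.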
done <- ch
<-ch
}
|
[
"func (o *Switch) CloseAsync() {\n\to.close()\n}",
"func (ref *SyncProducer) WaitForClose() {\n\t<-ref.closeChannel\n}",
"func (a *Async) Close() {\n\ta.quit <- true\n\t// wait for watcher quit\n\t<-a.quit\n}",
"func (p *Preserver) CloseAsync() {\n\tp.r.CloseAsync()\n}",
"func (z *ZMQ4) CloseAsync() {\n\tif atomic.CompareAndSwapInt32(&z.running, 1, 0) {\n\t\tclose(z.closeChan)\n\t}\n}",
"func (t *TestProcessor) CloseAsync() {\n\tprintln(\"Closing async\")\n}",
"func (ch *Channel) Close() {}",
"func (f *Files) CloseAsync() {\n}",
"func (f *File) CloseAsync() {\n\tif atomic.CompareAndSwapInt32(&f.running, 1, 0) {\n\t\tclose(f.closeChan)\n\t}\n}",
"func (cw http2closeWaiter) Wait() {\n\t<-cw\n}",
"func (i *FanInReflect) CloseAsync() {\n\tfor _, input := range i.inputs {\n\t\tif closable, ok := input.(types.Closable); ok {\n\t\t\tclosable.CloseAsync()\n\t\t}\n\t}\n\tif atomic.CompareAndSwapInt32(&i.running, 1, 0) {\n\t\tclose(i.closeChan)\n\t}\n}",
"func (recv *IOChannel) Close() {\n\tC.g_io_channel_close((*C.GIOChannel)(recv.native))\n\n\treturn\n}",
"func TestRTCPeerConnection_Close(t *testing.T) {\n\t// Limit runtime in case of deadlocks\n\tlim := test.TimeOut(time.Second * 20)\n\tdefer lim.Stop()\n\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tpcOffer, pcAnswer, err := newPair()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tawaitSetup := make(chan struct{})\n\tpcAnswer.OnDataChannel(func(d *RTCDataChannel) {\n\t\tclose(awaitSetup)\n\t})\n\n\t_, err = pcOffer.CreateDataChannel(\"data\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = signalPair(pcOffer, pcAnswer)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t<-awaitSetup\n\n\terr = pcOffer.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = pcAnswer.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func (srv *Server) WaitClose() {\n\t<-srv.closeChan\n}",
"func (m *MQTT) CloseAsync() {\n\tgo func() {\n\t\tm.connMut.Lock()\n\t\tif m.client != nil {\n\t\t\tm.client.Disconnect(0)\n\t\t\tm.client = nil\n\t\t}\n\t\tm.connMut.Unlock()\n\t}()\n}",
"func (r *SampleReceiver) Close() {\n\tcclog.ComponentDebug(r.name, \"CLOSE\")\n\n\t// Close server like http.Shutdown()\n\n\t// in case of own go routine, send the signal and wait\n\t// r.done <- true\n\t// r.wg.Wait()\n}",
"func DoneChanClosed(oid uint64) bool {\n\tci, ok := GetManagedObject(oid)\n\tif !ok {\n\t\tpanic(\"failed to get the done chan\")\n\t}\n\tc := ci.(<-chan struct{})\n\tselect {\n\tcase <-c:\n\t\treturn true\n\tdefault:\n\t}\n\treturn false\n}",
"func TestCloseChannel_NonBlockingChanClose(t *testing.T) {\n\tclient := NewStubCloseClient(\n\t\t[]lnrpc.CloseStatusUpdate{chanPendingCloseUpdate()}, io.EOF)\n\tresp, err := runCloseChannel(&client, []string{FundingTxidString})\n\trequire.NoError(t, err)\n\trequire.Equal(t,\n\t\t\"{\\n\\t\\\"closing_txid\\\": \\\"0000000000000000000000000000000089000000000000000000000000000000\\\"\\n}\\n\",\n\t\tresp,\n\t\t\"Incorrect response from closeChannel.\")\n}",
"func (c *Connector) Close() {\n\t// Stop waiting to acquire the lock.\n\tclose(c.done)\n\t// Use write lock to modify the pipe handle.\n\tc.mu.Lock()\n\tc.pipe.Close()\n\tc.mu.Unlock()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
tagsHash returns a hash of tag key/value pairs.
|
func (r *Row) tagsHash() uint64 {
h := fnv.New64a()
keys := r.tagsKeys()
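	// Fold each key/value pair into the FNV-1a hash in the order tagsKeys returns them
	// (assumed stable, so equal tag sets hash identically).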
for _, k := range keys {
h.Write([]byte(k))
h.Write([]byte(r.Tags[k]))
}
return h.Sum64()
}
|
[
"func ComputeTagsHash(tags []string) string {\n\thash := \"\"\n\tif len(tags) > 0 {\n\t\t// do not sort original slice\n\t\ttags = copyArray(tags)\n\t\th := fnv.New64()\n\t\tsort.Strings(tags)\n\t\tfor _, i := range tags {\n\t\t\th.Write([]byte(i)) //nolint:errcheck\n\t\t}\n\t\thash = strconv.FormatUint(h.Sum64(), 16)\n\t}\n\treturn hash\n}",
"func hash(ls prometheus.Tags) uint64 {\n\tlbs := make(labels.Labels, 0, len(ls))\n\tfor k, v := range ls {\n\t\tlbs = append(lbs, labels.Label{\n\t\t\tName: k,\n\t\t\tValue: v,\n\t\t})\n\t}\n\n\tsort.Slice(lbs[:], func(i, j int) bool {\n\t\treturn lbs[i].Name < lbs[j].Name\n\t})\n\n\treturn lbs.Hash()\n}",
"func ParseHashTags(strMessage string) []string {\n\tbInTag := false\n\trunes := make([]string, 0, 30)\n\tstrTag := \"\"\n\tbuf := make([]byte, 4)\n\tstrTags := make([]string, 0, 30)\n\tvar n int\n\n\tfor _, c := range strMessage {\n\t\tswitch {\n\t\tcase c == '#':\n\t\t\tif bInTag == true {\n\t\t\t\tstrTag = strings.Join(runes, \"\")\n\t\t\t\tstrTags = append(strTags, strTag)\n\t\t\t}\n\t\t\trunes = make([]string, 0, 30)\n\t\t\tbInTag = true\n\t\tcase unicode.IsSpace(c) || unicode.IsPunct(c) || c == '@':\n\t\t\tif bInTag == true {\n\t\t\t\tstrTag = strings.Join(runes, \"\")\n\t\t\t\tstrTags = append(strTags, strTag)\n\t\t\t\trunes = make([]string, 0, 30)\n\t\t\t\tbInTag = false\n\t\t\t}\n\t\tdefault:\n\t\t\tif bInTag == true {\n\t\t\t\tn = utf8.EncodeRune(buf, c)\n\t\t\t\trunes = append(runes, string(buf[:n]))\n\t\t\t}\n\t\t}\n\t}\n\tif bInTag == true {\n\t\tstrTag = strings.Join(runes, \"\")\n\t\tstrTags = append(strTags, strTag)\n\t}\n\n\treturn strTags\n}",
"func (dtk *DcmTagKey) Hash() uint32 {\n\treturn ((uint32(int(dtk.group)<<16) & 0xffff0000) | (uint32(int(dtk.element) & 0xffff)))\n}",
"func (ts *TagSet) HashH() uint64 {\n\treturn ts.hashH\n}",
"func calculateAttributesHash(attributes []string) (attrHash string) {\n\n\tkeys := make([]string, len(attributes))\n\n\tfor _, k := range attributes {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\tvalues := make([]byte, len(keys))\n\n\tfor _, k := range keys {\n\t\tvb := []byte(k)\n\t\tfor _, bval := range vb {\n\t\t\tvalues = append(values, bval)\n\t\t}\n\t}\n\tattributesHash := primitives.Hash(values)\n\treturn hex.EncodeToString(attributesHash)\n\n}",
"func GitHashFotTag(gitRepo *git.Repository, tagName string) (hash plumbing.Hash, err error) {\n\tvar ref *plumbing.Reference\n\tref, err = gitRepo.Tag(tagName)\n\tif errors.Is(err, git.ErrTagNotFound) && !strings.HasPrefix(tagName, \"v\") {\n\t\tref, err = gitRepo.Tag(\"v\" + tagName)\n\t}\n\tif err != nil {\n\t\treturn plumbing.ZeroHash, fmt.Errorf(\"error getting commit for tag %s: %w\", tagName, err)\n\t}\n\n\treturn ref.Hash(), nil\n}",
"func (s *Service) Tags(c context.Context) (res []*academy.Tag) {\n\tif v, ok := s.TagsCache[academy.TagClassMap(academy.H5)]; ok {\n\t\tres = v\n\t}\n\treturn\n}",
"func NewHashlessTagsAccumulator() *HashlessTagsAccumulator {\n\treturn &HashlessTagsAccumulator{\n\t\t// Slice will grow as more tags are added to it. 128 tags\n\t\t// should be enough for most metrics.\n\t\tdata: make([]string, 0, 128),\n\t}\n}",
"func tags(kv ...string) map[string]string {\n\tm := make(map[string]string)\n\tfor i := 0; i < len(kv)-1; i += 2 {\n\t\tm[kv[i]] = kv[i+1]\n\t}\n\treturn m\n}",
"func (addr *Address) Tag() []byte {\n\tvar a = make([]byte, 32)\n\tcopy(a, addr.calcDoubleHash()[32:])\n\treturn a\n}",
"func tagKeysToMap(tags string) map[string]*string {\n\toutput := make(map[string]*string)\n\n\tfor _, tag := range strings.Split(strings.TrimSpace(tags), \",\") {\n\t\tsplit := strings.SplitN(tag, \"=\", 2)\n\t\tkey := strings.TrimSpace(split[0])\n\t\tvalue := \"\"\n\n\t\tif key == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(split) > 1 {\n\t\t\tvalue = strings.TrimSpace(split[1])\n\t\t}\n\n\t\toutput[key] = &value\n\t}\n\n\tif len(output) == 0 {\n\t\treturn nil\n\t}\n\n\treturn output\n}",
"func (a *Article) GetHashedTags() []string {\n\ttags := []string{}\n\tfor _, tag := range a.Tags {\n\t\ttags = append(tags, fmt.Sprintf(\"#%s\", tag))\n\t}\n\n\treturn tags\n}",
"func (m MockPeer) Tags() map[string]string {\n\tif m.TagsFunc == nil {\n\t\treturn nil\n\t}\n\treturn m.TagsFunc()\n}",
"func (t *parallelMapFilterTable) Hash() hash.Hash { return t.hash }",
"func (spec Spec) DeepHash() string {\n\thash := sha512.New512_224()\n\tspec.DefaultService.hash(hash)\n\tfor _, rule := range spec.Rules {\n\t\trule.hash(hash)\n\t}\n\tsvcs := make([]string, len(spec.AllServices))\n\ti := 0\n\tfor k := range spec.AllServices {\n\t\tsvcs[i] = k\n\t\ti++\n\t}\n\tsort.Strings(svcs)\n\tfor _, svc := range svcs {\n\t\thash.Write([]byte(svc))\n\t\tspec.AllServices[svc].hash(hash)\n\t}\n\tspec.ShardCluster.hash(hash)\n\thash.Write([]byte(spec.VCL))\n\tfor _, auth := range spec.Auths {\n\t\tauth.hash(hash)\n\t}\n\tfor _, acl := range spec.ACLs {\n\t\tacl.hash(hash)\n\t}\n\tfor _, rw := range spec.Rewrites {\n\t\trw.hash(hash)\n\t}\n\tfor _, reqDisp := range spec.Dispositions {\n\t\treqDisp.hash(hash)\n\t}\n\th := new(big.Int)\n\th.SetBytes(hash.Sum(nil))\n\treturn h.Text(62)\n}",
"func Hashtags(ctx *context.Context) {\n\n\t// get the LANG-ubn repository name prefix\n\tvar repo_prefix string\n\tif strings.HasSuffix(ctx.Repo.Repository.Name, \"-ubn\") {\n\t\trepo_prefix = ctx.Repo.Repository.Name\n\t} else {\n\t\tchar_index := strings.LastIndex(ctx.Repo.Repository.Name, \"-ubn-\")\n\t\trepo_prefix = ctx.Repo.Repository.Name[0:char_index + 4]\n\t}\n\n\tctx.Data[\"username\"] = ctx.Repo.Repository.Owner.Name\n\tctx.Data[\"reponame\"] = ctx.Repo.Repository.Name\n\tctx.Data[\"RepoLink\"] = ctx.Repo.Repository.Link()\n\tctx.Data[\"Title\"] = ctx.Tr(\"repo.hashtag.all_hashtags\", ctx.Repo.Repository.Owner.Name + \"/\" + repo_prefix)\n\tresults, err := models.GetHashtagSummary(repo_prefix, ctx.Repo.Repository.Owner.ID)\n\n\tif err != nil {\n\t\tlog.Error(4, \"Hashtags: %v\", err)\n\t\tctx.Handle(http.StatusInternalServerError, \"GetHashtagSummary\", err)\n\t\treturn\n\t}\n\tctx.Data[\"Tags\"] = results\n\n\tctx.HTML(200, HASHTAGS)\n}",
"func (m *Measurement) uniqueTagValues(expr influxql.Expr) map[string][]string {\n\t// Track unique value per tag.\n\ttags := make(map[string]map[string]struct{})\n\n\t// Find all tag values referenced in the expression.\n\tinfluxql.WalkFunc(expr, func(n influxql.Node) {\n\t\tswitch n := n.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\t// Ignore operators that are not equality.\n\t\t\tif n.Op != influxql.EQ {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Extract ref and string literal.\n\t\t\tvar key, value string\n\t\t\tswitch lhs := n.LHS.(type) {\n\t\t\tcase *influxql.VarRef:\n\t\t\t\tif rhs, ok := n.RHS.(*influxql.StringLiteral); ok {\n\t\t\t\t\tkey, value = lhs.Val, rhs.Val\n\t\t\t\t}\n\t\t\tcase *influxql.StringLiteral:\n\t\t\t\tif rhs, ok := n.RHS.(*influxql.VarRef); ok {\n\t\t\t\t\tkey, value = rhs.Val, lhs.Val\n\t\t\t\t}\n\t\t\t}\n\t\t\tif key == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add value to set.\n\t\t\tif tags[key] == nil {\n\t\t\t\ttags[key] = make(map[string]struct{})\n\t\t\t}\n\t\t\ttags[key][value] = struct{}{}\n\t\t}\n\t})\n\n\t// Convert to map of slices.\n\tout := make(map[string][]string)\n\tfor k, values := range tags {\n\t\tout[k] = make([]string, 0, len(values))\n\t\tfor v := range values {\n\t\t\tout[k] = append(out[k], v)\n\t\t}\n\t\tsort.Strings(out[k])\n\t}\n\treturn out\n}",
"func (v *View) TagFilter() map[string]string {\n\tfilter := map[string]string{}\n\tfor _, t := range v.tags {\n\t\tp := strings.Split(t, \"=\")\n\t\tif len(p) == 2 {\n\t\t\tfilter[p[0]] = p[1]\n\t\t} else {\n\t\t\tfilter[p[0]] = \"\"\n\t\t}\n\t}\n\treturn filter\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
marshalStrings encodes a slice of strings into a single length-prefixed byte slice.
|
func marshalStrings(a []string) (ret []byte) {
for _, s := range a {
// Create a slice for len+data
b := make([]byte, 2+len(s))
binary.BigEndian.PutUint16(b[0:2], uint16(len(s)))
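		// The 2-byte big-endian length prefix caps each string at 65535 bytes.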
copy(b[2:], s)
// Append it to the full byte slice.
ret = append(ret, b...)
}
return
}
|
[
"func marshalString(str string) []byte { return []byte(strconv.Itoa(len(str)) + \":\" + str) }",
"func MarshalStringSlice(item Any) []*string {\n\tvar data []*string\n\n\tswitch reflect.TypeOf(item).Kind() {\n\tcase reflect.Slice:\n\t\tval := reflect.ValueOf(item)\n\t\tmax := val.Len()\n\t\tfor i := 0; i < max; i++ {\n\t\t\ts := fmt.Sprint(val.Index(i).Interface())\n\t\t\tdata = append(data, &s)\n\t\t}\n\t}\n\treturn data\n}",
"func (e *Encoder) encodeStringArray(v []string) error {\n\t// Special case for a nil array\n\tif v == nil {\n\t\treturn e.encodePrefixed('*', \"-1\")\n\t}\n\n\t// First encode the number of elements\n\tn := len(v)\n\tif err := e.encodePrefixed('*', strconv.Itoa(n)); err != nil {\n\t\treturn err\n\t}\n\n\t// Then encode each value\n\tfor _, el := range v {\n\t\tif err := e.encodeBulkString(BulkString(el)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func arrayConvertorStringToBytes(input []string) (output [][]byte) {\r\n\tfor i := 0; i < len(input); i++ {\r\n\t\toutput = append(output, []byte(input[i]))\r\n\t}\r\n\treturn output\r\n}",
"func encodeByteArrayAsString(p unsafe.Pointer, dst []byte, opts encOpts, len int) []byte {\n\t// For byte type, size is guaranteed to be 1,\n\t// so the slice length is the same as the array's.\n\t// see golang.org/ref/spec#Size_and_alignment_guarantees\n\tb := *(*[]byte)(unsafe.Pointer(&sliceHeader{\n\t\tData: p,\n\t\tLen: len,\n\t\tCap: len,\n\t}))\n\tdst = append(dst, '\"')\n\tdst = appendEscapedBytes(dst, b, opts)\n\tdst = append(dst, '\"')\n\n\treturn dst\n}",
"func encodeByteSlice(p unsafe.Pointer, dst []byte, opts encOpts) ([]byte, error) {\n\tb := *(*[]byte)(p)\n\tif b == nil {\n\t\treturn append(dst, \"null\"...), nil\n\t}\n\tdst = append(dst, '\"')\n\n\tif opts.flags.has(rawByteSlice) {\n\t\tdst = appendEscapedBytes(dst, b, opts)\n\t} else {\n\t\tn := base64.StdEncoding.EncodedLen(len(b))\n\t\tif a := cap(dst) - len(dst); a < n {\n\t\t\tnew := make([]byte, cap(dst)+(n-a))\n\t\t\tcopy(new, dst)\n\t\t\tdst = new[:len(dst)]\n\t\t}\n\t\tend := len(dst) + n\n\t\tbase64.StdEncoding.Encode(dst[len(dst):end], b)\n\n\t\tdst = dst[:end]\n\t}\n\treturn append(dst, '\"'), nil\n}",
"func (w *Writer) WriteBulkStringBytes(b []byte) error {\n\tif b == nil {\n\t\tw.writeLength(BulkStringType, -1)\n\t} else {\n\t\tw.writeLength(BulkStringType, int64(len(b)))\n\t\tw.w.Write(b)\n\t}\n\t_, err := w.w.Write(crlf)\n\tif err != nil {\n\t\treturn writeError(err.Error())\n\t}\n\treturn nil\n}",
"func EncodeBulkString(s string) []byte {\n\tif len(s) > bulkStringMaxLength {\n\t\tpanic(\"BulkString is over 512 MB\")\n\t}\n\treturn []byte(typeBulkStrings + strconv.Itoa(len(s)) + crlf + s + crlf)\n}",
"func (e *Encoder) marshalSlice(val reflect.Value, child bool) (string, bool) {\n\tvar ok bool\n\tif ok = supportedBaseKind(val); !ok {\n\t\treturn \"\", false\n\t}\n\tvar sl, str string\n\t// check the type of slice and handle\n\tfor j := 0; j < val.Len(); j++ {\n\t\tstr = \"\"\n\t\tstr, ok = e.stringify(val.Index(j), child)\n\t\tif !ok {\n\t\t\treturn \"\", false\n\t\t}\n\t\tif j == 0 {\n\t\t\tsl = str\n\t\t\tcontinue\n\t\t}\n\t\tsl = fmt.Sprintf(\"%s,%s\", sl, str)\n\t}\n\tif child {\n\t\tsl = fmt.Sprintf(\"%s%s%s\", e.sepBeg, sl, e.sepEnd)\n\t}\n\treturn sl, true\n}",
"func bindStrings(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_Strings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}",
"func WriteBytesStr(b []byte, w io.Writer, array bool) error {\n\tvar prefix string\n\tif array {\n\t\tprefix = \"[\" + strconv.Itoa(len(b)) + \"]byte{\"\n\t} else {\n\t\tprefix = \"[]byte{\"\n\t}\n\treturn writeBytesStr(b, prefix, w)\n}",
"func (rw *Writer) WriteBulkStringBytes(s []byte) (int, error) {\n\tif s == nil {\n\t\treturn rw.WriteBulkStringHeader(-1)\n\t}\n\n\trw.buf = rw.buf[:0]\n\trw.buf = append(rw.buf, '$')\n\trw.buf = strconv.AppendUint(rw.buf, uint64(len(s)), 10)\n\trw.buf = append(rw.buf, '\\r', '\\n')\n\trw.buf = append(rw.buf, s...)\n\trw.buf = append(rw.buf, '\\r', '\\n')\n\n\treturn rw.w.Write(rw.buf)\n}",
"func (i *StringMapItem) MarshalBinary() ([]byte, error) {\n\tn := i.size\n\tif len(i.str) < i.size {\n\t\tn = len(i.str)\n\t}\n\n\tbuffer := new(bytes.Buffer)\n\tif err := binary.Write(buffer, model.ByteOrder, []byte(i.str)[0:n]); err != nil {\n\t\treturn nil, err\n\t}\n\trep := make([]byte, i.size)\n\tcopy(rep, buffer.Bytes())\n\treturn rep, nil\n}",
"func ConvertToBulkStringBytes(content *string) []byte {\n\tif content == nil { // can be null\n\t\treturn []byte{'$', '-', '1', '\\r', '\\n'}\n\t}\n\n\tbytes := append([]byte(\"$\"), strconv.Itoa(len(*content))...)\n\tbytes = append(bytes, '\\r', '\\n')\n\n\tbytes = append(bytes, []byte(*content)...)\n\tbytes = append(bytes, '\\r', '\\n')\n\n\treturn bytes\n}",
"func string2bytes(s string) []byte {\n\tstringHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\n\tbh := reflect.SliceHeader{\n\t\tData: stringHeader.Data,\n\t\tLen: stringHeader.Len,\n\t\tCap: stringHeader.Len,\n\t}\n\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}",
"func StringToBytes(s string) (b []byte) {\n\t/* #nosec G103 */\n\tsh := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\t/* #nosec G103 */\n\tbh := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n\n\tbh.Data, bh.Len, bh.Cap = sh.Data, sh.Len, sh.Len\n\treturn b\n}",
"func packSString(v []string, ptr0 **C.char) {\n\tconst m = 0x7fffffff\n\tfor i0 := range v {\n\t\tptr1 := (*(*[m / sizeOfPtr]*C.char)(unsafe.Pointer(ptr0)))[i0]\n\t\tv[i0] = packPCharString(ptr1)\n\t}\n}",
"func (a Strings) MarshalJSONArray(enc *gojay.Encoder) {\n\tfor _, item := range a {\n\t\tenc.String(item)\n\t}\n}",
"func EncodeBinarySlice(seed []byte) []string {\n\twords := make([]string, len(seed)+1) // Extra word for checksumByte\n\tfor i, b := range seed {\n\t\twords[i] = strconv.FormatInt(int64(b), 2)\n\t}\n\tchecksum := checksumByte(seed)\n\twords[len(words)-1] = strconv.FormatInt(int64(checksum), 2)\n\treturn words\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
unmarshalStrings decodes a length-prefixed byte slice back into a slice of strings.
|
func unmarshalStrings(b []byte) (ret []string) {
for {
// If there's no more data then exit.
if len(b) == 0 {
return
}
// Decode size + data.
n := binary.BigEndian.Uint16(b[0:2])
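		// The next n bytes hold the string payload written by marshalStrings.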
ret = append(ret, string(b[2:n+2]))
// Move the byte slice forward and retry.
b = b[n+2:]
}
}
|
[
"func UnmarshalStrippedStringSlice(reader io.Reader, consumer runtime.Consumer) ([]StrippedString, error) {\n\tvar elements []json.RawMessage\n\tif err := consumer.Consume(reader, &elements); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result []StrippedString\n\tfor _, element := range elements {\n\t\tobj, err := unmarshalStrippedString(element, consumer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, obj)\n\t}\n\treturn result, nil\n}",
"func (s *StringArray) DecodeFromBytes(b []byte) error {\n\ts.ArraySize = int32(binary.LittleEndian.Uint32(b[:4]))\n\tif s.ArraySize <= 0 {\n\t\treturn nil\n\t}\n\n\tvar offset = 4\n\tfor i := 1; i <= int(s.ArraySize); i++ {\n\t\tstr, err := DecodeString(b[offset:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.Strings = append(s.Strings, str)\n\t\toffset += str.Len()\n\t}\n\n\treturn nil\n}",
"func decodeByteSlice(s *Stream, val reflect.Value) error {\n\t// b = byte slice contained string content\n\tb, err := s.Bytes()\n\tif err != nil {\n\t\treturn wrapStreamError(err, val.Type())\n\t}\n\tval.SetBytes(b)\n\treturn nil\n}",
"func (MapStringString) DecodeSlice() {}",
"func string2bytes(s string) []byte {\n\tstringHeader := (*reflect.StringHeader)(unsafe.Pointer(&s))\n\n\tbh := reflect.SliceHeader{\n\t\tData: stringHeader.Data,\n\t\tLen: stringHeader.Len,\n\t\tCap: stringHeader.Len,\n\t}\n\n\treturn *(*[]byte)(unsafe.Pointer(&bh))\n}",
"func bytesToStrings(in []byte) []string {\n\ts := strings.TrimRight(string(in), \"\\n\")\n\tif s == \"\" { // empty (not {\"\"}, len=1)\n\t\treturn []string{}\n\t}\n\treturn strings.Split(s, \"\\n\")\n}",
"func marshalStrings(a []string) (ret []byte) {\n\tfor _, s := range a {\n\t\t// Create a slice for len+data\n\t\tb := make([]byte, 2+len(s))\n\t\tbinary.BigEndian.PutUint16(b[0:2], uint16(len(s)))\n\t\tcopy(b[2:], s)\n\n\t\t// Append it to the full byte slice.\n\t\tret = append(ret, b...)\n\t}\n\treturn\n}",
"func DecodeStringArray(b []byte) (*StringArray, error) {\n\ts := &StringArray{}\n\tif err := s.DecodeFromBytes(b); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}",
"func unmarshalString(v []byte) (string, error) {\n\tvar str string\n\terr := json.Unmarshal(v, &str)\n\tif err != nil {\n\t\treturn str, errors.Wrap(err, \"could not unmarshal bytes to string\")\n\t}\n\treturn str, nil\n}",
"func UnmarshalBytes(src []byte) ([]byte, []byte, error) {\n\ttail, n, err := UnmarshalVarUint64(src)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"cannot unmarshal string size: %w\", err)\n\t}\n\tsrc = tail\n\tif uint64(len(src)) < n {\n\t\treturn nil, nil, fmt.Errorf(\"src is too short for reading string with size %d; len(src)=%d\", n, len(src))\n\t}\n\treturn src[n:], src[:n], nil\n}",
"func ToByteSlice(s string) (bs []byte)",
"func marshalString(str string) []byte { return []byte(strconv.Itoa(len(str)) + \":\" + str) }",
"func (o *Strings) FromBytes(data []byte) error {\n\tbuf := uio.NewBigEndianBuffer(data)\n\tif buf.Len() == 0 {\n\t\treturn fmt.Errorf(\"Strings DHCP option must always list at least one String\")\n\t}\n\n\t*o = make(Strings, 0)\n\tfor buf.Has(1) {\n\t\tucLen := buf.Read8()\n\t\tif ucLen == 0 {\n\t\t\treturn fmt.Errorf(\"DHCP Strings must have length greater than 0\")\n\t\t}\n\t\t*o = append(*o, string(buf.CopyN(int(ucLen))))\n\t}\n\treturn buf.FinError()\n}",
"func SolidityUnpackString(data string, types []string) ([]interface{}, error) {\n\n\tif data[0:2] == \"0x\" {\n\t\tdata = data[2:]\n\t}\n\tvar resp = make([]interface{}, len(types))\n\tvar stringCount = 0\n\tfor i := 0; i < len(types); i++ {\n\t\tpartialData := data[i*64 : (i+1)*64]\n\t\tconvertedData, count, err := parseNextValueFromSolidityHexStr(partialData, types[i], data[i*64:], len(types)-i, stringCount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstringCount = count\n\t\tresp[i] = convertedData\n\t}\n\treturn resp, nil\n}",
"func NewFromByteSlice(bytes []byte) *Stringish {\n\treturn &Stringish{string(bytes)}\n}",
"func DisassembleStrings(c *cryptos.Crypto, s []byte) ([]string, error) {\n\tr, err := DisassembleString(c, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn strings.Split(r, \" \"), nil\n}",
"func StringToByteSlice(s *string) []byte {\n\tvar bytes []byte\n\tstringHeader := (*reflect.StringHeader)(unsafe.Pointer(s))\n\tbytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))\n\tbytesHeader.Data = stringHeader.Data\n\tbytesHeader.Len = stringHeader.Len\n\tbytesHeader.Cap = stringHeader.Len\n\treturn bytes\n}",
"func StringBytes(b []byte) string { return *(*string)(Pointer(&b)) }",
"func ByteSlice2String(bs []byte) string {\n\treturn *(*string)(unsafe.Pointer(&bs))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetBroadcastPayload will return the object to send to all chat users.
|
func (e *UserDisabledEvent) GetBroadcastPayload() EventPayload {
return EventPayload{
"type": ErrorUserDisabled,
"id": e.ID,
"timestamp": e.Timestamp,
"user": e.User,
}
}
|
[
"func (d delegate) GetBroadcasts(overhead, limit int) [][]byte { return nil }",
"func (p *ChatPool) GetBroadcastChann() chan Message {\n\treturn p.Broadcast\n}",
"func (e *Ender) GetBroadcast() (*Broadcast, error) {\n\tif !e.valid {\n\t\treturn nil, errors.New(\"ender: unvalid process\")\n\t}\n\n\tsrvBro := modelBroadcastToSrvBroadcast(e.broadcastModel)\n\treturn srvBro, nil\n}",
"func (s *Starter) GetBroadcast() (*Broadcast, error) {\n\tif !s.valid {\n\t\treturn nil, errors.New(\"starter: unvalid process\")\n\t}\n\tsrvBro := modelBroadcastToSrvBroadcast(s.broadcastModel)\n\treturn srvBro, nil\n}",
"func (s *server) GetBroadcasts(overhead, limit int) [][]byte {\n\treturn s.queue.GetBroadcasts(overhead, limit)\n}",
"func (this User) broadcast(message interface{}) {\n mu.Lock()\n for _, client := range clients {\n if client.User.Id != this.Id {\n websocket.JSON.Send(client.Websocket, message)\n }\n }\n mu.Unlock()\n}",
"func (m *DeviceAndAppManagementAssignmentFilter) GetPayloads()([]PayloadByFilterable) {\n val, err := m.GetBackingStore().Get(\"payloads\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]PayloadByFilterable)\n }\n return nil\n}",
"func (n *GlobalNotification) Payload() map[string]interface{} {\r\n\treturn n.payload\r\n}",
"func (bp *BasePayload) GetPayload() []byte {\n\treturn bp.Payload\n}",
"func (a *API) GetUserInstancePayload(user *UserModel) (*UserInstanceEventPayload, error) {\n\trooms := &[]RoomModel{}\n\terr := a.DB.Model(user).Related(rooms, \"Rooms\").Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\troomIDs := []string{}\n\tfor _, r := range *rooms {\n\t\troomIDs = append(roomIDs, r.ID)\n\t}\n\treturn &UserInstanceEventPayload{\n\t\tID: user.ID,\n\t\tName: user.Name,\n\t\tPhoto: user.Photo,\n\t\tRoomIDs: roomIDs,\n\t}, nil\n}",
"func GetBroadcastTxs(st StateDB) (common.BroadTxSlice, error) {\n\tmgr := GetManager(GetVersionInfo(st))\n\tif mgr == nil {\n\t\treturn nil, ErrFindManager\n\t}\n\topt, err := mgr.FindOperator(mc.MSKeyBroadcastTx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalue, err := opt.GetValue(st)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn value.(common.BroadTxSlice), nil\n}",
"func broadcastWebSocket(event models.Event) {\n\t//data, err := json.Marshal(event)\n\t//if err != nil {\n\t//\tbeego.Error(\"Fail to marshal event:\", err)\n\t//\treturn\n\t//}\n\n\tfor sub := subscribers.Front(); sub != nil; sub = sub.Next() {\n\t\t// Immediately send event to WebSocket users.\n\t\tws := sub.Value.(Subscriber).Conn\n\t\tname := sub.Value.(Subscriber).Name\n\t\tif name == event.User {\n\t\t\tcontinue\n\t\t}\n\t\tif ws != nil {\n\t\t\tlogs.Info(\"broadcastWebSocket:\",string(event.Content))\n\t\t\tif ws.WriteMessage(websocket.TextMessage, []byte(event.Content)) != nil {\n\t\t\t\t// User disconnected.\n\t\t\t\tlogs.Error(\"WriteMessage error\")\n\t\t\t\tunsubscribe <- sub.Value.(Subscriber).Name\n\t\t\t}\n\t\t}\n\t}\n}",
"func broadcastWebSocket(event models.Event) {\n\tdata, err := json.Marshal(event)\n\tif err != nil {\n\t\tbeego.Error(\"Fail to marshal event:\", err)\n\t\treturn\n\t}\n\n\tfor sub := subscribers.Front(); sub != nil; sub = sub.Next() {\n\t\t// Immediately send event to WebSocket users.\n\t\tif sub.Value.(Subscriber).UserID != event.UserID {\n\t\t\tws := sub.Value.(Subscriber).Conn\n\t\t\tif ws != nil {\n\t\t\t\tif ws.WriteMessage(websocket.TextMessage, data) != nil {\n\t\t\t\t\t// User disconnected.\n\t\t\t\t\tunsubscribe <- sub.Value.(Subscriber).UserID\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *CoordinatorHelper) AllBroadcasts(ctx context.Context) ([]*storage.Broadcast, error) {\n\treturn c.broadcastStorage.GetAllBroadcasts(ctx)\n}",
"func (b *Broadcast) Copy() *Broadcast {\n\tbroadcast := &Broadcast{\n\t\tRoomid: atomic.LoadInt64(&b.Roomid),\n\t\tUID: atomic.LoadInt64(&b.UID),\n\t\tUname: b.Uname,\n\t\tPopularity: atomic.LoadUint32(&b.Popularity),\n\t\tMaxPopularity: atomic.LoadUint32(&b.MaxPopularity),\n\t\tTitle: b.Title,\n\t\tUsercover: b.Usercover,\n\t\tKeyframe: b.Keyframe,\n\t\tLivetime: b.Livetime,\n\t\tEndtime: b.Endtime,\n\t\tParticipantduring10Min: atomic.LoadInt64(&b.Participantduring10Min),\n\t\tGoldCoin: atomic.LoadUint64(&b.GoldCoin),\n\t\tSilverCoin: atomic.LoadUint64(&b.SilverCoin),\n\t\tParticipant: atomic.LoadInt64(&b.Participant),\n\t\tGoldUser: atomic.LoadInt64(&b.GoldUser),\n\t\tDanmuCount: atomic.LoadUint64(&b.DanmuCount),\n\t}\n\treturn broadcast\n}",
"func (u *UpdateReadChannelDiscussionInbox) GetBroadcastID() (value int, ok bool) {\n\tif !u.Flags.Has(0) {\n\t\treturn value, false\n\t}\n\treturn u.BroadcastID, true\n}",
"func (e *Event) GetRawPayload() json.RawMessage {\n\tif e == nil || e.RawPayload == nil {\n\t\treturn json.RawMessage{}\n\t}\n\treturn *e.RawPayload\n}",
"func (p *Publish) Payload() []byte {\n\treturn p.bytes(p.payloadPos)\n}",
"func (app *App) GroupBroadcast(ctx context.Context, frontendType, groupName, route string, v interface{}) error {\n\tlogger.Log.Debugf(\"Type=Broadcast Route=%s, Data=%+v\", route, v)\n\n\tmembers, err := app.GroupMembers(ctx, groupName)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn app.sendDataToMembers(members, frontendType, route, v)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CreateAlbum queries the Imgur API in order to create an anonymous album, using a client ID.
|
func CreateAlbum(title string, clientID string) (albumID, deleteHash interface{}) {
	apiURL := "https://api.imgur.com"
	resource := "/3/album/"
	data := url.Values{}
	data.Set("title", title)
	u, _ := url.ParseRequestURI(apiURL)
	u.Path = resource
	urlStr := u.String() // e.g. "https://api.imgur.com/3/album/"
	client := &http.Client{}
	r, err := http.NewRequest("POST", urlStr, strings.NewReader(data.Encode())) // URL-encoded payload
	if err != nil {
		fmt.Println("[-] Error building request:", err)
		return nil, nil
	}
	r.Header.Add("Authorization", "Client-ID "+clientID)
	r.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	r.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
	resp, err := client.Do(r)
	if err != nil {
		fmt.Println("[-] Error connecting:", err)
		return nil, nil
	}
	defer resp.Body.Close()
	var result map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		fmt.Println("[!] Error decoding response:", err)
		return nil, nil
	}
	nestedMap := result["data"]
	// Reading from a nil map is safe, so a failed assertion simply yields nil values.
	newMap, _ := nestedMap.(map[string]interface{})
	albumID = newMap["id"]
	deleteHash = newMap["deletehash"]
	fmt.Println(color.GreenString("\n[+]"), "Successfully created an album with the following values:")
	fmt.Println(color.GreenString("albumID:"), albumID, color.GreenString("Album DeleteHash:"), deleteHash)
	fmt.Println(" ")
	return albumID, deleteHash
}
|
[
"func NewAlbum() Album {\n\treturn Album{\n\t\tCreated: time.Now().String(),\n\t\tID: uuid.New().String(),\n\t\tPhotos: make([]string, 0),\n\t}\n}",
"func (s *Service) Create(ctx context.Context, title string) (*Album, error) {\n\treq := &photoslibrary.CreateAlbumRequest{\n\t\tAlbum: &photoslibrary.Album{Title: title},\n\t}\n\tres, err := s.photos.Create(req).Context(ctx).Do()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating album: %w\", err)\n\t}\n\talbum := toAlbum(res)\n\treturn &album, nil\n}",
"func CreateAlbum(req CreateAlbumRequest) error {\n\tinsertAlbum := `INSERT INTO Album (year, title, date) VALUES (?, ?, ?)`\n\tif _, err := configure.SQL.Query(insertAlbum, req.Year, req.AlbumTitle, req.AlbumDate); err != nil {\n\t\tlog.Println(\"Failed on inserting album\")\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (p *Photos) CreateAlbum(ctx context.Context, title string, uploadItems []UploadItem) ([]*AddResult, error) {\n\tlog.Printf(\"Creating album %s\", title)\n\talbum, err := p.service.CreateAlbum(ctx, &photoslibrary.CreateAlbumRequest{\n\t\tAlbum: &photoslibrary.Album{Title: title},\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create an album: %s\", err)\n\t}\n\treturn p.add(ctx, uploadItems, photoslibrary.BatchCreateMediaItemsRequest{\n\t\tAlbumId: album.Id,\n\t\tAlbumPosition: &photoslibrary.AlbumPosition{Position: \"LAST_IN_ALBUM\"},\n\t}), nil\n}",
"func CreateAlbum(title string, artist string) *Album {\n\treturn &Album{\n\t\tTitle: title,\n\t\tArtist: artist,\n\t}\n}",
"func DeleteAlbum(albumDeleteHash string, clientID string) {\n\turl := \"https://api.imgur.com/3/album/\" + albumDeleteHash\n\tmethod := \"DELETE\"\n\n\tpayload := &bytes.Buffer{}\n\twriter := multipart.NewWriter(payload)\n\terr := writer.Close()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(method, url, payload)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treq.Header.Add(\"Authorization\", \"Client-ID \"+clientID)\n\n\treq.Header.Set(\"Content-Type\", writer.FormDataContentType())\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tif strings.Contains(string(body), \"200\") {\n\t\tfmt.Println(color.GreenString(\"[+]\"), \"Delete was a success\")\n\t}\n\n}",
"func (service *AlbumDiscogService) CreateAlbumDiscog(attributes *library.AlbumAttributes) error {\n\tif service.insert == nil {\n\t\tstmt, err := service.prepareInsert()\n\t\tif err != nil {\n\t\t\tservice.session.Logger.Println(err)\n\t\t\treturn err\n\t\t}\n\t\tservice.insert = stmt\n\t}\n\n\t_, err := service.insert.Exec(\n\t\tattributes.ArtistName, attributes.ArtistSort,\n\t\tattributes.Name,\n\t\tattributes.Sort,\n\t\tattributes.ReleaseDate,\n\t\tattributes.ArtistName, attributes.ArtistSort,\n\t\tattributes.GenreName)\n\n\tif err != nil {\n\t\tservice.session.Logger.Println(err)\n\t\treturn err\n\t}\n\treturn nil\n}",
"func NewAlbum() *Album {\n\talbum := new(Album)\n\talbum.Key = \"\"\n\talbum.Id = 0\n\talbum.EncodedKey = \"\"\n\talbum.Title = \"\"\n\talbum.Artists = dna.StringArray{}\n\talbum.Coverart = \"\"\n\talbum.Topics = dna.StringArray{}\n\talbum.Plays = 0\n\talbum.Songids = dna.IntArray{}\n\talbum.YearReleased = \"\"\n\talbum.Nsongs = 0\n\talbum.Description = \"\"\n\talbum.DateCreated = time.Time{}\n\talbum.Checktime = time.Time{}\n\t// add more 6 fields\n\talbum.IsAlbum = 0\n\talbum.IsHit = 0\n\talbum.IsOfficial = 0\n\talbum.Likes = 0\n\talbum.StatusId = 0\n\talbum.Comments = 0\n\talbum.ArtistIds = dna.IntArray{}\n\treturn album\n}",
"func NewAlbum() *Album {\n\talbum := new(Album)\n\talbum.Id = 0\n\talbum.Key = \"\"\n\talbum.Title = \"\"\n\talbum.Artists = dna.StringArray{}\n\talbum.Plays = 0\n\talbum.Songids = dna.IntArray{}\n\talbum.Nsongs = 0\n\talbum.Description = \"\"\n\talbum.Coverart = \"\"\n\talbum.DateCreated = time.Time{}\n\talbum.Checktime = time.Time{}\n\treturn album\n}",
"func addAlbum(ctx context.Context, client *spanner.Client, singerId int64,\n\talbumTitle string) (*int64, error) {\n\talbumId := rand.Int63()\n\t_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context,\n\t\ttxn *spanner.ReadWriteTransaction) error {\n\t\tstmt := spanner.Statement{\n\t\t\tSQL: `INSERT Albums (SingerId, AlbumId, AlbumTitle) VALUES\n (@SingerId, @AlbumId, @AlbumTitle)`,\n\t\t\tParams: map[string]interface{}{\n\t\t\t\t\"SingerId\": singerId,\n\t\t\t\t\"AlbumId\": albumId,\n\t\t\t\t\"AlbumTitle\": albumTitle,\n\t\t\t},\n\t\t}\n\t\trowCount, err := txn.Update(ctx, stmt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(ctx, \"%d record(s) inserted.\\n\", rowCount)\n\t\treturn nil\n\t})\n\treturn &albumId, err\n}",
"func ArtistAlbum(id string, page, limit int) (string, error) {\n\t_offset, _limit := formatParams(page, limit)\n\tpreParams := \"{\\\"offset\\\": \"+ _offset +\", \\\"limit\\\": \"+_limit +\", \\\"total\\\": true, \\\"csrf_token\\\": \\\"\\\"}\"\n\tparams, encSecKey, encErr := EncParams(preParams)\n\tif encErr != nil {\n\t\treturn \"\", encErr\n\t}\n\tres, resErr := post(\"http://music.163.com/weapi/artist/albums/\"+id, params, encSecKey)\n\tif resErr != nil {\n\t\treturn \"\", resErr\n\t}\n\treturn res, nil\n}",
"func PutAlbum(jsonData string, db *neoism.Database) string {\n\t// TODO: Write a data verification method\n\n\t// Parse the json data into an album struct\n\tvar a Album\n\terr := json.Unmarshal([]byte(jsonData), &a)\n\tif err != nil {\n\t\treturn \"{ \\\"err\\\": \\\"Unable to parse json request\\\" }\"\n\t\tfmt.Println(err)\n\t}\n\n\t// Set the submitted date to the current time\n\ta.Submitted = int32(time.Now().Unix())\n\tfmt.Println(a.Submitted)\n\n\t// Create a new node in Neo4j DB\n\tres := []struct {\n\t\tN neoism.Node\n\t}{}\n\n\tcq := neoism.CypherQuery{\n\t\tStatement: \"CREATE (n:Album {name: {name}, year: {year}, submitted: {submitted}}) RETURN n\",\n\t\tParameters: neoism.Props{\"name\": a.Name, \"year\": a.Year, \"submitted\": a.Submitted},\n\t\tResult: res,\n\t}\n\tdb.Cypher(&cq)\n\n\t// TODO: Create relationships to artist, genre\n\n\treturn \"\"\n}",
"func postAlbums(c *gin.Context) {\n\tvar newAlbum album\n\n\t// Call BinsJSON to bind the received JSON to\n\t// newAlbum.\n\tif err := c.BindJSON(&newAlbum); err != nil {\n\t\treturn\n\t}\n\n\t// Add the new album to the albums.json.\n\tif result := AddAlbum(newAlbum); result == false {\n\t\tc.IndentedJSON(http.StatusNotAcceptable, gin.H{\"message\": \"ID not avaible\"})\n\t\treturn\n\t}\n\n\tc.IndentedJSON(http.StatusCreated, newAlbum)\n}",
"func (b *PhotosSaveBuilder) AlbumID(v int) *PhotosSaveBuilder {\n\tb.Params[\"album_id\"] = v\n\treturn b\n}",
"func (b *PhotosGetBuilder) AlbumID(v string) *PhotosGetBuilder {\n\tb.Params[\"album_id\"] = v\n\treturn b\n}",
"func GetAlbumFromAPI(id dna.Int) (*Album, error) {\n\tvar album *Album = NewAlbum()\n\talbum.Id = id\n\tapialbum, err := GetAPIAlbum(id)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tif apialbum.Response.MsgCode == 1 {\n\t\t\tif GetKey(apialbum.Id) != GetKey(album.Id) {\n\t\t\t\terrMes := dna.Sprintf(\"Resulted key and computed key are not match. %v =/= %v , id: %v =/= %v\", GetKey(apialbum.Id), GetKey(album.Id), id, apialbum.Id)\n\t\t\t\tpanic(errMes.String())\n\t\t\t}\n\n\t\t\talbum.Title = apialbum.Title\n\t\t\talbum.Artists = dna.StringArray(apialbum.Artists.Split(\" , \").Map(func(val dna.String, idx dna.Int) dna.String {\n\t\t\t\treturn val.Trim()\n\t\t\t}).([]dna.String)).SplitWithRegexp(\",\").Filter(func(v dna.String, i dna.Int) dna.Bool {\n\t\t\t\tif v != \"\" {\n\t\t\t\t\treturn true\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t})\n\n\t\t\talbum.Topics = dna.StringArray(apialbum.Topics.Split(\", \").Map(func(val dna.String, idx dna.Int) dna.String {\n\t\t\t\treturn val.Trim()\n\t\t\t}).([]dna.String)).SplitWithRegexp(\" / \").Unique().Filter(func(v dna.String, i dna.Int) dna.Bool {\n\t\t\t\tif v != \"\" {\n\t\t\t\t\treturn true\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t})\n\t\t\talbum.Plays = apialbum.Plays\n\t\t\t// album.Songids\n\t\t\t// album.Nsongs\n\t\t\t// album.EncodedKey\n\t\t\t// album.Coverart\n\t\t\t// album.DateCreated\n\t\t\talbum.YearReleased = apialbum.YearReleased\n\t\t\talbum.Description = apialbum.Description.RemoveHtmlTags(\"\")\n\n\t\t\talbum.ArtistIds = apialbum.ArtistIds.Split(\",\").ToIntArray()\n\t\t\talbum.IsAlbum = apialbum.IsAlbum\n\t\t\talbum.IsHit = apialbum.IsHit\n\t\t\talbum.IsOfficial = apialbum.IsOfficial\n\t\t\talbum.Likes = apialbum.Likes\n\t\t\talbum.StatusId = apialbum.StatusId\n\t\t\talbum.Comments = apialbum.Comments\n\t\t\talbum.Checktime = time.Now()\n\t\t\treturn album, nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Message code invalid \" + apialbum.Response.MsgCode.ToString().String())\n\t\t}\n\t}\n}",
"func GetAlbum(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"short_id\"]\n\n\talbum, err := database.GetAlbumByShortID(id)\n\tif err != nil {\n\t\tutils.RespondWithJSON(w, http.StatusInternalServerError, \"error\", nil)\n\t\treturn\n\t}\n\n\tutils.RespondWithJSON(w, http.StatusOK, \"success\", album)\n\treturn\n}",
"func postAlbums(c *gin.Context) {\n\tvar newAlbum album\n\n\t// Call BindJSON to bind the received JSON to newAlbum\n\t// newAlbum\n\tif err := c.BindJSON(&newAlbum); err != nil {\n\t\treturn\n\t}\n\n\talbums = append(albums, newAlbum)\n\tc.IndentedJSON(http.StatusCreated, newAlbum)\n}",
"func getGooglePhotosAlbumId(name string, c *gphotos.Client) string {\n\tif name == \"\" {\n\t\treturn \"\"\n\t}\n\n\talbum, err := c.GetOrCreateAlbumByName(name)\n\tif err != nil {\n\t\tlog.Printf(\"error creating album: name=%s, error=%v\", name, err)\n\t\treturn \"\"\n\t}\n\treturn album.Id\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetResponseImages retrieves the response images from an Imgur album.
|
func GetResponseImages(albumID string, clientID string) (imageLink string) {
	// This hash is the albumID hash
	url := "https://api.imgur.com/3/album/" + albumID + "/images.json"
	method := "GET"
	payload := &bytes.Buffer{}
	writer := multipart.NewWriter(payload)
	err := writer.Close()
	if err != nil {
		fmt.Println(err)
	}
	client := &http.Client{}
	req, err := http.NewRequest(method, url, payload)
	if err != nil {
		fmt.Println(err)
		return ""
	}
	req.Header.Add("Authorization", "Client-ID "+clientID)
	req.Header.Set("Content-Type", writer.FormDataContentType())
	res, err := client.Do(req)
	if err != nil {
		fmt.Println("[-] Error connecting:", err)
		return ""
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("[-] Error reading response:", err)
		return ""
	}
	var results AlbumImages
	if err := json.Unmarshal(body, &results); err != nil {
		fmt.Println("[!] Error unmarshalling:", err)
		return ""
	}
	datavalues := results.Data
	if results.Success {
		for _, img := range datavalues {
			// Only images whose description contains "response" are client replies.
			if strings.Contains(img.Description, "response") {
				fmt.Println("[+] ImageID:", img.ID)
				fmt.Println("[+] ImageTitle:", img.Title)
				fmt.Println("[+] Description:", img.Description)
				fmt.Println("[+] ImageLink:", img.Link)
				fmt.Println(" ")
				// Keep the package-level responseURL in sync and return the same link.
				responseURL = img.Link
				imageLink = responseURL
			}
		}
	}
	return imageLink
}
|
[
"func GetImagesForAlbumV1(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tuuidParam := params[\"uuid\"]\n\n\timageModels := service.CreateDefaultImageService().GetAllImagesForAlbum(uuid.MustParse(uuidParam))\n\tdata, _ := json.Marshal(imageModels)\n\t_, _ = w.Write(data)\n\n}",
"func showImagesInAlbum(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\titer:=Session.Query(\"SELECT imagelist FROM albumtable WHERE albname=?;\",param[\"album\"]).Iter()\n\tvar data []string\n\tfor iter.Scan(&data){\n\t\tjson.NewEncoder(w).Encode(data)\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func (c *GetImagesByAlbumApiController) GetImagesByAlbum(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tid := params[\"id\"]\n\tresult, err := c.service.GetImagesByAlbum(id)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), constants.ErrorDBRecordNotFound) || strings.Contains(err.Error(), constants.ErrorDBNoSuchTable) {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tEncodeJSONResponse(result, http.StatusOK, w)\n}",
"func getAlbums(c *gin.Context) {\n\tc.IndentedJSON(http.StatusOK, albums)\n}",
"func getImages(hostBase string, organization string, application string) (*http.Response, []*server.Image, error) {\n\n\turl := getImagesURL(hostBase, organization, application)\n\n\tkiln.LogInfo.Printf(\"Invoking get at URL %s\", url)\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", \"e30K.e30K.e30K\"))\n\tclient := &http.Client{}\n\tresponse, err := client.Do(req)\n\n\timages := []*server.Image{}\n\n\tbytes, err := ioutil.ReadAll(response.Body)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tbody := string(bytes)\n\n\tkiln.LogInfo.Printf(\"Response is %s\", body)\n\n\tjson.Unmarshal(bytes, &images)\n\n\treturn response, images, err\n\n}",
"func getAllAlbums(c *gin.Context) {\n\tc.JSON(http.StatusOK, albums)\n}",
"func getAlbums(c *gin.Context) {\n\n\tvar albums []album.Album\n\n\tdbClient.Select(&albums, \"SELECT id, title, artist, price FROM album;\")\n\n\tc.IndentedJSON(http.StatusOK, albums)\n}",
"func GetUserImages(w http.ResponseWriter, r *http.Request) {\n\n\t//Get current Session\n\tsession, _ := store.Get(r, \"session\")\n\tname := session.Values[\"username\"].(string)\n\n\t//Get User\n\tuser, err := model.GetUserByUsername(name)\n\tif err != nil {\n\n\t\tw.WriteHeader(http.StatusConflict)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\n\t}\n\n\t//Get Images\n\timages, err := user.GetImages()\n\tif err != nil {\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\n\t}\n\n\t//Get like and comment counts for each Image\n\tfor i := 0; i < len(images); i++ {\n\n\t\timages[i].Likes, err = images[i].GetLikeCounts()\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\n\t\t}\n\t\tcomments, err := images[i].GetComments()\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\n\t\t}\n\t\timages[i].Comments = len(comments)\n\n\t}\n\n\t//Make Response JSON\n\tresponseModel := struct {\n\t\tImages []model.Image\n\t}{\n\t\tImages: images,\n\t}\n\tresponseJSON, err := json.Marshal(responseModel)\n\tif err != nil {\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\n\t}\n\n\t//Write response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(responseJSON)\n\n}",
"func (m *Market) Getalbums(ownerId int, offset int, count int) (resp responses.MarketGetalbums, err error) {\n\tparams := map[string]interface{}{}\n\n\tparams[\"owner_id\"] = ownerId\n\n\tif offset > 0 {\n\t\tparams[\"offset\"] = offset\n\t}\n\n\tif count > 0 {\n\t\tparams[\"count\"] = count\n\t}\n\n\terr = m.SendObjRequest(\"market.getAlbums\", params, &resp)\n\n\treturn\n}",
"func ShowImagesInAlbum(albName string) ([]string, *utils.ApplicationError) {\n\treturn model.ShowImagesInAlbum(albName)\n}",
"func GetPhotosByAlbumKeyHandler(w http.ResponseWriter, r *http.Request) {\n\tfuncTag := \"GetPhotosByAlbumKeyHandler\"\n\n\t// process request params\n\tmp, err := requester.GetRequestParams(r, nil, routeKeyAlbumID)\n\tif err != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"process request params\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// get the photos\n\tps, err := photoDB.GetPhotosByAlbumKey(mp[routeKeyAlbumID])\n\tif err != nil {\n\t\terr = apierr.Errorf(err, funcTag, \"get photos by album key\")\n\t\tresponder.SendJSONError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// give the photos their url from s3\n\t// TODO: have the client pass in a quality filter via query params (\"1024\" below)\n\tfor _, p := range ps {\n\t\trelativePath := fmt.Sprintf(\"%s/%s\", \"1024\", p.Src)\n\t\tp.Src = aws.S3PublicAssetURL(relativePath)\n\t}\n\n\t// build the return data\n\tres := &GetPhotosResponse{}\n\tres.Photos = ps\n\n\t// return\n\tresponder.SendJSON(w, res)\n}",
"func getImages(app App) []docker.APIImages {\n\tpDebug(\"Getting images %s\", app.Image)\n\timgs, _ := client.ListImages(docker.ListImagesOptions{All: false, Filter: app.Image})\n\treturn imgs\n}",
"func GetGalleryImages(db *sql.DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintf(w, \"GET /galleries/\"+mux.Vars(r)[\"id\"]+\"/images\")\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"Not Implemented\"))\n\t}\n}",
"func (this *DockerapiController) GetImages() {\n\taddress := \"/images/json\"\n\tresult := RequestUnixSocket(address, \"GET\")\n\tthis.Ctx.WriteString(result)\n}",
"func getImages() ([]types.ImageSummary, error) {\n\timages, err := client.ImageList(context.Background(), types.ImageListOptions{})\n\tif err != nil {\n\t\treturn []types.ImageSummary{}, err\n\t}\n\treturn images, nil\n}",
"func (m *VirtualEndpoint) GetGalleryImages()([]CloudPcGalleryImageable) {\n val, err := m.GetBackingStore().Get(\"galleryImages\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]CloudPcGalleryImageable)\n }\n return nil\n}",
"func (r QuayAdapter) GetImageNames() ([]string, error) {\n\tlog.Debug(\"QuayAdapter::GetImages\")\n\tlog.Debug(\"BundleSpecLabel: %s\", BundleSpecLabel)\n\tlog.Debug(\"Loading image list for quay.io Org: [ %v ]\", r.config.Org)\n\n\tvar imageList []string\n\n\t// check if we're configured for specific images\n\tif len(r.config.Images) > 0 {\n\t\tlog.Debugf(\"Configured to use images: %v\", r.config.Images)\n\t\timageList = append(imageList, r.config.Images...)\n\t}\n\n\t// discover images\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(quayCatalogURL, r.config.URL, r.config.Org), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %v\", r.config.Token))\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to load catalog response at %s - %v\", fmt.Sprintf(quayCatalogURL, r.config.URL, r.config.Org), err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tcatalogResp := quayImageResponse{}\n\terr = json.NewDecoder(resp.Body).Decode(&catalogResp)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to decode Catalog response from '%s'\", fmt.Sprintf(quayCatalogURL, r.config.URL, r.config.Org))\n\t\treturn nil, err\n\t}\n\n\tfor _, repo := range catalogResp.Repositories {\n\t\timageList = append(imageList, repo.Name)\n\t}\n\n\tif len(imageList) == 0 {\n\t\tlog.Warn(\"image list is empty. No images were discovered\")\n\t\treturn imageList, nil\n\t}\n\n\tvar uniqueList []string\n\timageMap := make(map[string]struct{})\n\tfor _, image := range imageList {\n\t\timageMap[image] = struct{}{}\n\t}\n\n\t// create a unique image list\n\tfor key := range imageMap {\n\t\tuniqueList = append(uniqueList, key)\n\t}\n\treturn uniqueList, nil\n}",
"func (s *Service) List(ctx context.Context) ([]Album, error) {\n\tvar result []Album\n\talbumsListCall := s.photos.List().PageSize(maxAlbumsPerPage).ExcludeNonAppCreatedData()\n\terr := albumsListCall.Pages(ctx, func(response *photoslibrary.ListAlbumsResponse) error {\n\t\tfor _, res := range response.Albums {\n\t\t\tresult = append(result, toAlbum(res))\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tvar emptyResult []Album\n\t\treturn emptyResult, fmt.Errorf(\"listing albums: %w\", err)\n\t}\n\treturn result, nil\n}",
"func (h *Handler) GetImages(w http.ResponseWriter, r *http.Request) {\n\t// first list all the pools so that we can retrieve images from all pools\n\tpools, err := ceph.ListPoolSummaries(h.context, h.config.clusterInfo.Name)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to list pools: %+v\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresult := []model.BlockImage{}\n\n\t// for each pool, get further details about all the images in the pool\n\tfor _, p := range pools {\n\t\timages, ok := h.getImagesForPool(w, p.Name)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tresult = append(result, images...)\n\t}\n\n\tFormatJsonResponse(w, result)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetLinkClient is a function to grab the tasking link for the client
|
func GetLinkClient(albumID string, clientID string) (imageLink string) {
// This hash is the albumID hash
url := "https://api.imgur.com/3/album/" + albumID + "/images"
method := "GET"
payload := &bytes.Buffer{}
writer := multipart.NewWriter(payload)
err := writer.Close()
if err != nil {
fmt.Println(err)
}
client := &http.Client{}
req, err := http.NewRequest(method, url, payload)
if err != nil {
fmt.Println(err)
}
req.Header.Add("Authorization", "Client-ID "+clientID)
req.Header.Set("Content-Type", writer.FormDataContentType())
	res, err := client.Do(req)
	if err != nil {
		fmt.Println("[-] Error connecting:", err)
		return imageLink
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("[!] Error reading response:", err)
		return imageLink
	}
	var results AlbumImages
	err = json.Unmarshal(body, &results)
	if err != nil {
		fmt.Println("[!] Error unmarshalling:", err)
	}
	datavalues := results.Data
	if results.Success && len(datavalues) > 0 {
		imageLink = datavalues[0].Link
	}
return imageLink
}
|
[
"func GetLink(key string) (string, error) {\n\treturn RedisClient.Get(Ctx, key).Result()\n}",
"func OIDCGetLink(discoveryURL, clientID, clientSecret, redirectURL string) (string, error) {\n\tctx := context.Background()\n\tprovider, err := oidc.NewProvider(ctx, discoveryURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\toauth2Config := oauth2.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tRedirectURL: redirectURL,\n\t\tEndpoint: provider.Endpoint(),\n\t\tScopes: []string{oidc.ScopeOpenID},\n\t}\n\n\treturn fmt.Sprintf(\"{\\\"url\\\": \\\"%s\\\"}\", oauth2Config.AuthCodeURL(\"\", oauth2.AccessTypeOffline, oauth2.ApprovalForce)), nil\n}",
"func (client PrivateEndpointConnectionClient) GetPrivateLinkResourceSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}",
"func (i IntransitiveActivity) GetLink() IRI {\n\treturn IRI(i.ID)\n}",
"func (comm *Communicator) GetItemLink(longLivedToken string, fileId string, attempt int) (*DownloadManagerItemResponse, error) {\n\tserverBase := comm.GetActiveUrl()\n\turl := fmt.Sprintf(\"%s/api/bulk/%s/get/%s\", serverBase.String(), longLivedToken, fileId)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR communicator.GetItemLink could not establish connection: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tbodyContent, readErr := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif readErr != nil {\n\t\tlog.Printf(\"ERROR communicator.GetItemLink could not read server response: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\trtn, parseErr := ParseDownloadManagerItemResponse(bodyContent)\n\t\tif parseErr != nil {\n\t\t\tlog.Printf(\"ERROR communicator.GetItemLink offending content was %s\", string(bodyContent))\n\t\t\tlog.Printf(\"ERROR communicator.GetItemLink could not understand server response: %s\", parseErr)\n\t\t\treturn nil, parseErr\n\t\t}\n\t\treturn rtn, nil\n\tcase 502:\n\t\tfallthrough\n\tcase 503:\n\t\tfallthrough\n\tcase 504:\n\t\tif attempt > 10 {\n\t\t\tlog.Printf(\"ERROR communicator.GetItemLink could not contact server after %d attempts, giving up\", attempt)\n\t\t\treturn nil, errors.New(\"server not responding\")\n\t\t}\n\t\tlog.Printf(\"ERROR communcator.GetItemLink could not contact server on attemt %d. Retrying after a delay...\", attempt)\n\t\ttime.Sleep(5 * time.Second)\n\t\treturn comm.GetItemLink(longLivedToken, fileId, attempt+1)\n\tdefault:\n\t\tlog.Printf(\"ERROR communicator.GetItemLink server returned an error %d: %s\", resp.StatusCode, string(bodyContent))\n\t\treturn nil, errors.New(\"server returned an error\")\n\t}\n}",
"func (t *Task) getDestinationClient() (compat.Client, error) {\n\tcluster, err := t.Owner.GetDestinationCluster(t.Client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient, err := cluster.GetClient(t.Client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client, nil\n}",
"func (mc *mgmtClient) getLinkWithoutLock(ctx context.Context) (RPCLink, error) {\n\tif mc.rpcLink != nil {\n\t\treturn mc.rpcLink, nil\n\t}\n\n\tvar err error\n\tmc.rpcLink, err = mc.ns.NewRPCLink(ctx, mc.links.ManagementPath())\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mc.rpcLink, nil\n}",
"func getLink(header http.Header, rel string) string {\n\tlinks := getLinks(header, rel)\n\tif len(links) < 1 {\n\t\treturn \"\"\n\t}\n\n\treturn links[0]\n}",
"func (t *Task) getDestinationClient() (compat.Client, error) {\n\tif t.DestClient == nil {\n\t\treturn nil, fmt.Errorf(\"destination client is not initialized\")\n\t}\n\treturn t.DestClient, nil\n}",
"func GetClientURL(localEndpoint *kubeadmapi.APIEndpoint) string {\n\treturn \"https://\" + net.JoinHostPort(localEndpoint.AdvertiseAddress, strconv.Itoa(constants.EtcdListenClientPort))\n}",
"func (to *TOClient) getURL(path string) string {\n\treturn strings.TrimSuffix(to.URL, \"/\") + \"/\" + strings.TrimPrefix(path, \"/\")\n}",
"func (ac *azureClient) GetLink(ctx context.Context, resourceGroupName, zoneName, vnetLinkName string) (privatedns.VirtualNetworkLink, error) {\n\tctx, _, done := tele.StartSpanWithLogger(ctx, \"privatedns.AzureClient.GetLink\")\n\tdefer done()\n\tvnetLink, err := ac.vnetlinks.Get(ctx, resourceGroupName, zoneName, vnetLinkName)\n\tif err != nil {\n\t\treturn privatedns.VirtualNetworkLink{}, err\n\t}\n\treturn vnetLink, nil\n}",
"func GetLink(ctx context.Context, client dynamic.Interface, namespace, name string) (Link, error) {\n\tunstructured, err := client.Resource(LinkGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn Link{}, err\n\t}\n\treturn NewLink(*unstructured)\n}",
"func (o OrderedCollectionPage) GetLink() IRI {\n\treturn IRI(o.ID)\n}",
"func (t *Task) getSourceClient() (compat.Client, error) {\n\tcluster, err := t.Owner.GetSourceCluster(t.Client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient, err := cluster.GetClient(t.Client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client, nil\n}",
"func (s *Service) Link(req *api.Link) (*api.LinkReply, onet.ClientError) {\n\tif req.Pin == \"\" {\n\t\tlog.Lvl3(\"Current session ping:\", s.pin)\n\t\treturn &api.LinkReply{}, nil\n\t} else if req.Pin != s.pin {\n\t\treturn nil, onet.NewClientError(errors.New(\"Wrong ping\"))\n\t}\n\n\tgenesis, _ := chains.New(req.Roster, nil)\n\n\tmaster := &chains.Master{req.Key, genesis.Hash, req.Roster, req.Admins}\n\tif _, err := master.Append(master); err != nil {\n\t\treturn nil, onet.NewClientError(err)\n\t}\n\treturn &api.LinkReply{base64.StdEncoding.EncodeToString(genesis.Hash)}, nil\n}",
"func (p *proteusAPI) GetUserDefinedLink(getLinkDefinitionJson string) (string, error) {\n\tα := struct {\n\t\tM OperationGetUserDefinedLink `xml:\"tns:getUserDefinedLink\"`\n\t}{\n\t\tOperationGetUserDefinedLink{\n\t\t\t&getLinkDefinitionJson,\n\t\t},\n\t}\n\n\tγ := struct {\n\t\tM OperationGetUserDefinedLinkResponse `xml:\"getUserDefinedLinkResponse\"`\n\t}{}\n\tif err := p.cli.RoundTripWithAction(\"GetUserDefinedLink\", α, &γ); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn *γ.M.Return, nil\n}",
"func (s *StanServer) ClientURL() string {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.providedServerURL != \"\" {\n\t\treturn s.providedServerURL\n\t} else if s.natsServer != nil {\n\t\treturn s.natsServer.ClientURL()\n\t} else {\n\t\treturn \"\"\n\t}\n}",
"func (generator *Generator) GetRedirectLink(userId string) (string, error) {\n\tu := uuid.New().String()\n\thash := getHashOf(u)\n\turl := fmt.Sprintf(\"%s/api/auth/link/%s?u=%s&h=%s\", generator.baseAddress, userId, u, hash)\n\treturn url, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DeleteAlbum will delete a given album
|
func DeleteAlbum(albumDeleteHash string, clientID string) {
url := "https://api.imgur.com/3/album/" + albumDeleteHash
method := "DELETE"
payload := &bytes.Buffer{}
writer := multipart.NewWriter(payload)
err := writer.Close()
if err != nil {
fmt.Println(err)
}
client := &http.Client{}
req, err := http.NewRequest(method, url, payload)
if err != nil {
fmt.Println(err)
}
req.Header.Add("Authorization", "Client-ID "+clientID)
req.Header.Set("Content-Type", writer.FormDataContentType())
	res, err := client.Do(req)
	if err != nil {
		fmt.Println("[-] Error connecting:", err)
		return
	}
	defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
fmt.Println(err)
}
if strings.Contains(string(body), "200") {
fmt.Println(color.GreenString("[+]"), "Delete was a success")
}
}
|
[
"func (m *Market) Deletealbum(ownerId int, albumId int) (resp responses.Ok, err error) {\n\tparams := map[string]interface{}{}\n\n\tparams[\"owner_id\"] = ownerId\n\n\tparams[\"album_id\"] = albumId\n\n\terr = m.SendObjRequest(\"market.deleteAlbum\", params, &resp)\n\n\treturn\n}",
"func (s *SqliteBackend) DeleteAlbum(a *Album) error {\n\t// Open database\n\tdb, err := s.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t// Attempt to delete this album by its ID, if available\n\ttx := db.MustBegin()\n\tif a.ID != 0 {\n\t\ttx.Exec(\"DELETE FROM albums WHERE id = ?;\", a.ID)\n\t\treturn tx.Commit()\n\t}\n\n\t// Else, attempt to remove the album by its artist ID and title\n\ttx.Exec(\"DELETE FROM albums WHERE artist_id = ? AND title = ?;\", a.ArtistID, a.Title)\n\treturn tx.Commit()\n}",
"func deleteAlbum(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\",\"application/json\")\n\tparam := mux.Vars(r)\n\t//CQL Operation\n\tif err:= Session.Query(`DELETE FROM albumtable WHERE albname=? IF EXISTS;`,param[\"album\"]).Exec();err!=nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Fprintf(w, \"Album deleted\")\n\t}\n}",
"func TestAlbumAddDeletePhoto(t *testing.T) {\n\talbum := api.Album{\n\t\tAlbumTitle: WellKnownAlbumTitle,\n\t}\n\n\tnewAlbum, err := Client.V1().CreateAlbum(album)\n\tif err != nil {\n\t\tt.Errorf(\"expected success creating album: %v\", err)\n\t\tt.FailNow()\n\t}\n\n\t// Add Photos\n\tphotos := []string{\n\t\tWellKnownPhotoID,\n\t}\n\terr = Client.V1().AddPhotosToAlbum(newAlbum.AlbumUID, photos)\n\tif err != nil {\n\t\tt.Errorf(\"expected to add photos to album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\t// Get the photos by album\n\tupdatedPhotos, err := Client.V1().GetPhotos(&api.PhotoOptions{\n\t\tCount: 100,\n\t\tAlbumUID: newAlbum.AlbumUID,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"expecting to list photos by album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\tvar updatedPhotoIDs []string\n\tfor _, photo := range updatedPhotos {\n\t\tupdatedPhotoIDs = append(updatedPhotoIDs, photo.PhotoUID)\n\t}\n\tif len(updatedPhotos) != 2 {\n\t\tt.Errorf(\"expecting 2 well known photo in album, found: %d\", len(updatedPhotos))\n\t}\n\n\terr = Client.V1().DeletePhotosFromAlbum(newAlbum.AlbumUID, updatedPhotoIDs)\n\tif err != nil {\n\t\tt.Errorf(\"expected to delete newly created photos from album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\t// Get the photos by album\n\tupdatedPhotos, err = Client.V1().GetPhotos(&api.PhotoOptions{\n\t\tCount: 100,\n\t\tAlbumUID: newAlbum.AlbumUID,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"expecting to list photos by album: %v\", err)\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\tif len(updatedPhotos) != 0 {\n\t\tt.Errorf(\"expected empty album, found %d photos\", len(updatedPhotos))\n\t\t// Note: We do NOT FailNow() here because we want to clean up\n\t}\n\n\terr = Client.V1().DeleteAlbums([]string{newAlbum.AlbumUID})\n\tif err != nil {\n\t\tt.Errorf(\"expected delete album %s, album not deleted: %v\", newAlbum.AlbumUID, err)\n\t\tt.FailNow()\n\t}\n\n\t// put the album back\n\tCreateWellKnownAlbum()\n}",
"func (l *Lidarr) DeleteAlbum(albumID int64, deleteFiles, addImportExclusion bool) error {\n\treturn l.DeleteAlbumContext(context.Background(), albumID, deleteFiles, addImportExclusion)\n}",
"func (l *Lidarr) DeleteAlbumContext(ctx context.Context, albumID int64, deleteFiles, addImportExclusion bool) error {\n\treq := starr.Request{URI: path.Join(bpAlbum, fmt.Sprint(albumID)), Query: make(url.Values)}\n\treq.Query.Set(\"deleteFiles\", fmt.Sprint(deleteFiles))\n\treq.Query.Set(\"addImportListExclusion\", fmt.Sprint(addImportExclusion))\n\n\tif err := l.DeleteAny(ctx, req); err != nil {\n\t\treturn fmt.Errorf(\"api.Delete(%s): %w\", &req, err)\n\t}\n\n\treturn nil\n}",
"func (m *MockUsecase) DeleteAlbumFromMediateka(userID, albumID int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteAlbumFromMediateka\", userID, albumID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (s State) RemoveAlbum(a Album) State {\n\tfor idx, album := range s.Albums {\n\t\tif album.ID == a.ID {\n\t\t\ts.Albums = append(s.Albums[:idx], s.Albums[idx+1:]...)\n\t\t}\n\t}\n\treturn s\n}",
"func (mr *MockUsecaseMockRecorder) DeleteAlbumFromMediateka(userID, albumID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteAlbumFromMediateka\", reflect.TypeOf((*MockUsecase)(nil).DeleteAlbumFromMediateka), userID, albumID)\n}",
"func (m *Market) Removefromalbum(ownerId int, itemId int, albumIds []int) (resp responses.Ok, err error) {\n\tparams := map[string]interface{}{}\n\n\tparams[\"owner_id\"] = ownerId\n\n\tparams[\"item_id\"] = itemId\n\n\tparams[\"album_ids\"] = SliceToString(albumIds)\n\n\terr = m.SendObjRequest(\"market.removeFromAlbum\", params, &resp)\n\n\treturn\n}",
"func (a API) DeleteSong(c *gin.Context) (int, interface{}, error) {\n\tname := c.Param(\"name\")\n\te := a.err.Fn(\"DeleteSong\").Tag(\"name\", name)\n\tinvalid, err := a.s.deleteSong(name)\n\tif err != nil {\n\t\treturn 500, nil, e.UK(err)\n\t}\n\tif invalid {\n\t\treturn 400, nil, e.DB(err)\n\t}\n\treturn 201, nil, nil\n}",
"func (s *SqliteBackend) DeleteSong(a *Song) error {\n\t// Open database\n\tdb, err := s.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t// Attempt to delete this song by its ID, if available\n\ttx := db.MustBegin()\n\tif a.ID != 0 {\n\t\ttx.Exec(\"DELETE FROM songs WHERE id = ?;\", a.ID)\n\t\treturn tx.Commit()\n\t}\n\n\t// Else, attempt to remove the song by its file name\n\ttx.Exec(\"DELETE FROM songs WHERE file_name = ?;\", a.FileName)\n\treturn tx.Commit()\n}",
"func DeleteArtistSong(id int,o orm.Ormer) (err error) {\n\tsql := \"DELETE FROM artist_song WHERE artist_song.artist_id = \" + strconv.Itoa(id)\n\t_, err = o.Raw(sql).Exec()\n\treturn err\n}",
"func (s *SqliteBackend) PurgeOrphanAlbums() (int, error) {\n\t// Open database\n\tdb, err := s.Open()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer db.Close()\n\n\t// Select all albums without a song referencing their album ID\n\trows, err := db.Queryx(\"SELECT albums.id FROM albums LEFT JOIN songs ON \" +\n\t\t\"albums.id = songs.album_id WHERE songs.album_id IS NULL;\")\n\tif err != nil && err != sql.ErrNoRows {\n\t\treturn -1, err\n\t}\n\n\t// Open a transaction to remove all orphaned albums\n\ttx := db.MustBegin()\n\n\t// Iterate all rows\n\talbum := new(Album)\n\ttotal := 0\n\tfor rows.Next() {\n\t\t// Scan ID into struct\n\t\tif err := rows.StructScan(album); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\n\t\t// Remove album\n\t\ttx.Exec(\"DELETE FROM albums WHERE id = ?;\", album.ID)\n\t\ttotal++\n\t}\n\n\treturn total, tx.Commit()\n}",
"func (r *mediaRepo) DeleteMediasLibrary(id uint) error {\n\tquery := r.DB.Unscoped().Where(\"library_id = ?\", id).Delete(&Media{})\n\treturn query.Error\n}",
"func (ar CoverDbRepository) Delete(entity *domain.Cover) (err error) {\n\t_, err = ar.AppContext.DB.Delete(entity)\n\n\treturn\n}",
"func CreateAlbum(title string, clientID string) (albumID, deleteHash interface{}) {\n\n\tapiURL := \"https://api.imgur.com\"\n\tresource := \"/3/album/\"\n\tdata := url.Values{}\n\tdata.Set(\"title\", title)\n\n\tu, _ := url.ParseRequestURI(apiURL)\n\tu.Path = resource\n\turlStr := u.String() // \"https://api.com/user/\"\n\n\tclient := &http.Client{}\n\tr, _ := http.NewRequest(\"POST\", urlStr, strings.NewReader(data.Encode())) // URL-encoded payload\n\tr.Header.Add(\"Authorization\", \"Client-ID \"+clientID)\n\tr.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tr.Header.Add(\"Content-Length\", strconv.Itoa(len(data.Encode())))\n\n\tresp, _ := client.Do(r)\n\tvar result map[string]interface{}\n\n\tjson.NewDecoder(resp.Body).Decode(&result)\n\n\tnestedMap := result[\"data\"]\n\tnewMap, _ := nestedMap.(map[string]interface{})\n\n\talbumID = newMap[\"id\"]\n\tdeleteHash = newMap[\"deletehash\"]\n\n\tfmt.Println(color.GreenString(\"\\n[+]\"), \"Successfully created an album with the following values:\")\n\tfmt.Println(color.GreenString(\"albumID:\"), albumID, color.GreenString(\"Album DeleteHash:\"), deleteHash)\n\tfmt.Println(\" \")\n\n\treturn albumID, deleteHash\n\n}",
"func (s State) RemovePhotoFromAlbum(a Album, p Photo) State {\n\tfor aIdx := range s.Albums {\n\t\tif a.ID == s.Albums[aIdx].ID {\n\t\t\tfor pIdx := range s.Albums[aIdx].Photos {\n\t\t\t\tif s.Albums[aIdx].Photos[pIdx] == p.Hash {\n\t\t\t\t\ts.Albums[aIdx].Photos = append(s.Albums[aIdx].Photos[:pIdx], s.Albums[aIdx].Photos[pIdx+1:]...)\n\t\t\t\t\treturn s\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn s\n}",
"func (ar AlbumDbRepository) CleanUp() error {\n\t_, err := ar.AppContext.DB.Exec(\"DELETE FROM albums WHERE NOT EXISTS (SELECT id FROM tracks WHERE tracks.album_id = albums.id)\")\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCreateStorageV1CSINodeOK creates CreateStorageV1CSINodeOK with default headers values
|
func NewCreateStorageV1CSINodeOK() *CreateStorageV1CSINodeOK {
return &CreateStorageV1CSINodeOK{}
}
|
[
"func (*UkoV4) NewKeystoreCreationRequestKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeBaseUpdate(typeVar string, vault *VaultReferenceInCreationRequest, ibmVariant string) (_model *KeystoreCreationRequestKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeBaseUpdate, err error) {\n\t_model = &KeystoreCreationRequestKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalExternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalCreateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeIbmCloudKmsInternalUpdateKeystoreTypeBaseUpdate{\n\t\tType: core.StringPtr(typeVar),\n\t\tVault: vault,\n\t\tIbmVariant: core.StringPtr(ibmVariant),\n\t}\n\terr = core.ValidateStruct(_model, \"required parameters\")\n\treturn\n}",
"func NewCreateStorageV1CSINodeCreated() *CreateStorageV1CSINodeCreated {\n\n\treturn &CreateStorageV1CSINodeCreated{}\n}",
"func NewCreateStorageV1CSINodeUnauthorized() *CreateStorageV1CSINodeUnauthorized {\n\n\treturn &CreateStorageV1CSINodeUnauthorized{}\n}",
"func NewWatchStorageV1CSINodeListOK() *WatchStorageV1CSINodeListOK {\n\treturn &WatchStorageV1CSINodeListOK{}\n}",
"func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n session, err := store.Get(r, \"session-name\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n s3 := S3{\n EndPointString: session.Values[\"Endpoint\"].(string),\n AccessKey: session.Values[\"AccessKey\"].(string),\n SecretKey: session.Values[\"SecretKey\"].(string),\n Namespace: session.Values[\"Namespace\"].(string),\n }\n\n decoder := json.NewDecoder(r.Body)\n var bucket NewBucket\n err = decoder.Decode(&bucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n\n // Add the necessary headers for Metadata Search and Access During Outage\n createBucketHeaders := map[string][]string{}\n createBucketHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n createBucketHeaders[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n createBucketHeaders[\"x-emc-metadata-search\"] = []string{\"ObjectName,x-amz-meta-image-width;Integer,x-amz-meta-image-height;Integer,x-amz-meta-gps-latitude;Decimal,x-amz-meta-gps-longitude;Decimal\"}\n\n createBucketResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/\", createBucketHeaders, \"\")\n\n // Enable CORS after the bucket creation to allow the web browser to send requests directly to ECS\n if createBucketResponse.Code == 200 {\n enableBucketCorsHeaders := map[string][]string{}\n enableBucketCorsHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n corsConfiguration := `\n <CORSConfiguration>\n <CORSRule>\n <AllowedOrigin>*</AllowedOrigin>\n <AllowedHeader>*</AllowedHeader>\n <ExposeHeader>x-amz-meta-image-width</ExposeHeader>\n <ExposeHeader>x-amz-meta-image-height</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-latitude</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-longitude</ExposeHeader>\n <AllowedMethod>HEAD</AllowedMethod>\n <AllowedMethod>GET</AllowedMethod>\n <AllowedMethod>PUT</AllowedMethod>\n <AllowedMethod>POST</AllowedMethod>\n <AllowedMethod>DELETE</AllowedMethod>\n </CORSRule>\n </CORSConfiguration>\n `\n enableBucketCorsResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/?cors\", enableBucketCorsHeaders, corsConfiguration)\n if enableBucketCorsResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, struct {\n CorsConfiguration string `json:\"cors_configuration\"`\n Bucket string `json:\"bucket\"`\n } {\n CorsConfiguration: corsConfiguration,\n Bucket: bucket.Name,\n })\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket created, but CORS can't be enabled\"}\n }\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket can't be created\"}\n }\n return nil\n}",
"func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\n\treturn &ReplaceStorageV1CSINodeOK{}\n}",
"func NewCreateStorageV1CSINodeAccepted() *CreateStorageV1CSINodeAccepted {\n\n\treturn &CreateStorageV1CSINodeAccepted{}\n}",
"func CreateDescribeLogstoreStorageRequest() (request *DescribeLogstoreStorageRequest) {\n\trequest = &DescribeLogstoreStorageRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"DescribeLogstoreStorage\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}",
"func (client IdentityClient) createTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagDefaults\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func NewReplaceStorageV1CSINodeOK() *ReplaceStorageV1CSINodeOK {\n\treturn &ReplaceStorageV1CSINodeOK{}\n}",
"func NewCreateIOCDefault(code int) *CreateIOCDefault {\n\treturn &CreateIOCDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}",
"func newNs(ctx context.Context, cl client.Client, name string) error {\n\tns := &corev1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t}\n\tif err := cl.Create(ctx, ns); err != nil {\n\t\tif !errors.IsAlreadyExists(err) {\n\t\t\treturn fmt.Errorf(\"failed to create namespace %s: %v\", ns.Name, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func NewStorageNode(storageProtocol string, configparams ...map[string]string) (storageoperations, error) {\n\tcomnserv, err := buildCommonService(nil, nil)\n\tif err == nil {\n\t\tif storageProtocol == \"fc\" {\n\t\t\treturn &fcstorage{cs: comnserv}, nil\n\t\t} else if storageProtocol == \"iscsi\" {\n\t\t\treturn &iscsistorage{cs: comnserv}, nil\n\t\t} else if storageProtocol == \"nfs\" {\n\t\t\treturn &nfsstorage{cs: comnserv, mounter: mount.New(\"\")}, nil\n\t\t}\n\t\treturn nil, errors.New(\"Error: Invalid storage protocol -\" + storageProtocol)\n\t}\n\treturn nil, err\n}",
"func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func New() gocsi.StoragePluginProvider {\n\tsvc := service.New()\n\treturn &gocsi.StoragePlugin{\n\t\tController: svc,\n\t\tIdentity: svc,\n\t\tNode: svc,\n\t\tBeforeServe: svc.BeforeServe,\n\t\tRegisterAdditionalServers: svc.RegisterAdditionalServers,\n\n\t\tEnvVars: []string{\n\t\t\t// Enable request validation\n\t\t\tgocsi.EnvVarSpecReqValidation + \"=true\",\n\n\t\t\t// Enable serial volume access\n\t\t\tgocsi.EnvVarSerialVolAccess + \"=true\",\n\t\t},\n\t}\n}",
"func NewReadStorageV1beta1CSINodeOK() *ReadStorageV1beta1CSINodeOK {\n\treturn &ReadStorageV1beta1CSINodeOK{}\n}",
"func Initialize() (MinIOInfo, error) {\n\tendpoint := config.GetConfiguration().MinIOAddress + \":\" + config.GetConfiguration().MinIOPort\n accessKeyID := config.GetConfiguration().AccessKeyID\n secretAccessKey := config.GetConfiguration().SecretAccessKey\n useSSL := false\n\n minioInfo := MinIOInfo{}\n // Initialize minio client object.\n minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)\n if err != nil {\n log.Fatalln(err)\n return minioInfo, err\n }\n\n // Make a new bucket.\n bucketNames := []string{\"binary\", \"container\", \"operatingsystem\"}\n location := \"us-west-1\"\n\n for _, bucketName := range bucketNames {\n err := minioClient.MakeBucket(bucketName, location)\n if err != nil {\n // Check to see if we already own this bucket (which happens if you run this twice)\n exists, errBucketExists := minioClient.BucketExists(bucketName)\n if errBucketExists == nil && exists {\n log.Printf(\"We already own %s\\n\", bucketName)\n } else {\n log.Fatalln(err)\n return minioInfo, err\n }\n } else {\n log.Printf(\"Successfully created %s\\n\", bucketName)\n }\n }\n\n minioInfo.minioC = minioClient\n return minioInfo, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetPayload sets the payload to the create storage v1 c s i node o k response
|
func (o *CreateStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {
o.Payload = payload
}
|
[
"func (o *CreateStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}",
"func (o *ReplaceStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}",
"func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}",
"func (o *DeleteStorageByIDOK) SetPayload(payload *models.Storage) {\n\to.Payload = payload\n}",
"func (o *SetVSphereEndpointCreated) SetPayload(payload *models.VsphereInfo) {\n\to.Payload = payload\n}",
"func (o *ReplaceStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}",
"func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}",
"func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}",
"func (o *SizeCreated) SetPayload(payload *models.SteeringRequestID) {\n\to.Payload = payload\n}",
"func (o *GetKeysIDOK) SetPayload(payload *models.Key) {\n\to.Payload = payload\n}",
"func (o *CreateClusterDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}",
"func (o *CreateStorageSSLCertificateDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}",
"func (nf *NetworkPayload) SetPayload(newpayload []byte) {\n}",
"func (o *CreateTaskInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}",
"func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}",
"func (o *GetServicesHaproxyRuntimeAclsIDOK) SetPayload(payload *models.ACLFile) {\n\to.Payload = payload\n}",
"func (o *PutAPIInventoryAPIIDSpecsProvidedSpecCreated) SetPayload(payload *models.RawSpec) {\n\to.Payload = payload\n}",
"func (o *GenerateFromTemplateInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}",
"func (o *GetPresignedForClusterFilesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCreateStorageV1CSINodeCreated creates CreateStorageV1CSINodeCreated with default headers values
|
func NewCreateStorageV1CSINodeCreated() *CreateStorageV1CSINodeCreated {
return &CreateStorageV1CSINodeCreated{}
}
|
[
"func NewCreateStorageV1CSINodeOK() *CreateStorageV1CSINodeOK {\n\n\treturn &CreateStorageV1CSINodeOK{}\n}",
"func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}",
"func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}",
"func NewCreateStorageV1CSINodeUnauthorized() *CreateStorageV1CSINodeUnauthorized {\n\n\treturn &CreateStorageV1CSINodeUnauthorized{}\n}",
"func NewCreateStorageV1CSINodeAccepted() *CreateStorageV1CSINodeAccepted {\n\n\treturn &CreateStorageV1CSINodeAccepted{}\n}",
"func (service *ContrailService) CreateContrailStorageNode(\n\tctx context.Context,\n\trequest *models.CreateContrailStorageNodeRequest) (*models.CreateContrailStorageNodeResponse, error) {\n\tmodel := request.ContrailStorageNode\n\tif model.UUID == \"\" {\n\t\tmodel.UUID = uuid.NewV4().String()\n\t}\n\tauth := common.GetAuthCTX(ctx)\n\tif auth == nil {\n\t\treturn nil, common.ErrorUnauthenticated\n\t}\n\n\tif model.FQName == nil {\n\t\tif model.DisplayName != \"\" {\n\t\t\tmodel.FQName = []string{auth.DomainID(), auth.ProjectID(), model.DisplayName}\n\t\t} else {\n\t\t\tmodel.FQName = []string{auth.DomainID(), auth.ProjectID(), model.UUID}\n\t\t}\n\t}\n\tmodel.Perms2 = &models.PermType2{}\n\tmodel.Perms2.Owner = auth.ProjectID()\n\tif err := common.DoInTransaction(\n\t\tservice.DB,\n\t\tfunc(tx *sql.Tx) error {\n\t\t\treturn db.CreateContrailStorageNode(ctx, tx, request)\n\t\t}); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err,\n\t\t\t\"resource\": \"contrail_storage_node\",\n\t\t}).Debug(\"db create failed on create\")\n\t\treturn nil, common.ErrorInternal\n\t}\n\treturn &models.CreateContrailStorageNodeResponse{\n\t\tContrailStorageNode: request.ContrailStorageNode,\n\t}, nil\n}",
"func (cc *ContrailCommand) CreateNode(host vcenter.ESXIHost) error {\n\tlog.Debug(\"Create Node:\", cc.AuthToken)\n\tnodeResource := contrailCommandNodeSync{\n\t\tResources: []*nodeResources{\n\t\t\t{\n\t\t\t\tKind: \"node\",\n\t\t\t\tData: &nodeData{\n\t\t\t\t\tNodeType: \"esxi\",\n\t\t\t\t\tUUID: host.UUID,\n\t\t\t\t\tHostname: host.Hostname,\n\t\t\t\t\tFqName: []string{\"default-global-system-config\", host.Hostname},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tjsonData, err := json.Marshal(nodeResource)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Sending Request\")\n\tresp, _, err := cc.sendRequest(\"/sync\", string(jsonData), \"POST\") //nolint: bodyclose\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Got status : \", resp.StatusCode)\n\tswitch resp.StatusCode {\n\tdefault:\n\t\treturn fmt.Errorf(\"resource creation failed, %d\", resp.StatusCode)\n\tcase 200, 201:\n\t}\n\treturn nil\n}",
"func CreateDescribeLogstoreStorageRequest() (request *DescribeLogstoreStorageRequest) {\n\trequest = &DescribeLogstoreStorageRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"DescribeLogstoreStorage\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func NewStorageNode(storageProtocol string, configparams ...map[string]string) (storageoperations, error) {\n\tcomnserv, err := buildCommonService(nil, nil)\n\tif err == nil {\n\t\tif storageProtocol == \"fc\" {\n\t\t\treturn &fcstorage{cs: comnserv}, nil\n\t\t} else if storageProtocol == \"iscsi\" {\n\t\t\treturn &iscsistorage{cs: comnserv}, nil\n\t\t} else if storageProtocol == \"nfs\" {\n\t\t\treturn &nfsstorage{cs: comnserv, mounter: mount.New(\"\")}, nil\n\t\t}\n\t\treturn nil, errors.New(\"Error: Invalid storage protocol -\" + storageProtocol)\n\t}\n\treturn nil, err\n}",
"func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func NewCreateIOCDefault(code int) *CreateIOCDefault {\n\treturn &CreateIOCDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func (sdk *SDK) NewNode(prefer *cloudsvr.PreferAttrs) (*cloudsvr.CloudNode, *cloudsvr.PreferAttrs, error) {\n\n\tvar (\n\t\tpassword, _ = utils.GenPassword(24)\n\t\treq = &CreateInstanceRequest{\n\t\t\tImageID: OsImage,\n\t\t\tPassword: password,\n\t\t\tInstanceName: NodeName,\n\t\t\tInstanceChargeType: \"PostPaid\", // require RMB 100+\n\t\t\tSecurityGroupID: \"whatever\", // will be automatic rewrite\n\t\t\tInternetChargeType: \"PayByTraffic\", // traffic payment\n\t\t\tInternetMaxBandwidthOut: \"100\", // 100M\n\t\t\tLabels: NodeLabels,\n\t\t}\n\t)\n\n\t// if prefered attributes set, use prefer region & instance-type\n\tif prefer != nil && prefer.Valid() == nil {\n\t\tvar (\n\t\t\treg = prefer.RegionOrZone\n\t\t\ttyp = prefer.InstanceType\n\t\t)\n\t\tlog.Printf(\"create aliyun ecs by using prefered region %s, instance type %s ...\", reg, typ)\n\n\t\treq.RegionID = reg // cn-beijing\n\t\treq.InstanceType = typ // ecs.n4.large\n\n\t\tcreated, err := sdk.createNode(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tlog.Printf(\"created prefered aliyun ecs succeed: %s\", created.ID)\n\t\treturn created, prefer, nil\n\t}\n\n\tlog.Infoln(\"creating aliyun ecs by trying all regions & types ...\")\n\n\t// if prefered created failed, or without prefer region & instance-type\n\t// try best on all region & instance-types to create the new aliyun ecs\n\tvar (\n\t\tregions []RegionType // all of aliyun regions\n\t\ttypes []InstanceTypeItemType // all of instance types within given range of mems & cpus\n\t\terr error\n\t\tcreated *cloudsvr.CloudNode\n\t)\n\n\t// list all regions\n\tregions, err = sdk.ListRegions()\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListRegions() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\t// list specified range of instance types\n\ttypes, err = sdk.ListInstanceTypes(2, 4, 2, 8) // TODO range of given cpus/mems ranges\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListInstanceTypes() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tvar (\n\t\tuseRegionID, useInsType string\n\t)\n\t// range all regions & types to try to create ecs instance\n\tfor _, reg := range regions {\n\t\tfor _, typ := range types {\n\t\t\treq.RegionID = reg.RegionID // cn-beijing\n\t\t\treq.InstanceType = typ.InstanceTypeID // ecs.n4.large\n\n\t\t\t// if created succeed, directly return\n\t\t\tcreated, err = sdk.createNode(req)\n\t\t\tif err == nil {\n\t\t\t\tuseRegionID, useInsType = reg.RegionID, typ.InstanceTypeID\n\t\t\t\tgoto END\n\t\t\t}\n\n\t\t\tif sdk.isFatalError(err) {\n\t\t\t\tlog.Errorf(\"create aliyun ecs got fatal error, stop retry: %v\", err)\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tlog.Warnf(\"create aliyun ecs failed: %v, will retry another region or type\", err)\n\t\t}\n\t}\n\nEND:\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog.Printf(\"created aliyun ecs %s at %s and type is %s\", created.ID, useRegionID, useInsType)\n\treturn created, &cloudsvr.PreferAttrs{RegionOrZone: useRegionID, InstanceType: useInsType}, nil\n}",
"func (s *StorageClusterAPI) Create(w http.ResponseWriter, r *http.Request) {\n\tstorage := &config.StorageCluster{}\n\terr := api.GetJSONBodyFromRequest(r, storage)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\terr = s.storageClusterService.Save(storage)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\tapi.NoContent(w)\n}",
"func UINodeNew(w http.ResponseWriter, r *http.Request) {\n\tdefer common.Recover()\n\n\tp := &ui.Page{\n\t\tTitle: \"New Node\",\n\t\tURL: strings.Split(r.URL.Path, \"/\"),\n\t}\n\n\tt := ui.GetTemplate(\"nodesNew\")\n\terr := t.ExecuteTemplate(w, \"base\", p)\n\tui.ErrorWriter(w, err)\n}",
"func (service *ContrailService) RESTCreateContrailStorageNode(c echo.Context) error {\n\trequestData := &models.CreateContrailStorageNodeRequest{}\n\tif err := c.Bind(requestData); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err,\n\t\t\t\"resource\": \"contrail_storage_node\",\n\t\t}).Debug(\"bind failed on create\")\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Invalid JSON format\")\n\t}\n\tctx := c.Request().Context()\n\tresponse, err := service.CreateContrailStorageNode(ctx, requestData)\n\tif err != nil {\n\t\treturn common.ToHTTPError(err)\n\t}\n\treturn c.JSON(http.StatusCreated, response)\n}",
"func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n session, err := store.Get(r, \"session-name\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n s3 := S3{\n EndPointString: session.Values[\"Endpoint\"].(string),\n AccessKey: session.Values[\"AccessKey\"].(string),\n SecretKey: session.Values[\"SecretKey\"].(string),\n Namespace: session.Values[\"Namespace\"].(string),\n }\n\n decoder := json.NewDecoder(r.Body)\n var bucket NewBucket\n err = decoder.Decode(&bucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n\n // Add the necessary headers for Metadata Search and Access During Outage\n createBucketHeaders := map[string][]string{}\n createBucketHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n createBucketHeaders[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n createBucketHeaders[\"x-emc-metadata-search\"] = []string{\"ObjectName,x-amz-meta-image-width;Integer,x-amz-meta-image-height;Integer,x-amz-meta-gps-latitude;Decimal,x-amz-meta-gps-longitude;Decimal\"}\n\n createBucketResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/\", createBucketHeaders, \"\")\n\n // Enable CORS after the bucket creation to allow the web browser to send requests directly to ECS\n if createBucketResponse.Code == 200 {\n enableBucketCorsHeaders := map[string][]string{}\n enableBucketCorsHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n corsConfiguration := `\n <CORSConfiguration>\n <CORSRule>\n <AllowedOrigin>*</AllowedOrigin>\n <AllowedHeader>*</AllowedHeader>\n <ExposeHeader>x-amz-meta-image-width</ExposeHeader>\n <ExposeHeader>x-amz-meta-image-height</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-latitude</ExposeHeader>\n <ExposeHeader>x-amz-meta-gps-longitude</ExposeHeader>\n <AllowedMethod>HEAD</AllowedMethod>\n <AllowedMethod>GET</AllowedMethod>\n <AllowedMethod>PUT</AllowedMethod>\n <AllowedMethod>POST</AllowedMethod>\n <AllowedMethod>DELETE</AllowedMethod>\n </CORSRule>\n </CORSConfiguration>\n `\n enableBucketCorsResponse, _ := s3Request(s3, bucket.Name, \"PUT\", \"/?cors\", enableBucketCorsHeaders, corsConfiguration)\n if enableBucketCorsResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, struct {\n CorsConfiguration string `json:\"cors_configuration\"`\n Bucket string `json:\"bucket\"`\n } {\n CorsConfiguration: corsConfiguration,\n Bucket: bucket.Name,\n })\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket created, but CORS can't be enabled\"}\n }\n } else {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Bucket can't be created\"}\n }\n return nil\n}",
"func newFileSystem(efssvc *efs.EFS, subnetIDs []*string, sgid string, ownedTag *efs.Tag, key string) string {\n\tlog.Info(\"Creating file system...\", \"key\", key)\n\tfsInput := &efs.CreateFileSystemInput{\n\t\tCreationToken: aws.String(fmt.Sprintf(\"%s:%s\", fsTokenMarker, key)),\n\t\t// TODO(efried): Make this configurable\n\t\tEncrypted: aws.Bool(true),\n\t\tTags: []*efs.Tag{ownedTag},\n\t}\n\tfsd, err := efssvc.CreateFileSystem(fsInput)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfsid := *fsd.FileSystemId\n\tlog.Info(\"Created new file system\", \"fsid\", fsid)\n\n\treturn fsid\n}",
"func (client IdentityClient) createTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagDefaults\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func (fs *Ipfs) createNode(ctx context.Context, repoPath string) (icore.CoreAPI, error) {\n\t// Open the repo\n\trepo, err := fsrepo.Open(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct the node\n\tnodeOptions := &core.BuildCfg{\n\t\tOnline: true,\n\n\t\t// This option sets the node to be a full DHT node\n\t\t// (both fetching and storing DHT Records)\n\t\tRouting: libp2p.DHTOption,\n\n\t\t// Routing: libp2p.DHTClientOption,\n\t\t// This option sets the node to be a client DHT node (only fetching records)\n\n\t\tRepo: repo,\n\t}\n\n\tnode, err := core.NewNode(ctx, nodeOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfs.ipfsNode = node\n\n\t// Attach the Core API to the constructed node\n\treturn coreapi.NewCoreAPI(node)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetPayload sets the payload to the create storage v1 c s i node created response
|
func (o *CreateStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {
o.Payload = payload
}
|
[
"func (o *CreateStorageV1CSINodeOK) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}",
"func (o *ReplaceStorageV1CSINodeCreated) SetPayload(payload *models.IoK8sAPIStorageV1CSINode) {\n\to.Payload = payload\n}",
"func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}",
"func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}",
"func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}",
"func (o *SetVSphereEndpointCreated) SetPayload(payload *models.VsphereInfo) {\n\to.Payload = payload\n}",
"func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}",
"func (o *ReplaceNodeV1alpha1RuntimeClassCreated) SetPayload(payload *models.IoK8sAPINodeV1alpha1RuntimeClass) {\n\to.Payload = payload\n}",
"func (o *SizeCreated) SetPayload(payload *models.SteeringRequestID) {\n\to.Payload = payload\n}",
"func (o *CreateResourceRequestCreated) SetPayload(payload *models.ApprovalSystemCreatedResponse) {\n\to.Payload = payload\n}",
"func (nf *NetworkPayload) SetPayload(newpayload []byte) {\n}",
"func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}",
"func (o *CreateTaskInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}",
"func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}",
"func (o *CreateStorageSSLCertificateCreated) SetPayload(payload *models.SslCertificate) {\n\to.Payload = payload\n}",
"func (o *SyncCreated) SetPayload(payload interface{}) {\n\to.Payload = payload\n}",
"func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}",
"func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}",
"func (o *CreateProjectCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCreateStorageV1CSINodeAccepted creates CreateStorageV1CSINodeAccepted with default headers values
|
func NewCreateStorageV1CSINodeAccepted() *CreateStorageV1CSINodeAccepted {
return &CreateStorageV1CSINodeAccepted{}
}
|
[
"func NewCreateStorageV1CSINodeOK() *CreateStorageV1CSINodeOK {\n\n\treturn &CreateStorageV1CSINodeOK{}\n}",
"func NewCreateCoreV1NamespaceAccepted() *CreateCoreV1NamespaceAccepted {\n\treturn &CreateCoreV1NamespaceAccepted{}\n}",
"func NewStoragePoolCreateAccepted() *StoragePoolCreateAccepted {\n\treturn &StoragePoolCreateAccepted{}\n}",
"func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}",
"func NewCreateComputeGatewayAccepted() *CreateComputeGatewayAccepted {\n\treturn &CreateComputeGatewayAccepted{}\n}",
"func NewCreateNetworkingV1beta1NamespacedIngressAccepted() *CreateNetworkingV1beta1NamespacedIngressAccepted {\n\n\treturn &CreateNetworkingV1beta1NamespacedIngressAccepted{}\n}",
"func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}",
"func NewCreateStorageV1CSINodeCreated() *CreateStorageV1CSINodeCreated {\n\n\treturn &CreateStorageV1CSINodeCreated{}\n}",
"func NewCreateNetworkingV1NamespacedNetworkPolicyAccepted() *CreateNetworkingV1NamespacedNetworkPolicyAccepted {\n\treturn &CreateNetworkingV1NamespacedNetworkPolicyAccepted{}\n}",
"func NewVolumeCreateAccepted() *VolumeCreateAccepted {\n\treturn &VolumeCreateAccepted{}\n}",
"func NewCreateBlockDeviceAccepted() *CreateBlockDeviceAccepted {\n\treturn &CreateBlockDeviceAccepted{}\n}",
"func NewClusterCreateAccepted() *ClusterCreateAccepted {\n\treturn &ClusterCreateAccepted{}\n}",
"func (sdk *SDK) NewNode(prefer *cloudsvr.PreferAttrs) (*cloudsvr.CloudNode, *cloudsvr.PreferAttrs, error) {\n\n\tvar (\n\t\tpassword, _ = utils.GenPassword(24)\n\t\treq = &CreateInstanceRequest{\n\t\t\tImageID: OsImage,\n\t\t\tPassword: password,\n\t\t\tInstanceName: NodeName,\n\t\t\tInstanceChargeType: \"PostPaid\", // require RMB 100+\n\t\t\tSecurityGroupID: \"whatever\", // will be automatic rewrite\n\t\t\tInternetChargeType: \"PayByTraffic\", // traffic payment\n\t\t\tInternetMaxBandwidthOut: \"100\", // 100M\n\t\t\tLabels: NodeLabels,\n\t\t}\n\t)\n\n\t// if prefered attributes set, use prefer region & instance-type\n\tif prefer != nil && prefer.Valid() == nil {\n\t\tvar (\n\t\t\treg = prefer.RegionOrZone\n\t\t\ttyp = prefer.InstanceType\n\t\t)\n\t\tlog.Printf(\"create aliyun ecs by using prefered region %s, instance type %s ...\", reg, typ)\n\n\t\treq.RegionID = reg // cn-beijing\n\t\treq.InstanceType = typ // ecs.n4.large\n\n\t\tcreated, err := sdk.createNode(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tlog.Printf(\"created prefered aliyun ecs succeed: %s\", created.ID)\n\t\treturn created, prefer, nil\n\t}\n\n\tlog.Infoln(\"creating aliyun ecs by trying all regions & types ...\")\n\n\t// if prefered created failed, or without prefer region & instance-type\n\t// try best on all region & instance-types to create the new aliyun ecs\n\tvar (\n\t\tregions []RegionType // all of aliyun regions\n\t\ttypes []InstanceTypeItemType // all of instance types within given range of mems & cpus\n\t\terr error\n\t\tcreated *cloudsvr.CloudNode\n\t)\n\n\t// list all regions\n\tregions, err = sdk.ListRegions()\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListRegions() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\t// list specified range of instance types\n\ttypes, err = sdk.ListInstanceTypes(2, 4, 2, 8) // TODO range of given cpus/mems ranges\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListInstanceTypes() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\tvar (\n\t\tuseRegionID, useInsType string\n\t)\n\t// range all regions & types to try to create ecs instance\n\tfor _, reg := range regions {\n\t\tfor _, typ := range types {\n\t\t\treq.RegionID = reg.RegionID // cn-beijing\n\t\t\treq.InstanceType = typ.InstanceTypeID // ecs.n4.large\n\n\t\t\t// if created succeed, directly return\n\t\t\tcreated, err = sdk.createNode(req)\n\t\t\tif err == nil {\n\t\t\t\tuseRegionID, useInsType = reg.RegionID, typ.InstanceTypeID\n\t\t\t\tgoto END\n\t\t\t}\n\n\t\t\tif sdk.isFatalError(err) {\n\t\t\t\tlog.Errorf(\"create aliyun ecs got fatal error, stop retry: %v\", err)\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tlog.Warnf(\"create aliyun ecs failed: %v, will retry another region or type\", err)\n\t\t}\n\t}\n\nEND:\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog.Printf(\"created aliyun ecs %s at %s and type is %s\", created.ID, useRegionID, useInsType)\n\treturn created, &cloudsvr.PreferAttrs{RegionOrZone: useRegionID, InstanceType: useInsType}, nil\n}",
"func (c *icontext) JSONAccepted(content interface{}) error {\n\treturn c.JSON(http.StatusAccepted, content)\n}",
"func NewWatchStorageV1CSINodeListOK() *WatchStorageV1CSINodeListOK {\n\treturn &WatchStorageV1CSINodeListOK{}\n}",
"func NewCreateFilterAccepted() *CreateFilterAccepted {\n\n\treturn &CreateFilterAccepted{}\n}",
"func NewPostManagementKubernetesIoV1NodesAccepted() *PostManagementKubernetesIoV1NodesAccepted {\n\n\treturn &PostManagementKubernetesIoV1NodesAccepted{}\n}",
"func NewCreateExtensionsV1beta1NamespacedIngressAccepted() *CreateExtensionsV1beta1NamespacedIngressAccepted {\n\n\treturn &CreateExtensionsV1beta1NamespacedIngressAccepted{}\n}",
"func (client IdentityClient) createTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagDefaults\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewCreateStorageV1CSINodeUnauthorized creates CreateStorageV1CSINodeUnauthorized with default headers values
|
func NewCreateStorageV1CSINodeUnauthorized() *CreateStorageV1CSINodeUnauthorized {
return &CreateStorageV1CSINodeUnauthorized{}
}
|
[
"func NewReplaceStorageV1CSINodeUnauthorized() *ReplaceStorageV1CSINodeUnauthorized {\n\n\treturn &ReplaceStorageV1CSINodeUnauthorized{}\n}",
"func NewReplaceStorageV1CSINodeUnauthorized() *ReplaceStorageV1CSINodeUnauthorized {\n\treturn &ReplaceStorageV1CSINodeUnauthorized{}\n}",
"func NewWatchStorageV1CSINodeListUnauthorized() *WatchStorageV1CSINodeListUnauthorized {\n\treturn &WatchStorageV1CSINodeListUnauthorized{}\n}",
"func NewReadStorageV1beta1CSINodeUnauthorized() *ReadStorageV1beta1CSINodeUnauthorized {\n\treturn &ReadStorageV1beta1CSINodeUnauthorized{}\n}",
"func NewCreateStorageV1CSINodeOK() *CreateStorageV1CSINodeOK {\n\n\treturn &CreateStorageV1CSINodeOK{}\n}",
"func NewUnauthorized(cause error) Unauthorized { return Unauthorized(cause.Error()) }",
"func NewCreateCoreV1NamespaceUnauthorized() *CreateCoreV1NamespaceUnauthorized {\n\treturn &CreateCoreV1NamespaceUnauthorized{}\n}",
"func NewUnauthorized() gin.H {\n\treturn gin.H{\"error\": \"user is not authorized\"}\n}",
"func NewGetNodeUnauthorized() *GetNodeUnauthorized {\n\treturn &GetNodeUnauthorized{}\n}",
"func NewWatchStorageV1StorageClassUnauthorized() *WatchStorageV1StorageClassUnauthorized {\n\treturn &WatchStorageV1StorageClassUnauthorized{}\n}",
"func NewDeleteStorageV1beta1CollectionCSINodeUnauthorized() *DeleteStorageV1beta1CollectionCSINodeUnauthorized {\n\treturn &DeleteStorageV1beta1CollectionCSINodeUnauthorized{}\n}",
"func NewReadStorageV1beta1CSIDriverUnauthorized() *ReadStorageV1beta1CSIDriverUnauthorized {\n\n\treturn &ReadStorageV1beta1CSIDriverUnauthorized{}\n}",
"func (a *SANApiService) IscsiCredentialsCreate(ctx context.Context, info IscsiCredentials) (IscsiCredentialsResponse, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue IscsiCredentialsResponse\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/protocols/san/iscsi/credentials\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/hal+json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/hal+json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &info\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\tif err == nil { \n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\tif localVarHttpResponse.StatusCode == 201 {\n\t\t\tvar v IscsiCredentialsResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\t\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ErrorResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\t\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func NewCreateStorageV1CSINodeCreated() *CreateStorageV1CSINodeCreated {\n\n\treturn &CreateStorageV1CSINodeCreated{}\n}",
"func (a *HyperflexApiService) CreateHyperflexExtIscsiStoragePolicy(ctx context.Context) ApiCreateHyperflexExtIscsiStoragePolicyRequest {\n\treturn ApiCreateHyperflexExtIscsiStoragePolicyRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}",
"func NewUnauthorized(err error, msg string) error {\n\treturn &unauthorized{wrap(err, msg, \"\")}\n}",
"func NewDeleteStorageV1beta1CSIDriverUnauthorized() *DeleteStorageV1beta1CSIDriverUnauthorized {\n\treturn &DeleteStorageV1beta1CSIDriverUnauthorized{}\n}",
"func NewReplaceStorageV1CSINodeCreated() *ReplaceStorageV1CSINodeCreated {\n\n\treturn &ReplaceStorageV1CSINodeCreated{}\n}",
"func (suite *TenantTestSuite) TestCreateUnauthorized() {\n\trequest, _ := http.NewRequest(\"POST\", \"/api/v2/admin/tenants\", strings.NewReader(\"\"))\n\trequest.Header.Set(\"x-api-key\", \"FOO\")\n\trequest.Header.Set(\"Accept\", \"application/json\")\n\tresponse := httptest.NewRecorder()\n\n\tsuite.router.ServeHTTP(response, request)\n\n\tcode := response.Code\n\toutput := response.Body.String()\n\n\tsuite.Equal(401, code, \"Internal Server Error\")\n\tsuite.Equal(suite.respUnauthorized, output, \"Response body mismatch\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deprecated: Use ProxyRequest.ProtoReflect.Descriptor instead.
|
func (*ProxyRequest) Descriptor() ([]byte, []int) {
	return file_proto_sample_proto_rawDescGZIP(), []int{4}
}
|
[
"func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{5}\n}",
"func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proxy_v3_proto_rawDescGZIP(), []int{0}\n}",
"func (*ProxyEndpointRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_grpc_debug_proxy_endpoint_proto_rawDescGZIP(), []int{0}\n}",
"func (*UpdateReverseProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{9}\n}",
"func (*DiscoveryRequest) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_proto_webApi_webApi_proto_rawDescGZIP(), []int{1}\n}",
"func (*WebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{0}\n}",
"func (*AllowRequest) Descriptor() ([]byte, []int) {\n\treturn file_config_module_proxy_v1_proxy_proto_rawDescGZIP(), []int{2}\n}",
"func (*GetIdentityAwareProxyClientRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_iap_v1_service_proto_rawDescGZIP(), []int{27}\n}",
"func (*CreateReverseProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{0}\n}",
"func (*TelemetryRequest) Descriptor() ([]byte, []int) {\n\treturn file_automate_gateway_api_telemetry_telemetry_proto_rawDescGZIP(), []int{0}\n}",
"func (*FindEnabledReverseProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{2}\n}",
"func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}",
"func (*CreateIdentityAwareProxyClientRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_iap_v1_service_proto_rawDescGZIP(), []int{26}\n}",
"func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}",
"func (*ConfigRequest_V1_Deprecated) Descriptor() ([]byte, []int) {\n\treturn file_config_opensearch_config_request_proto_rawDescGZIP(), []int{0, 0, 23}\n}",
"func (*ConfigRequest_V1_System_Proxy) Descriptor() ([]byte, []int) {\n\treturn file_config_deployment_config_request_proto_rawDescGZIP(), []int{0, 0, 0, 3}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deprecated: Use ProxyResponse.ProtoReflect.Descriptor instead.
|
func (*ProxyResponse) Descriptor() ([]byte, []int) {
	return file_proto_sample_proto_rawDescGZIP(), []int{5}
}
|
[
"func (*ProxyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{6}\n}",
"func (*ProxyEndpointResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_grpc_debug_proxy_endpoint_proto_rawDescGZIP(), []int{1}\n}",
"func (*DiscoveryResponse) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListenResponse) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{9}\n}",
"func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}",
"func (*FindEnabledReverseProxyResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{3}\n}",
"func (*ProxyEndpointRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_grpc_debug_proxy_endpoint_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetTelemetryResponse) Descriptor() ([]byte, []int) {\n\treturn file_determined_api_v1_master_proto_rawDescGZIP(), []int{4}\n}",
"func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proxy_v3_proto_rawDescGZIP(), []int{0}\n}",
"func (*WarnResponse) Descriptor() ([]byte, []int) {\n\treturn file_punishments_punishments_proto_rawDescGZIP(), []int{13}\n}",
"func (*UnWarnResponse) Descriptor() ([]byte, []int) {\n\treturn file_punishments_punishments_proto_rawDescGZIP(), []int{23}\n}",
"func (*RefreshResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{17}\n}",
"func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}",
"func (*CreateReverseProxyResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_reverse_proxy_proto_rawDescGZIP(), []int{1}\n}",
"func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{5}\n}",
"func (*GetPeerInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{28}\n}",
"func (*TelemetryResponse) Descriptor() ([]byte, []int) {\n\treturn file_automate_gateway_api_telemetry_telemetry_proto_rawDescGZIP(), []int{1}\n}",
"func (*DLResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_internal_proto_rawDescGZIP(), []int{6}\n}",
"func (*NuLlResponse) Descriptor() ([]byte, []int) {\n\treturn file_testdata_gentest_unary_type_test_proto_rawDescGZIP(), []int{5}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fields of the UnitOfMedicine.
|
func (UnitOfMedicine) Fields() []ent.Field {
	return []ent.Field{
		field.String("name"),
	}
}
|
[
"func (Medicalfile) Fields() []ent.Field {\n return []ent.Field{\n\t\tfield.String(\"detail\").NotEmpty(),\n\t\tfield.Time(\"added_time\"),\n }\n}",
"func (m *ShowMeasurementsMapper) Fields() []string { return []string{\"name\"} }",
"func (Medicaltreatmentrights) Fields() []ent.Field {\n\treturn nil\n}",
"func (Physician) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"Physicianname\"),\n field.String(\"Physicianemail\"). \n Unique(), \n field.String(\"Password\"), \n } \n}",
"func (Patientroom) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"Typeroom\").\n Unique(),\n \n } \n}",
"func (Roomtype) Fields() []ent.Field {\n return []ent.Field{\n field.Int(\"ROOMPRICE\"),\n\t field.String(\"TYPEDEATAIL\").\n\t \t NotEmpty(),\n }\n}",
"func (Dispense) Fields() []ent.Field {\n\treturn []ent.Field{\n field.String(\"note\").NotEmpty(), \n }\n\n}",
"func (PatientInfo) Fields() []ent.Field {\r\n\treturn []ent.Field{\r\n\t\tfield.String(\"cardNumber\").NotEmpty().Unique(),\r\n\t\tfield.String(\"name\").NotEmpty(),\r\n\t\tfield.String(\"gender\").NotEmpty(),\r\n\t\tfield.Int(\"age\").Positive(),\r\n\t}\r\n}",
"func (Diagnosis) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"symptom\").NotEmpty(),\n\t\tfield.String(\"Opinionresult\").Validate(func(s string) error {\n\t\t\tmatch, _ := regexp.MatchString(\"[ก-๘]\", s)\n\t\t\tif !match {\n\t\t\t\treturn errors.New(\"กรุณากรอกภาษาไทย [ก-๘] \")\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tfield.String(\"note\").MaxLen(25),\n\n\t\t//field.DATE\n\n\t\tfield.Time(\"diagnosisDate\").Default(time.Now),\n\t}\n}",
"func (Patientrecord) Fields() []ent.Field {\n\treturn []ent.Field{\n\n field.String(\"Name\"),\n\n }\n}",
"func (Operationroom) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"operationroom_name\").NotEmpty(),\n }\n}",
"func (DrugAllergy) Fields() []ent.Field {\n return []ent.Field{\n \n }\n }",
"func (e Department) EntFields() ent.Fields { return ent_Department_fields }",
"func (Dentist) Fields() []ent.Field {\n return []ent.Field{\n\tfield.String(\"name\").NotEmpty(),\n\tfield.Int(\"age\").Positive(),\n\tfield.String(\"cardid\").NotEmpty(),\n\tfield.Time(\"birthday\"),\n\tfield.String(\"experience\").NotEmpty(),\n\tfield.String(\"tel\").NotEmpty(),\n\tfield.String(\"email\").NotEmpty(),\n\tfield.String(\"password\").NotEmpty(),\n\n }\n}",
"func (MedicalProcedure) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"procedureOrder\").Validate(func(s string) error {\n match, _ := regexp.MatchString(\"[U]+[N]+[S]\\\\d{6}\" ,s)\n if !match {\n return errors.New(\"รูปแบบรหัสไม่ถูกต้อง\")\n }\n return nil\n }),\n\t\tfield.String(\"procedureRoom\").MaxLen(4).MinLen(4),\n\t\tfield.Time(\"Addtime\"),\n\t\tfield.String(\"procedureDescripe\").NotEmpty(),\n\t}\n}",
"func (Examinationroom) Fields() []ent.Field {\n return []ent.Field{\n field.String(\"examinationroom_name\").NotEmpty(),\n }\n }",
"func (Dentist) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"Dentist_name\").NotEmpty(),\n\t \n\t}\n}",
"func HasUnitOfMedicine() predicate.Medicine {\n\treturn predicate.Medicine(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(UnitOfMedicineTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, UnitOfMedicineTable, UnitOfMedicineColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}",
"func (Detail) Fields() []ent.Field {\n\treturn []ent.Field{\n\t\tfield.String(\"explain\").Validate(func(s string) error {\n\t\t\tmatch, _ := regexp.MatchString(\"^[ก-๏\\\\s]+$\", s)\n\t\t\tif !match {\n\t\t\t\treturn errors.New(\"กรอกรายละเอียดเป็นภาษาไทยเท่านั้น\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tfield.String(\"phone\").MaxLen(10).MinLen(10),\n\t\tfield.String(\"email\").Match(regexp.MustCompile(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\")),\n\t\tfield.String(\"departmentid\").MaxLen(3).MinLen(3),\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Edges of the UnitOfMedicine.
|
func (UnitOfMedicine) Edges() []ent.Edge {
	return []ent.Edge{
		edge.To("Medicine", Medicine.Type),
	}
}
|
[
"func (Physician) Edges() []ent.Edge {\n return []ent.Edge{\n edge.To(\"Physician\", Patientofphysician.Type),\n \n }\n}",
"func (LevelOfDangerous) Edges() []ent.Edge {\r\n\treturn []ent.Edge{\r\n\t\tedge.To(\"Medicine\", Medicine.Type),\r\n\t}\r\n}",
"func (Medicalfile) Edges() []ent.Edge {\n return []ent.Edge{\n edge.From(\"dentist\", Dentist.Type).Ref(\"medicalfiles\").Unique(),\n edge.From(\"patient\", Patient.Type).Ref(\"medicalfiles\").Unique(),\n edge.From(\"nurse\", Nurse.Type).Ref(\"medicalfiles\").Unique(),\n edge.From(\"medicalcare\", MedicalCare.Type).Ref(\"medicalfiles\").Unique(),\n edge.To(\"dentalexpenses\", DentalExpense.Type).StorageKey(edge.Column(\"medicalfile_id\")),\n }\n}",
"func (Patientroom) Edges() []ent.Edge {\n return []ent.Edge{\n edge.To(\"Patientroom\", Patientofphysician.Type),\n }\n}",
"func (Operationroom) Edges() []ent.Edge {\n return []ent.Edge{\n edge.To(\"operationroom_id\",Booking.Type),\n }\n}",
"func (Medicaltreatmentrights) Edges() []ent.Edge {\n\treturn nil\n}",
"func (Dentist) Edges() []ent.Edge {\n return []ent.Edge{\n\tedge.From(\"nurse\", Nurse.Type).Ref(\"dentists\").Unique(),\n\tedge.From(\"degree\", Degree.Type).Ref(\"dentists\").Unique(),\n\tedge.From(\"expert\", Expert.Type).Ref(\"dentists\").Unique(),\n\tedge.From(\"gender\", Gender.Type).Ref(\"dentists\").Unique(),\n\n\tedge.To(\"medicalfiles\", Medicalfile.Type).StorageKey(edge.Column(\"dentist_id\")),\n\tedge.To(\"queue\", Queue.Type).StorageKey(edge.Column(\"dentist_id\")),\n\tedge.To(\"appointment\", Appointment.Type).StorageKey(edge.Column(\"dentist_id\")),\n\t\n }\n}",
"func (Dispense) Edges() []ent.Edge {\n\treturn []ent.Edge{\n edge.From(\"drug\",Drug.Type).Ref(\"dispenses\").Unique(),\n edge.From(\"patient\", Patient.Type).Ref(\"dispenses\").Unique(),\n edge.From(\"user\", User.Type).Ref(\"dispenses\").Unique(),\n \n \n }\n\n}",
"func (Degree) Edges() []ent.Edge {\n return []ent.Edge{\n \n edge.To(\"dentists\", Dentist.Type).StorageKey(edge.Column(\"degree_id\")),\n }\n}",
"func (MedicalProcedure) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.From(\"Patient\", Patient.Type).Ref(\"PatientToMedicalProcedure\").Unique(),\n\t\tedge.From(\"ProcedureType\", ProcedureType.Type).Ref(\"ProcedureToMedicalProcedure\").Unique(),\n\t\tedge.From(\"Doctor\", Doctor.Type).Ref(\"DoctorToMedicalProcedure\").Unique(),\n\t}\n}",
"func (DrugAllergy) Edges() []ent.Edge {\n return []ent.Edge{\n edge. From(\"doctor\",Doctor.Type).Ref(\"Doctor_DrugAllergy\").Unique(),\n edge. From(\"patient\",Patient.Type).Ref(\"Patient_DrugAllergy\").Unique(),\n edge. From(\"medicine\",Medicine.Type).Ref(\"Medicine_DrugAllergy\").Unique(),\n edge. From(\"manner\",Manner.Type).Ref(\"Manner_DrugAllergy\").Unique(),\n }\n }",
"func (District) Edges() []ent.Edge {\n\treturn nil\n}",
"func (Doctor) Edges() []ent.Edge {\n return []ent.Edge{\n\n edge.From(\"title\", Title.Type).\n Ref(\"doctors\").\n Unique(),\n\n edge.From(\"gender\", Gender.Type).\n Ref(\"doctors\").\n Unique(),\n\n edge.From(\"position\", Position.Type).\n Ref(\"doctors\").\n Unique(),\n\n edge.From(\"disease\", Disease.Type).\n Ref(\"doctors\").\n Unique(),\n\n edge.To(\"offices\", Office.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"departments\", Department.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"schedules\", Schedule.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"trainings\",Training.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"specialdoctors\",Specialdoctor.Type).\n StorageKey(edge.Column(\"doctor_id\")),\n\n edge.To(\"details\", Detail.Type).Unique(),\n }\n}",
"func (Dentalkind) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.To(\"Dentalappointment\", Dentalappointment.Type).StorageKey(edge.Column(\"kindname\")),\n\t}\n}",
"func (Roomtype) Edges() []ent.Edge {\n return []ent.Edge{\n edge.To(\"Room1\", Room.Type),\n }\n}",
"func (Promotionamount) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.To(\"promotion\", Promotion.Type),\n\t}\n}",
"func (Financier) Edges() []ent.Edge {\n\treturn []ent.Edge{\n\t\tedge.To(\"bills\", Bill.Type).StorageKey(edge.Column(\"officer_id\")),\n\t\tedge.From(\"user\", User.Type).Ref(\"financier\").Unique(),\n\t}\n}",
"func (PatientInfo) Edges() []ent.Edge {\r\n\treturn []ent.Edge{\r\n\t\tedge.To(\"drugallergys\", DrugAllergy.Type).StorageKey(edge.Column(\"patient_id\")),\r\n\t\tedge.To(\"patientprescription\", Prescription.Type).StorageKey(edge.Column(\"patient_id\")),\r\n\t}\r\n}",
"func (g Ugraph) E() int {\n edges := 0\n for _, v := range g {\n edges += len(v)\n }\n return edges/2\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewConnection creates a new PGSQLConnection by using the URL found in the DATABASE_URL environment variable
|
func NewConnection() (*PGSQLConnection, error) {
	url, ok := os.LookupEnv(databaseENV)
	if !ok {
		return nil, fmt.Errorf("missing ENV %s", databaseENV)
	}
	db, err := sqlx.Connect("postgres", url)
	if err != nil {
		return nil, err
	}
	return &PGSQLConnection{
		connection: db,
	}, nil
}
|
[
"func newPostgresConnection(cmd *cobra.Command, kind string) (*sqlx.DB, error) {\n\thost, _ := cmd.Flags().GetString(\"postgres-host\")\n\tport, _ := cmd.Flags().GetInt(\"postgres-port\")\n\tsslmode, _ := cmd.Flags().GetString(\"postgres-sslmode\")\n\n\tuser, _ := cmd.Flags().GetString(kind + \"-postgres-user\")\n\tif user == \"\" {\n\t\treturn nil, errors.Errorf(\"flag must not be empty: %s-postgres-user\", kind)\n\t}\n\n\tpassword, _ := cmd.Flags().GetString(kind + \"-postgres-password\")\n\tif password == \"\" {\n\t\treturn nil, errors.Errorf(\"flag must not be empty: %s-postgres-password\", kind)\n\t}\n\n\tdbname, _ := cmd.Flags().GetString(kind + \"-postgres-name\")\n\tif dbname == \"\" {\n\t\treturn nil, errors.Errorf(\"flag must not be empty: %s-postgres-name\", kind)\n\t}\n\n\t// use default dbname, if not provided\n\tif dbname == \"\" {\n\t\tdbname = user\n\t}\n\n\treturn pq.NewConnection(user, password, dbname, host, sslmode, port)\n}",
"func newDbConnection(connStr string) *sql.DB {\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"[es] new db connection established.\")\n\treturn db\n}",
"func CreatePGConnection(opts map[string]string) (*sql.DB, error) {\n\tport, err := strconv.Atoi(opts[\"port\"])\n\tif err != nil {\n\t\tlog.S().Fatal(\"Invalid port number : \", opts[\"port\"])\n\t}\n\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%d user=%s \"+\n\t\t\"password=%s dbname=%s sslmode=%s\",\n\t\topts[\"host\"], port, opts[\"user\"], opts[\"password\"], opts[\"dbname\"], opts[\"sslmode\"])\n\n\tif util.IsProductionEnv() {\n\t\tpsqlInfo = fmt.Sprintf(\"host=%s port=%d user=%s \"+\n\t\t\t\"password=%s dbname=%s sslrootcert=./rds-combined-ca-bundle.pem sslmode=%s\",\n\t\t\topts[\"host\"], port, opts[\"user\"], opts[\"password\"], opts[\"dbname\"], opts[\"sslmode\"])\n\t}\n\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tlog.S().Fatal(err)\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.S().Fatal(err)\n\t}\n\n\t// Setting database connection config\n\tdb.SetMaxOpenConns(viper.GetInt(`database.pg.max_open_connection`))\n\tdb.SetMaxIdleConns(viper.GetInt(`database.pg.max_idle_connection`))\n\tdb.SetConnMaxLifetime(viper.GetDuration(`database.pg.max_connection_lifetime`))\n\n\tlog.S().Info(\"Connected to PG DB Server: \", opts[\"host\"], \" at port:\", opts[\"port\"], \" successfully!\")\n\n\treturn db, nil\n}",
"func newDbConnection() (db *sql.DB) {\n\tif *dbName == \"\" || *hostName == \"\" || *dbUserName == \"\" || *dbPassword == \"\" {\n\t\tlog.Println(\"postgres credentials not set\")\n\t\tflag.PrintDefaults()\n\t\treturn nil\n\t}\n\tdbInfo := fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s sslmode=disable\",\n\t\t*hostName, *dbUserName, *dbPassword, *dbName, )\n\n\tmaxWait := 3\n\tvar err error\n\tfor {\n\t\tdb, err = sql.Open(\"postgres\", dbInfo)\n\t\tif err != nil {\n\t\t\t// wait for postgres to be ready\n\t\t\tif (maxWait >= 0) {\n\t\t\t\tlog.Println(\"wating for postgres, sleeping...\")\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\tmaxWait--\n\t\t\t} else {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else {\n\t\t\treturn db\n\t\t}\n\t}\n}",
"func New(url string) (*Conn, error) {\n\tconn, err := pgx.Connect(context.Background(), url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to connection to database: %v\\n\", err)\n\t}\n\n\treturn &Conn{db: conn}, nil\n}",
"func newPostgresConnection(dsn string) (*postgresConnection, error) {\n\tconn := postgresConnection{\n\t\tDSN: dsn,\n\t\tDB: nil,\n\t}\n\tdb, err := conn.open()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not open postgres db connection, err = %w\", err)\n\t}\n\tconn.DB = db\n\tif err := conn.Check(); err != nil {\n\t\treturn nil, fmt.Errorf(\"postgres db connection check failed, err = %w\", err)\n\t}\n\treturn &conn, nil\n}",
"func createConnection() *sql.DB {\n\t// load .env file\n\terr := godotenv.Load(\".env\")\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading .env file\")\n\t}\n\n\t// Open the connection\n\tdb, err := sql.Open(\"postgres\", os.Getenv(\"POSTGRES_URL\"))\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// check the connection\n\terr = db.Ping()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Successfully connected!\")\n\t// return the connection\n\treturn db\n}",
"func NewConnection(pgsql postgres.Client) core.Connection {\n\treturn &connection{\n\t\tpg: pgsql,\n\t\tmainPg: pgsql.MainDatastore(),\n\t\tappUser: &applicationUser{\n\t\t\tpg: pgsql,\n\t\t\tmainPg: pgsql.MainDatastore(),\n\t\t},\n\t}\n}",
"func createConnection() *sql.DB {\n\t//load .env file\n\terr := godotenv.Load(\".env\")\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading .env file\")\n\t}\n\n\t//open connection\n\tdb, err := sql.Open(\"postgres\", os.Getenv(\"POSTGRES_DB\"))\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t//check connection\n\terr = db.Ping()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Successfully connected!\")\n // return the connection\n return db\n}",
"func CreatePGConnection(opts map[string]string) (*PgDB, error) {\n\tpgdb := pg.Connect(&pg.Options{\n\t\tUser: opts[\"user\"],\n\t\tPassword: opts[\"password\"],\n\t\tDatabase: opts[\"dbname\"],\n\t\tAddr: opts[\"host\"] + \":\" + opts[\"port\"],\n\t})\n\n\tvar n int\n\t_, err := pgdb.QueryOne(pg.Scan(&n), \"SELECT 1\")\n\tif err != nil {\n\t\tlog.Error(\"Could not connect to PG DB Server:\", opts[\"host\"], \" at port:\", opts[\"port\"])\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\tlog.Info(\"Connected to PG DB Server: \", opts[\"host\"], \" at port:\", opts[\"port\"], \" successfully!\")\n\n\tif viper.GetBool(`pg.debug`) {\n\t\tpgdb.AddQueryHook(dbLogger{})\n\t}\n\tlog.Info(\"DB DEBUG = \", viper.GetBool(`pg.debug`))\n\n\treturn &PgDB{DB: pgdb}, nil\n}",
"func PGSQLConnect() *PostgreSQLConnection {\n\tif connection == nil {\n\t\tdbHost := configuration.Database.Host\n\t\tdbPort := configuration.Database.Port\n\t\tdbUser := configuration.Database.User\n\t\tdbPassword := configuration.Database.Password\n\t\tdatabase := configuration.Database.DatabaseName\n\n\t\tconn := fmt.Sprintf(\"host=%s port=%s user=%s password=%s database=%s\"+\n\t\t\t\" sslmode=disable\", dbHost, dbPort, dbUser, dbPassword, database)\n\n\t\tdb, err := sql.Open(\"postgres\", conn)\n\n\t\t// Based on the users of the application\n\t\tdb.SetMaxOpenConns(10)\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[!] Couldn't connect to the database. Reason %v\\n\", err)\n\t\t\treturn nil\n\t\t}\n\n\t\terr = db.Ping()\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[!] Couldn't ping to the database. Reason %v\\n\", err)\n\t\t\treturn nil\n\t\t}\n\t\tconnection = &PostgreSQLConnection{}\n\t\tconnection.db = db\n\t}\n\n\treturn connection\n}",
"func New(host, port, user, pass, name string) *DB {\n return &DB{\n Conn: pg.Connect(&pg.Options{\n Addr: host + \":\" + port,\n User: user,\n Password: pass,\n Database: name,\n }),\n }\n}",
"func New(s Settings) (*sqlx.DB, error) {\n\tconn, err := sqlx.Connect(\"pgx\", fmt.Sprintf(\"postgres://postgres:devpassword@postgres:5432/%s?sslmode=disable\", s.Database))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conn, nil\n}",
"func ConnectPostgreSQL() *sql.DB {\n\turl := configuration.GetConfiguration().PostgreSQLUrl\n\tpgUrl, err := pq.ParseURL(url)\n\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\tdb, err := sql.Open(\"postgres\", pgUrl)\n\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\terr = db.Ping()\n\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\treturn db\n}",
"func connectDatabase(connectionURL string) *sql.DB {\n\tdb, err := sql.Open(\"postgres\", connectionURL)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn db\n}",
"func newConnection() (*gredis.Client, error) {\n\thost = os.Getenv(\"REDIS_HOST\")\n\tport = os.Getenv(\"REDIS_PORT\")\n\trdb := gredis.NewClient(&gredis.Options{\n\t\tAddr: fmt.Sprintf(\"%s:%s\", host, port),\n\t\tPassword: password,\n\t\tDB: db,\n\t})\n\n\tstatus := rdb.Ping(rdb.Context())\n\terr := status.Err()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rdb, nil\n}",
"func PostgressDBConnection() (*sql.DB, error) {\n\tconnStr := \"{{cookiecutter.postgres_uri}}\"\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\tlog.Println(\"Database connection not created!\")\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\tif err = db.Ping(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn db, err\n\n}",
"func New(ctx context.Context, url string) (*DB, error) {\n\tdb, err := pgxpool.Connect(ctx, url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to connection to database: %s\", err)\n\t}\n\n\treturn &DB{db}, err\n}",
"func NewPostgreSQL(cfg PostgreConfig) (*sqlx.DB, error) {\n\tvar connErr error\n\n\tpostgreOnce.Do(func() {\n\n\t\tq := make(url.Values)\n\t\tq.Set(\"sslmode\", \"disable\")\n\n\t\tu := url.URL{\n\t\t\tScheme: \"postgres\",\n\t\t\tUser: url.UserPassword(cfg.User, cfg.Password),\n\t\t\tHost: cfg.Host,\n\t\t\tRawQuery: q.Encode(),\n\t\t}\n\n\t\tconn, err := sqlx.Connect(\"postgres\", u.String())\n\t\tif err != nil {\n\t\t\tconnErr = errors.Wrap(err, \"db\")\n\t\t\treturn\n\t\t}\n\n\t\tconn.SetMaxIdleConns(cfg.MaxIdleConns)\n\t\tconn.SetMaxOpenConns(cfg.MaxOpenConns)\n\n\t\tpostgreConn = conn\n\t})\n\treturn postgreConn, connErr\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
UpdateCluster updates a cluster's color in the database
|
func (p PGSQLConnection) UpdateCluster(cluster *ClusterModel) error {
	tx, err := p.connection.Beginx()
	if err != nil {
		return err
	}
	_, err = tx.NamedExec("UPDATE clusters SET color = :color WHERE cluster_name = :cluster_name", cluster)
	if err != nil {
		// release the open transaction before surfacing the error
		tx.Rollback()
		return err
	}
	return tx.Commit()
}
|
[
"func updateCluster(app *firebase.App, clusterID string) error {\n\tclustersMutex.Lock()\n\n\tvar avgTemp float64\n\tvar avgPres float64\n\tvar avgHum float64\n\n\tfor _, b := range clusters.Clusters[clusterID].Balloons {\n\t\tif len(b.Temperature) < 1 {\n\t\t\tcontinue // If no temperature value is set yet, skip\n\t\t}\n\n\t\tavgTemp += b.Temperature[len(b.Temperature)-1].Value\n\t\tavgPres += b.Pressure[len(b.Pressure)-1].Value\n\t\tavgHum += b.Humidity[len(b.Humidity)-1].Value\n\t}\n\n\t// Calculate average by dividing by amount of balloons used\n\t// for measurement\n\tavgTemp /= float64(len(clusters.Clusters[clusterID].Balloons))\n\tavgPres /= float64(len(clusters.Clusters[clusterID].Balloons))\n\tavgHum /= float64(len(clusters.Clusters[clusterID].Balloons))\n\n\t// Update averages of cluster\n\tclusters.Clusters[clusterID].AverageTemp = avgTemp\n\tclusters.Clusters[clusterID].AverageHumidity = avgHum\n\tclusters.Clusters[clusterID].AveragePressure = avgPres\n\n\tclustersMutex.Unlock()\n\treturn nil\n}",
"func (client ContainerEngineClient) updateCluster(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/clusters/{clusterId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateClusterResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/containerengine/20180222/Cluster/UpdateCluster\"\n\t\terr = common.PostProcessServiceError(err, \"ContainerEngine\", \"UpdateCluster\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func (s *Server) Update(ctx context.Context, q *ClusterUpdateRequest) (*appv1.Cluster, error) {\n\tif !s.enf.EnforceClaims(ctx.Value(\"claims\"), \"clusters\", \"update\", q.Cluster.Server) {\n\t\treturn nil, grpc.ErrPermissionDenied\n\t}\n\terr := kube.TestConfig(q.Cluster.RESTConfig())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclust, err := s.db.UpdateCluster(ctx, q.Cluster)\n\treturn redact(clust), err\n}",
"func UpdateCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) error {\n\tt.Logf(\"updating zookeeper cluster: %s\", z.Name)\n\terr := f.Client.Update(goctx.TODO(), z)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update CR: %v\", err)\n\t}\n\n\tt.Logf(\"updated zookeeper cluster: %s\", z.Name)\n\treturn nil\n}",
"func (c *HandlerComp) intClusterUpdate(params ops.ClusterUpdateParams, ai *auth.Info, oObj *models.Cluster) (*models.Cluster, error) {\n\tctx := params.HTTPRequest.Context()\n\tvar err error\n\tif ai == nil {\n\t\tai, err = c.GetAuthInfo(params.HTTPRequest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar uP = [centrald.NumActionTypes][]string{\n\t\tcentrald.UpdateRemove: params.Remove,\n\t\tcentrald.UpdateAppend: params.Append,\n\t\tcentrald.UpdateSet: params.Set,\n\t}\n\tua, err := c.MakeStdUpdateArgs(emptyCluster, params.ID, params.Version, uP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif params.Payload == nil {\n\t\terr = c.eUpdateInvalidMsg(\"missing payload\")\n\t\treturn nil, err\n\t}\n\tif ua.IsModified(\"Name\") && params.Payload.Name == \"\" {\n\t\terr := c.eUpdateInvalidMsg(\"non-empty name is required\")\n\t\treturn nil, err\n\t}\n\tif oObj == nil {\n\t\tc.RLock()\n\t\tdefer c.RUnlock()\n\t\tc.ClusterLock()\n\t\tdefer c.ClusterUnlock()\n\t\toObj, err = c.DS.OpsCluster().Fetch(ctx, params.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"ClusterUsagePolicy\") {\n\t\tif oObj.State != common.ClusterStateDeployable {\n\t\t\terr := c.eUpdateInvalidMsg(\"invalid state\")\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.validateClusterUsagePolicy(params.Payload.ClusterUsagePolicy, common.AccountSecretScopeCluster); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.Version == 0 {\n\t\tua.Version = int32(oObj.Meta.Version)\n\t} else if int32(oObj.Meta.Version) != ua.Version {\n\t\terr = centrald.ErrorIDVerNotFound\n\t\treturn nil, err\n\t}\n\tif err = c.app.AuditLog.Ready(); err != nil {\n\t\treturn nil, err\n\t}\n\tif ua.IsModified(\"ClusterVersion\") || ua.IsModified(\"Service\") || ua.IsModified(\"ClusterAttributes\") || ua.IsModified(\"ClusterIdentifier\") || ua.IsModified(\"State\") || ua.IsModified(\"Messages\") {\n\t\tif err = ai.InternalOK(); err != nil {\n\t\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", true, \"Update unauthorized\")\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err = ai.CapOK(centrald.CSPDomainManagementCap, models.ObjIDMutable(oObj.AccountID)); err != nil {\n\t\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", true, \"Update unauthorized\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"ClusterIdentifier\") {\n\t\tif !ua.IsModified(\"State\") {\n\t\t\terr := c.eMissingMsg(\"state must be set with clusterIdentifier\")\n\t\t\treturn nil, err\n\t\t}\n\t\t// when transitioning to DEPLOYABLE state ClusterIdentifier must be reset, e.g. 
set to empty string\n\t\tif params.Payload.State == common.ClusterStateDeployable && params.Payload.ClusterIdentifier != \"\" {\n\t\t\terr := c.eMissingMsg(\"clusterIdentifier must be cleared when transitioning to %s\", common.ClusterStateDeployable)\n\t\t\treturn nil, err\n\t\t}\n\t\t// ClusterIdentifier may be modified (set to non-empty value) only when changing state from DEPLOYABLE to MANAGED\n\t\tif !(oObj.State == common.ClusterStateDeployable && params.Payload.State == common.ClusterStateManaged) {\n\t\t\terr := c.eInvalidState(\"invalid state transition (%s ⇒ %s)\", oObj.State, params.Payload.State)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"State\") {\n\t\tif !c.validateClusterState(params.Payload.State) {\n\t\t\terr := c.eUpdateInvalidMsg(\"invalid cluster state\")\n\t\t\treturn nil, err\n\t\t}\n\t\t// when transitioning from DEPLOYABLE state to MANAGED ClusterIdentifier is required\n\t\tif oObj.State == common.ClusterStateDeployable && params.Payload.State == common.ClusterStateManaged && (!ua.IsModified(\"ClusterIdentifier\") || params.Payload.ClusterIdentifier == \"\") {\n\t\t\terr := c.eMissingMsg(\"clusterIdentifier must be set when transitioning to %s\", common.ClusterStateManaged)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdom, err := c.ops.intCspDomainFetch(ctx, ai, string(oObj.CspDomainID))\n\tif err != nil {\n\t\tc.Log.Errorf(\"Cluster[%s]: error looking up CSPDomain[%s]: %s\", oObj.Meta.ID, oObj.CspDomainID, err.Error())\n\t\treturn nil, err\n\t}\n\tdetail := \"\"\n\tif a := ua.FindUpdateAttr(\"AuthorizedAccounts\"); a != nil && a.IsModified() {\n\t\tdetail, err = c.authAccountValidator.validateAuthorizedAccountsUpdate(ctx, ai, centrald.ClusterUpdateAction, params.ID, models.ObjName(oObj.Name), a, oObj.AuthorizedAccounts, params.Payload.AuthorizedAccounts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t// TBD: validate clusterAttributes by clusterType\n\tobj, err := c.DS.OpsCluster().Update(ctx, ua, params.Payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.clusterApplyInheritedProperties(ctx, ai, obj, dom) // no error possible\n\tif len(detail) > 0 {\n\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", false, fmt.Sprintf(\"Updated authorizedAccounts %s\", detail))\n\t}\n\tc.setDefaultObjectScope(params.HTTPRequest, obj)\n\treturn obj, nil\n}",
"func (a *LocalKeyAgent) UpdateCluster(cluster string) {\n\ta.siteName = cluster\n}",
"func (s *Server) updateCluster(report *healthReport) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s.connectivity.startTime.Before(report.startTime) {\n\t\ts.connectivity = report\n\t}\n}",
"func (c *Controller) onUpdate(oldObj, newObj interface{}) {\n\toldcluster := oldObj.(*crv1.Pgcluster)\n\tnewcluster := newObj.(*crv1.Pgcluster)\n\n\tlog.Debugf(\"pgcluster onUpdate for cluster %s (namespace %s)\", newcluster.ObjectMeta.Namespace,\n\t\tnewcluster.ObjectMeta.Name)\n\n\t// if the status of the pgcluster shows that it has been bootstrapped, then proceed with\n\t// creating the cluster (i.e. the cluster deployment, services, etc.)\n\tif newcluster.Status.State == crv1.PgclusterStateBootstrapped {\n\t\tclusteroperator.AddClusterBase(c.Client, newcluster, newcluster.GetNamespace())\n\t\treturn\n\t}\n\n\t// if the 'shutdown' parameter in the pgcluster update shows that the cluster should be either\n\t// shutdown or started but its current status does not properly reflect that it is, then\n\t// proceed with the logic needed to either shutdown or start the cluster\n\tif newcluster.Spec.Shutdown && newcluster.Status.State != crv1.PgclusterStateShutdown {\n\t\tclusteroperator.ShutdownCluster(c.Client, *newcluster)\n\t} else if !newcluster.Spec.Shutdown &&\n\t\tnewcluster.Status.State == crv1.PgclusterStateShutdown {\n\t\tclusteroperator.StartupCluster(c.Client, *newcluster)\n\t}\n\n\t// check to see if the \"autofail\" label on the pgcluster CR has been changed from either true to false, or from\n\t// false to true. If it has been changed to false, autofail will then be disabled in the pg cluster. If has\n\t// been changed to true, autofail will then be enabled in the pg cluster\n\tif newcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL] != \"\" {\n\t\tautofailEnabledOld, err := strconv.ParseBool(oldcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL])\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\tautofailEnabledNew, err := strconv.ParseBool(newcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL])\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif autofailEnabledNew != autofailEnabledOld {\n\t\t\tutil.ToggleAutoFailover(c.Client, autofailEnabledNew,\n\t\t\t\tnewcluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE],\n\t\t\t\tnewcluster.ObjectMeta.Namespace)\n\t\t}\n\n\t}\n\n\t// handle standby being enabled and disabled for the cluster\n\tif oldcluster.Spec.Standby && !newcluster.Spec.Standby {\n\t\tif err := clusteroperator.DisableStandby(c.Client, *newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t} else if !oldcluster.Spec.Standby && newcluster.Spec.Standby {\n\t\tif err := clusteroperator.EnableStandby(c.Client, *newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of the resource values have changed, and if so, update them\n\tif !reflect.DeepEqual(oldcluster.Spec.Resources, newcluster.Spec.Resources) ||\n\t\t!reflect.DeepEqual(oldcluster.Spec.Limits, newcluster.Spec.Limits) {\n\t\tif err := clusteroperator.UpdateResources(c.Client, c.Client.Config, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of the pgBackRest repository resource values have changed, and\n\t// if so, update them\n\tif !reflect.DeepEqual(oldcluster.Spec.BackrestResources, newcluster.Spec.BackrestResources) ||\n\t\t!reflect.DeepEqual(oldcluster.Spec.BackrestLimits, newcluster.Spec.BackrestLimits) {\n\t\tif err := backrestoperator.UpdateResources(c.Client, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of the pgBouncer values have changed, and if so, update the\n\t// pgBouncer deployment\n\tif 
!reflect.DeepEqual(oldcluster.Spec.PgBouncer, newcluster.Spec.PgBouncer) {\n\t\tif err := updatePgBouncer(c, oldcluster, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// if we are not in a standby state, check to see if the tablespaces have\n\t// differed, and if so, add the additional volumes to the primary and replicas\n\tif !reflect.DeepEqual(oldcluster.Spec.TablespaceMounts, newcluster.Spec.TablespaceMounts) {\n\t\tif err := updateTablespaces(c, oldcluster, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (s *DiscoveryServer) updateCluster(push *model.PushContext, clusterName string, edsCluster *EdsCluster) error {\n\t// TODO: should we lock this as well ? Once we move to event-based it may not matter.\n\tvar locEps []endpoint.LocalityLbEndpoints\n\tdirection, subsetName, hostname, port := model.ParseSubsetKey(clusterName)\n\tif direction == model.TrafficDirectionInbound ||\n\t\tdirection == model.TrafficDirectionOutbound {\n\t\tlabels := push.SubsetToLabels(subsetName, hostname)\n\t\tinstances, err := edsCluster.discovery.Env.ServiceDiscovery.InstancesByPort(hostname, port, labels)\n\t\tif err != nil {\n\t\t\tadsLog.Errorf(\"endpoints for service cluster %q returned error %v\", clusterName, err)\n\t\t\ttotalXDSInternalErrors.Add(1)\n\t\t\treturn err\n\t\t}\n\t\tif len(instances) == 0 {\n\t\t\tpush.Add(model.ProxyStatusClusterNoInstances, clusterName, nil, \"\")\n\t\t\tadsLog.Debugf(\"EDS: cluster %q (host=%s ports=%v labels=%v) has no instances\", clusterName, hostname, port, labels)\n\t\t}\n\t\tedsInstances.With(prometheus.Labels{\"cluster\": clusterName}).Set(float64(len(instances)))\n\n\t\tlocEps = localityLbEndpointsFromInstances(instances)\n\t}\n\n\t// There is a chance multiple goroutines will update the cluster at the same time.\n\t// This could be prevented by a lock - but because the update may be slow, it may be\n\t// better to accept the extra computations.\n\t// We still lock the access to the LoadAssignments.\n\tedsCluster.mutex.Lock()\n\tdefer edsCluster.mutex.Unlock()\n\tedsCluster.LoadAssignment = &xdsapi.ClusterLoadAssignment{\n\t\tClusterName: clusterName,\n\t\tEndpoints: locEps,\n\t}\n\tif len(locEps) > 0 && edsCluster.NonEmptyTime.IsZero() {\n\t\tedsCluster.NonEmptyTime = time.Now()\n\t}\n\treturn nil\n}",
"func (sqlStore *SQLStore) UpdateCluster(cluster *model.Cluster) error {\n\trawMetadata, err := buildRawMetadata(cluster)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to build raw cluster metadata\")\n\t}\n\t_, err = sqlStore.execBuilder(sqlStore.db, sq.\n\t\tUpdate(\"Cluster\").\n\t\tSetMap(map[string]interface{}{\n\t\t\t\"State\": cluster.State,\n\t\t\t\"Provider\": cluster.Provider,\n\t\t\t\"ProviderMetadataRaw\": rawMetadata.ProviderMetadataRaw,\n\t\t\t\"Provisioner\": cluster.Provisioner,\n\t\t\t\"ProvisionerMetadataRaw\": rawMetadata.ProvisionerMetadataRaw,\n\t\t\t\"UtilityMetadataRaw\": rawMetadata.UtilityMetadataRaw,\n\t\t\t\"AllowInstallations\": cluster.AllowInstallations,\n\t\t}).\n\t\tWhere(\"ID = ?\", cluster.ID),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to update cluster\")\n\t}\n\n\treturn nil\n}",
"func (spmn *ScaleioPrimaryMdmNode) UpdateCluster() error {\n\tlog.Debugln(\"UpdateCluster ENTER\")\n\n\turl := spmn.State.SchedulerAddress + \"/api/state\"\n\n\tstate := &types.UpdateCluster{\n\t\tAcknowledged: false,\n\t}\n\n\tresponse, err := json.MarshalIndent(state, \"\", \" \")\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to marshall state object:\", err)\n\t\tlog.Debugln(\"UpdateCluster LEAVE\")\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(response))\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to create new HTTP request:\", err)\n\t\tlog.Debugln(\"UpdateCluster LEAVE\")\n\t\treturn err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to make HTTP call:\", err)\n\t\tlog.Debugln(\"UpdateCluster LEAVE\")\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1048576))\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to read the HTTP Body:\", err)\n\t\tlog.Debugln(\"UpdateCluster LEAVE\")\n\t\treturn err\n\t}\n\n\tlog.Debugln(\"response Status:\", resp.Status)\n\tlog.Debugln(\"response Headers:\", resp.Header)\n\tlog.Debugln(\"response Body:\", string(body))\n\n\tvar newstate types.UpdateCluster\n\terr = json.Unmarshal(body, &newstate)\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to unmarshal the UpdateState object:\", err)\n\t\tlog.Debugln(\"UpdateCluster LEAVE\")\n\t\treturn err\n\t}\n\n\tlog.Debugln(\"Acknowledged:\", newstate.Acknowledged)\n\n\tif !newstate.Acknowledged {\n\t\tlog.Errorln(\"Failed to receive an acknowledgement\")\n\t\tlog.Debugln(\"UpdateCluster LEAVE\")\n\t\treturn common.ErrStateChangeNotAcknowledged\n\t}\n\n\tlog.Errorln(\"UpdateCluster Succeeded\")\n\tlog.Debugln(\"UpdateCluster LEAVE\")\n\treturn nil\n}",
"func Update(setValuesFlag, valuesYamlFile, chartLocation, version string) error {\n\t_ = utils.CreateDirIfNotExist(utils.GetSpaceCloudDirectory())\n\n\tcharList, err := utils.HelmList(model.HelmSpaceCloudNamespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(charList) < 1 {\n\t\tutils.LogInfo(\"Space cloud cluster not found, setup a new cluster using the setup command\")\n\t\treturn nil\n\t}\n\n\tclusterID := charList[0].Name\n\tisOk := false\n\tprompt := &survey.Confirm{\n\t\tMessage: fmt.Sprintf(\"Space cloud cluster with id (%s) will be upgraded, Do you want to continue\", clusterID),\n\t}\n\tif err := survey.AskOne(prompt, &isOk); err != nil {\n\t\treturn err\n\t}\n\tif !isOk {\n\t\treturn nil\n\t}\n\n\tvaluesFileObj, err := utils.ExtractValuesObj(setValuesFlag, valuesYamlFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set clusterId of existing cluster\n\tcharInfo, err := utils.HelmGet(clusterID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvaluesFileObj[\"clusterId\"] = charInfo.Config[\"clusterId\"]\n\n\t_, err = utils.HelmUpgrade(clusterID, chartLocation, utils.GetHelmChartDownloadURL(model.HelmSpaceCloudChartDownloadURL, version), \"\", valuesFileObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println()\n\tutils.LogInfo(fmt.Sprintf(\"Space Cloud (cluster id: \\\"%s\\\") has been successfully upgraded! 👍\", charList[0].Name))\n\treturn nil\n}",
"func (c *AKSCluster) UpdateCluster(request *bTypes.UpdateClusterRequest) error {\n\tlog := logger.WithFields(logrus.Fields{\"action\": constants.TagUpdateCluster})\n\tclient, err := c.GetAKSClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.With(log.Logger)\n\n\t// send separate requests because Azure not supports multiple nodepool modification\n\t// Azure not supports adding and deleting nodepools\n\tvar nodePoolAfterUpdate []*model.AzureNodePoolModel\n\tvar updatedCluster *banzaiAzureTypes.ResponseWithValue\n\tif requestNodes := request.Azure.NodePools; requestNodes != nil {\n\t\tfor name, np := range requestNodes {\n\t\t\tif existNodePool := c.getExistingNodePoolByName(name); np != nil && existNodePool != nil {\n\t\t\t\tlog.Infof(\"NodePool is exists[%s], update...\", name)\n\n\t\t\t\tcount := int32(np.Count)\n\n\t\t\t\t// create request model for aks-client\n\t\t\t\tccr := azureCluster.CreateClusterRequest{\n\t\t\t\t\tName: c.modelCluster.Name,\n\t\t\t\t\tLocation: c.modelCluster.Location,\n\t\t\t\t\tResourceGroup: c.modelCluster.Azure.ResourceGroup,\n\t\t\t\t\tKubernetesVersion: c.modelCluster.Azure.KubernetesVersion,\n\t\t\t\t\tProfiles: []containerservice.AgentPoolProfile{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: &name,\n\t\t\t\t\t\t\tCount: &count,\n\t\t\t\t\t\t\tVMSize: containerservice.VMSizeTypes(existNodePool.NodeInstanceType),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tnodePoolAfterUpdate = append(nodePoolAfterUpdate, &model.AzureNodePoolModel{\n\t\t\t\t\tID: existNodePool.ID,\n\t\t\t\t\tClusterModelId: existNodePool.ClusterModelId,\n\t\t\t\t\tName: name,\n\t\t\t\t\tCount: np.Count,\n\t\t\t\t\tNodeInstanceType: existNodePool.NodeInstanceType,\n\t\t\t\t})\n\n\t\t\t\tupdatedCluster, err = c.updateWithPolling(client, &ccr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"There's no nodepool with this name[%s]\", name)\n\t\t\t}\n\t\t}\n\t}\n\n\tif updatedCluster != nil {\n\t\tupdateCluster := &model.ClusterModel{\n\t\t\tModel: c.modelCluster.Model,\n\t\t\tName: c.modelCluster.Name,\n\t\t\tLocation: c.modelCluster.Location,\n\t\t\tNodeInstanceType: c.modelCluster.NodeInstanceType,\n\t\t\tCloud: c.modelCluster.Cloud,\n\t\t\tOrganizationId: c.modelCluster.OrganizationId,\n\t\t\tSecretId: c.modelCluster.SecretId,\n\t\t\tStatus: c.modelCluster.Status,\n\t\t\tAzure: model.AzureClusterModel{\n\t\t\t\tResourceGroup: c.modelCluster.Azure.ResourceGroup,\n\t\t\t\tKubernetesVersion: c.modelCluster.Azure.KubernetesVersion,\n\t\t\t\tNodePools: nodePoolAfterUpdate,\n\t\t\t},\n\t\t}\n\t\tc.modelCluster = updateCluster\n\t\tc.azureCluster = &updatedCluster.Value\n\t}\n\n\treturn nil\n}",
"func cleanCluster(req *restful.Request, clusterID string) error {\n\t// 参数\n\tdata := operator.M{\n\t\tclusterIDTag: \"\",\n\t\tupdateTimeTag: time.Now(),\n\t}\n\tcondition := operator.NewLeafCondition(operator.Eq, operator.M{clusterIDTag: clusterID})\n\treturn UpdateMany(req.Request.Context(), tableName, condition, data)\n}",
"func (w *ServerInterfaceWrapper) UpdateCluster(ctx echo.Context) error {\n\tvar err error\n\t// ------------- Path parameter \"clusterId\" -------------\n\tvar clusterId ClusterIdParameter\n\n\terr = runtime.BindStyledParameterWithLocation(\"simple\", false, \"clusterId\", runtime.ParamLocationPath, ctx.Param(\"clusterId\"), &clusterId)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter clusterId: %s\", err))\n\t}\n\n\tctx.Set(BearerAuthScopes, []string{\"\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.UpdateCluster(ctx, clusterId)\n\treturn err\n}",
"func (c *blueDataClusters) Update(blueDataCluster *v1alpha1.BlueDataCluster) (result *v1alpha1.BlueDataCluster, err error) {\n\tresult = &v1alpha1.BlueDataCluster{}\n\terr = c.client.Put().\n\t\tNamespace(c.ns).\n\t\tResource(\"bluedataclusters\").\n\t\tName(blueDataCluster.Name).\n\t\tBody(blueDataCluster).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func (c *Controller) processCluster(updateCtx context.Context, workerNum uint, clusterInfo *ClusterInfo) {\n\tdefer c.clusterList.ClusterProcessed(clusterInfo)\n\n\tcluster := clusterInfo.Cluster\n\tclusterLog := c.logger.WithField(\"cluster\", cluster.Alias).WithField(\"worker\", workerNum)\n\n\tclusterLog.Infof(\"Processing cluster (%s)\", cluster.LifecycleStatus)\n\n\terr := c.doProcessCluster(updateCtx, clusterLog, clusterInfo)\n\n\t// log the error and resolve the special error cases\n\tif err != nil {\n\t\tclusterLog.Errorf(\"Failed to process cluster: %s\", err)\n\n\t\t// treat \"provider not supported\" as no error\n\t\tif err == provisioner.ErrProviderNotSupported {\n\t\t\terr = nil\n\t\t}\n\t} else {\n\t\tclusterLog.Infof(\"Finished processing cluster\")\n\t}\n\n\t// update the cluster state in the registry\n\tif !c.dryRun {\n\t\tif err != nil {\n\t\t\tif cluster.Status.Problems == nil {\n\t\t\t\tcluster.Status.Problems = make([]*api.Problem, 0, 1)\n\t\t\t}\n\t\t\tcluster.Status.Problems = append(cluster.Status.Problems, &api.Problem{\n\t\t\t\tTitle: err.Error(),\n\t\t\t\tType: errTypeGeneral,\n\t\t\t})\n\n\t\t\tif len(cluster.Status.Problems) > errorLimit {\n\t\t\t\tcluster.Status.Problems = cluster.Status.Problems[len(cluster.Status.Problems)-errorLimit:]\n\t\t\t\tcluster.Status.Problems[0] = &api.Problem{\n\t\t\t\t\tType: errTypeCoalescedProblems,\n\t\t\t\t\tTitle: \"<multiple problems>\",\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcluster.Status.Problems = []*api.Problem{}\n\t\t}\n\t\terr = c.registry.UpdateCluster(cluster)\n\t\tif err != nil {\n\t\t\tclusterLog.Errorf(\"Unable to update cluster state: %s\", err)\n\t\t}\n\t}\n}",
"func (client ContainerEngineClient) UpdateCluster(ctx context.Context, request UpdateClusterRequest) (response UpdateClusterResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.DefaultRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.updateCluster, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = UpdateClusterResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = UpdateClusterResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(UpdateClusterResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into UpdateClusterResponse\")\n\t}\n\treturn\n}",
"func UpdateColor(c *fiber.Ctx) error {\n\treturn c.JSON(\"Update color\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetAllClusters retrieves all clusters in the database
|
func (p PGSQLConnection) GetAllClusters() ([]ClusterModel, error) {
	clusters := []ClusterModel{}
	if err := p.connection.Select(&clusters, "SELECT * FROM clusters"); err != nil {
		return nil, err
	}
	return clusters, nil
}
|
[
"func GetAllCluster() []m.Cluster {\n\tclusters := []m.Cluster{}\n\tGCoreDB.Find(&clusters)\n\n\treturn clusters\n}",
"func (us *ClusterStore) GetAll() ([]model.Cluster, error) {\n\tvar cs []model.Cluster\n\tif err := us.db.Preload(clause.Associations).Find(&cs).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn cs, nil\n}",
"func (mcs *MySQLClusterService) GetAll() error {\n\tvar err error\n\tmcs.MySQLClusters, err = mcs.MySQLClusterRepo.GetAll()\n\n\treturn err\n}",
"func ListAllCluster(c echo.Context) error {\n\tcblog.Info(\"call ListAllCluster()\")\n\n\tvar req struct {\n\t\tNameSpace string\n\t\tConnectionName string\n\t}\n\n\tif err := c.Bind(&req); err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\t// To support for Get-Query Param Type API\n\tif req.ConnectionName == \"\" {\n\t\treq.ConnectionName = c.QueryParam(\"ConnectionName\")\n\t}\n\n\t// Call common-runtime API\n\tallResourceList, err := cmrt.ListAllResource(req.ConnectionName, rsCluster)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t}\n\n\t// To support for Get-Query Param Type API\n\tif req.NameSpace == \"\" {\n\t\treq.NameSpace = c.QueryParam(\"NameSpace\")\n\t}\n\n\t// Resource Name has namespace prefix when from Tumblebug\n\tif req.NameSpace != \"\" {\n\t\tnameSpace := req.NameSpace + \"-\"\n\t\tfor idx, IID := range allResourceList.AllList.MappedList {\n\t\t\tif IID.NameId != \"\" {\n\t\t\t\tallResourceList.AllList.MappedList[idx].NameId = strings.Replace(IID.NameId, nameSpace, \"\", 1)\n\t\t\t}\n\t\t}\n\t\tfor idx, IID := range allResourceList.AllList.OnlySpiderList {\n\t\t\tif IID.NameId != \"\" {\n\t\t\t\tallResourceList.AllList.OnlySpiderList[idx].NameId = strings.Replace(IID.NameId, nameSpace, \"\", 1)\n\t\t\t}\n\t\t}\n\t\tfor idx, IID := range allResourceList.AllList.OnlyCSPList {\n\t\t\tif IID.NameId != \"\" {\n\t\t\t\tallResourceList.AllList.OnlyCSPList[idx].NameId = strings.Replace(IID.NameId, nameSpace, \"\", 1)\n\t\t\t}\n\t\t}\n\t}\n\n\tvar jsonResult struct {\n\t\tConnection string\n\t\tAllResourceList *cmrt.AllResourceList\n\t}\n\tjsonResult.Connection = req.ConnectionName\n\tjsonResult.AllResourceList = &allResourceList\n\n\treturn c.JSON(http.StatusOK, &jsonResult)\n}",
"func (mcr *MiddlewareClusterRepo) GetAll() ([]metadata.MiddlewareCluster, error) {\n\tsql := `\n\t\tselect id, cluster_name, owner_id, env_id, del_flag, create_time, last_update_time\n\t\tfrom t_meta_middleware_cluster_info\n\t\twhere del_flag = 0\n\t\torder by id;\n\t`\n\tlog.Debugf(\"metadata MiddlewareClusterRepo.GetAll() sql: \\n%s\", sql)\n\n\tresult, err := mcr.Execute(sql)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// init []*MiddlewareClusterInfo\n\tmiddlewareClusterInfoList := make([]*MiddlewareClusterInfo, result.RowNumber())\n\tfor i := range middlewareClusterInfoList {\n\t\tmiddlewareClusterInfoList[i] = NewEmptyMiddlewareClusterInfoWithGlobal()\n\t}\n\t// map to struct\n\terr = result.MapToStructSlice(middlewareClusterInfoList, constant.DefaultMiddlewareTag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// init []dependency.Entity\n\tentityList := make([]metadata.MiddlewareCluster, result.RowNumber())\n\tfor i := range entityList {\n\t\tentityList[i] = middlewareClusterInfoList[i]\n\t}\n\n\treturn entityList, nil\n}",
"func (c *Core) ListClusters() ([]models.ClusterView, error) {\n\tclustersPath := path.Join(c.projectPath, \"clusters\")\n\tclusterIDStrings, err := c.kvClient.List(clustersPath)\n\tif err != nil {\n\t\tlogging.Error(\"no clusters found: %v\", err)\n\t\treturn nil, fmt.Errorf(\"no clusters found\")\n\t}\n\tclusterIDs := make([]uint64, len(clusterIDStrings))\n\tfor i, clusterIDString := range clusterIDStrings {\n\t\tclusterID, err := strconv.ParseUint(clusterIDString, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"found cluster with invalid id: '%s'\", clusterIDString)\n\t\t}\n\t\tclusterIDs[i] = clusterID\n\t}\n\n\tclusters := make([]models.ClusterView, 0)\n\n\tfor _, clusterID := range clusterIDs {\n\t\treleaseAgentConfigKey := c.getReleaseAgentConfigKey(clusterID)\n\t\treleaseAgentConfig, exists, err := c.getReleaseAgentConfig(releaseAgentConfigKey)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot get cluster '%d': %v\", clusterID, err)\n\t\t}\n\t\tif !exists {\n\t\t\tlogging.Info(\"cluster config for cluster '%d' does not exist\", clusterID)\n\t\t\tcontinue\n\t\t}\n\t\tclusters = append(clusters, models.ClusterView{\n\t\t\tID: clusterID,\n\t\t\tName: releaseAgentConfig.ClusterName,\n\t\t\tNatsChannel: releaseAgentConfig.NatsChannel,\n\t\t\tOptimiserNatsChannel: releaseAgentConfig.OptimiserNatsChannel,\n\t\t})\n\t}\n\n\treturn clusters, nil\n}",
"func getAllEksClusters(awsSession *session.Session, excludeAfter time.Time) ([]*string, error) {\n\tsvc := eks.New(awsSession)\n\tresult, err := svc.ListClusters(&eks.ListClustersInput{})\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\tfilteredClusters, err := filterOutRecentEksClusters(svc, result.Clusters, excludeAfter)\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\treturn filteredClusters, nil\n}",
"func (client ContainerEngineClient) listClusters(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/clusters\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListClustersResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/containerengine/20180222/ClusterSummary/ListClusters\"\n\t\terr = common.PostProcessServiceError(err, \"ContainerEngine\", \"ListClusters\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func listClusters(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tctx := r.Context()\n\tallowed := permission.Check(t, permission.PermClusterRead)\n\tif !allowed {\n\t\treturn permission.ErrUnauthorized\n\t}\n\tclusters, err := servicemanager.Cluster.List(ctx)\n\tif err != nil {\n\t\tif err == provTypes.ErrNoCluster {\n\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tadmin := permission.Check(t, permission.PermClusterAdmin)\n\tif !admin {\n\t\tfor i := range clusters {\n\t\t\tclusters[i].CleanUpSensitive()\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\treturn json.NewEncoder(w).Encode(clusters)\n}",
"func getAllDBClusters(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, log logrus.FieldLogger) ([]*rds.DBCluster, error) {\n\tvar clusters []*rds.DBCluster\n\terr := retryWithIndividualEngineFilters(log, auroraEngines(), func(filters []*rds.Filter) error {\n\t\tvar pageNum int\n\t\tvar out []*rds.DBCluster\n\t\terr := rdsClient.DescribeDBClustersPagesWithContext(ctx, &rds.DescribeDBClustersInput{\n\t\t\tFilters: filters,\n\t\t}, func(ddo *rds.DescribeDBClustersOutput, lastPage bool) bool {\n\t\t\tpageNum++\n\t\t\tout = append(out, ddo.DBClusters...)\n\t\t\treturn pageNum <= maxPages\n\t\t})\n\t\tif err == nil {\n\t\t\t// only append to clusters on nil error, just in case we have to retry.\n\t\t\tclusters = append(clusters, out...)\n\t\t}\n\t\treturn trace.Wrap(err)\n\t})\n\treturn clusters, trace.Wrap(err)\n}",
"func (svc ServerlessClusterService) List(ctx context.Context) (*[]models.Cluster, *Response, error) {\n\tvar clusterList []models.Cluster\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"clusters\",\n\t\tOperation: models.Query,\n\t\tInput: nil,\n\t\tArgs: models.ClusterListInput{\n\t\t\tProductType: models.Starter,\n\t\t},\n\t\tResponse: clusterList,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &clusterList)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &clusterList, resp, err\n}",
"func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}",
"func (m *ModelCluster) GetClusterInfoList(ctx context.Context,\n\trequest *bcsdatamanager.GetClusterListRequest) ([]*bcsdatamanager.Cluster, int64, error) {\n\terr := ensureTable(ctx, &m.Public)\n\tvar total int64\n\tif err != nil {\n\t\treturn nil, total, err\n\t}\n\tdimension := request.Dimension\n\tif dimension == \"\" {\n\t\tdimension = types.DimensionMinute\n\t}\n\tcond := make([]*operator.Condition, 0)\n\tif request.GetProject() != \"\" {\n\t\tcond = append(cond, operator.NewLeafCondition(operator.Eq, operator.M{\n\t\t\tProjectIDKey: request.Project,\n\t\t}))\n\t} else if request.GetBusiness() != \"\" {\n\t\tcond = append(cond, operator.NewLeafCondition(operator.Eq, operator.M{\n\t\t\tBusinessIDKey: request.GetBusiness(),\n\t\t}))\n\t}\n\tcond = append(cond, operator.NewLeafCondition(operator.Eq, operator.M{\n\t\tDimensionKey: dimension,\n\t}))\n\tcond = append(cond, operator.NewLeafCondition(operator.Gte, operator.M{\n\t\tMetricTimeKey: primitive.NewDateTimeFromTime(getStartTime(dimension)),\n\t}))\n\tconds := operator.NewBranchCondition(operator.And, cond...)\n\ttempClusterList := make([]map[string]string, 0)\n\terr = m.DB.Table(m.TableName).Find(conds).WithProjection(map[string]int{ClusterIDKey: 1, \"_id\": 0}).\n\t\tWithSort(map[string]interface{}{ClusterIDKey: 1}).All(ctx, &tempClusterList)\n\tif err != nil {\n\t\tblog.Errorf(\"get cluster id list error\")\n\t\treturn nil, total, err\n\t}\n\n\tclusterList := distinctSlice(\"cluster_id\", &tempClusterList)\n\tif len(clusterList) == 0 {\n\t\treturn nil, total, nil\n\t}\n\ttotal = int64(len(clusterList))\n\tpage := int(request.Page)\n\tsize := int(request.Size)\n\tif size == 0 {\n\t\tsize = DefaultSize\n\t}\n\tendIndex := (page + 1) * size\n\tstartIndex := page * size\n\tif startIndex >= len(clusterList) {\n\t\treturn nil, total, nil\n\t}\n\tif endIndex >= len(clusterList) {\n\t\tendIndex = len(clusterList)\n\t}\n\tchooseCluster := clusterList[startIndex:endIndex]\n\tresponse := make([]*bcsdatamanager.Cluster, 0)\n\tfor _, cluster := range chooseCluster {\n\t\tclusterRequest := &bcsdatamanager.GetClusterInfoRequest{\n\t\t\tClusterID: cluster,\n\t\t\tDimension: dimension,\n\t\t}\n\t\tclusterInfo, err := m.GetClusterInfo(ctx, clusterRequest)\n\t\tif err != nil {\n\t\t\tblog.Errorf(\"get cluster[%s] info err:%v\", cluster, err)\n\t\t} else {\n\t\t\tresponse = append(response, clusterInfo)\n\t\t}\n\t}\n\treturn response, total, nil\n}",
"func (c starterClusterServiceOp) List(ctx context.Context) (*[]models.Cluster, *Response, error) {\n\tvar clusterList []models.Cluster\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"clusters\",\n\t\tOperation: models.Query,\n\t\tInput: clusterList,\n\t\tArgs: models.ClusterListInput{\n\t\t\tProductType: models.Starter,\n\t\t},\n\t\tResponse: clusterList,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &clusterList)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &clusterList, resp, err\n}",
"func (ds *DiscoveryService) ListClusters(request *restful.Request, response *restful.Response) {\n\tkey := request.Request.URL.String()\n\tout, cached := ds.cdsCache.cachedDiscoveryResponse(key)\n\tif !cached {\n\t\tif sc := request.PathParameter(ServiceCluster); sc != ds.mesh.IstioServiceCluster {\n\t\t\terrorResponse(response, http.StatusNotFound,\n\t\t\t\tfmt.Sprintf(\"Unexpected %s %q\", ServiceCluster, sc))\n\t\t\treturn\n\t\t}\n\n\t\t// service-node holds the IP address\n\t\tip := request.PathParameter(ServiceNode)\n\t\t// CDS computes clusters that are referenced by RDS routes for a particular proxy node\n\t\t// TODO: this implementation is inefficient as it is recomputing all the routes for all proxies\n\t\t// There is a lot of potential to cache and reuse cluster definitions across proxies and also\n\t\t// skip computing the actual HTTP routes\n\t\tinstances := ds.services.HostInstances(map[string]bool{ip: true})\n\t\tservices := ds.services.Services()\n\t\thttpRouteConfigs := buildOutboundHTTPRoutes(instances, services, &ProxyContext{\n\t\t\tDiscovery: ds.services,\n\t\t\tConfig: ds.config,\n\t\t\tMeshConfig: ds.mesh,\n\t\t\tIPAddress: ip,\n\t\t})\n\n\t\t// de-duplicate and canonicalize clusters\n\t\tclusters := httpRouteConfigs.clusters().normalize()\n\n\t\t// apply custom policies for HTTP clusters\n\t\tfor _, cluster := range clusters {\n\t\t\tinsertDestinationPolicy(ds.config, cluster)\n\t\t}\n\n\t\tvar err error\n\t\tif out, err = json.MarshalIndent(ClusterManager{Clusters: clusters}, \" \", \" \"); err != nil {\n\t\t\terrorResponse(response, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t\tds.cdsCache.updateCachedDiscoveryResponse(key, out)\n\t}\n\twriteResponse(response, out)\n}",
"func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tstatus = Failure\n\tmonitor := metrics.StartExternalCall(externalSvcName, \"Fetch Clusters\")\n\tdefer func() { monitor.RecordWithStatus(status) }()\n\n\tresp, err := mlopsHttp.ExecuteHTTPRequest(\n\t\tctx,\n\t\tc.client,\n\t\thcpHostURL+clusterPathV2,\n\t\thttp.MethodGet,\n\t\tmap[string]string{sessionHeader: session},\n\t\tbytes.NewReader(nil),\n\t)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, errors.Wrapf(err, \"while fetching clusters in MLOps controller platform.\")\n\t}\n\tresp.Body.Close()\n\n\tstatus = Success\n\n\terr = c.deleteSession(ctx, hcpHostURL, session)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tclustersResp := models.ClusterResp{}\n\tjson.NewDecoder(resp.Body).Decode(&clustersResp)\n\n\treturn clustersResp, nil\n}",
"func (s *Server) List(ctx context.Context, q *ClusterQuery) (*appv1.ClusterList, error) {\n\tclusterList, err := s.db.ListClusters(ctx)\n\tif clusterList != nil {\n\t\tnewItems := make([]appv1.Cluster, 0)\n\t\tfor _, clust := range clusterList.Items {\n\t\t\tif s.enf.EnforceClaims(ctx.Value(\"claims\"), \"clusters\", \"get\", clust.Server) {\n\t\t\t\tnewItems = append(newItems, *redact(&clust))\n\t\t\t}\n\t\t}\n\t\tclusterList.Items = newItems\n\t}\n\treturn clusterList, err\n}",
"func (adm Admin) ListClusters() (string, error) {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect to zookeeper.\")\n\t\treturn \"\", err\n\t}\n\tdefer conn.Disconnect()\n\n\tvar clusters []string\n\n\tchildren, err := conn.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := conn.IsClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}",
"func (c Client) ListClusters() (ClusterList, error) {\n\tbody, err := c.watsonClient.MakeRequest(\"GET\", c.version+\"/solr_clusters\", nil, nil)\n\tif err != nil {\n\t\treturn ClusterList{}, err\n\t}\n\tvar response ClusterList\n\terr = json.Unmarshal(body, &response)\n\treturn response, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DeleteCluster deletes the cluster from the database
|
func (p PGSQLConnection) DeleteCluster(cluster *ClusterModel) error {
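	// run the delete inside a transaction so a failed exec can be rolled back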
tx, err := p.connection.Beginx()
if err != nil {
return err
}
	_, err = tx.NamedExec("DELETE FROM clusters WHERE cluster_name = :cluster_name", cluster)
	if err != nil {
		tx.Rollback() // best effort; surface the delete error, not the rollback error
		return err
	}
return tx.Commit()
}
|
[
"func (sqlStore *SQLStore) DeleteCluster(id string) error {\n\t_, err := sqlStore.execBuilder(sqlStore.db, sq.\n\t\tUpdate(\"Cluster\").\n\t\tSet(\"DeleteAt\", model.GetMillis()).\n\t\tWhere(\"ID = ?\", id).\n\t\tWhere(\"DeleteAt = 0\"),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to mark cluster as deleted\")\n\t}\n\n\treturn nil\n}",
"func (ac *AppContext) DeleteCluster(handle interface{}) error {\n\terr := ac.rtc.RtcDeletePrefix(handle)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *Client) DeleteCluster(projectID string, seed string, clusterID string) error {\n\treq, err := c.newRequest(\"DELETE\", projectPath+\"/\"+projectID+datacenterSubPath+\"/\"+seed+clustersSubPath+\"/\"+clusterID, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := c.do(req, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// StatusCodes 401 and 403 mean empty response and should be treated as such\n\tif resp.StatusCode == 401 || resp.StatusCode == 403 {\n\t\treturn nil\n\t}\n\n\tif resp.StatusCode >= 299 {\n\t\treturn errors.New(\"Got non-2xx return code: \" + strconv.Itoa(resp.StatusCode))\n\t}\n\n\treturn nil\n}",
"func (api RestAPI) DeleteCluster(clusterID string) error {\n\t// construct URL to be used to access REST API endpoint\n\tserviceURL := api.controllerURL + APIPrefix + \"client/cluster/\" + clusterID\n\n\t// perform REST API call and return error code\n\terr := performWriteRequest(serviceURL, http.MethodDelete, nil)\n\treturn err\n}",
"func DeleteCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) error {\n\tt.Logf(\"deleting zookeeper cluster: %s\", z.Name)\n\terr := f.Client.Delete(goctx.TODO(), z)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete CR: %v\", err)\n\t}\n\n\tt.Logf(\"deleted zookeeper cluster: %s\", z.Name)\n\treturn nil\n}",
"func (svc ServerlessClusterService) Delete(ctx context.Context,\n\tinput *models.ClusterDeleteInput) (*models.ClusterId, *Response, error) {\n\tvar clusterId models.ClusterId\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"deleteCluster\",\n\t\tOperation: models.Mutation,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: clusterId,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &clusterId)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &clusterId, resp, err\n}",
"func DeleteCluster(name string, orgID string) {\n\n\tStep(fmt.Sprintf(\"Delete cluster [%s] in org [%s]\", name, orgID), func() {\n\t\tbackupDriver := Inst().Backup\n\t\tclusterDeleteReq := &api.ClusterDeleteRequest{\n\t\t\tOrgId: orgID,\n\t\t\tName: name,\n\t\t}\n\t\tctx, err := backup.GetPxCentralAdminCtx()\n\t\texpect(err).NotTo(haveOccurred(),\n\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\terr))\n\t\tbackupDriver.DeleteCluster(ctx, clusterDeleteReq)\n\t\t// Best effort cleanup, dont fail test, if deletion fails\n\t\t//expect(err).NotTo(haveOccurred(),\n\t\t//\tfmt.Sprintf(\"Failed to delete cluster [%s] in org [%s]\", name, orgID))\n\t})\n}",
"func (c starterClusterServiceOp) Delete(ctx context.Context, input *models.ClusterDeleteInput) (*models.ClusterId, *Response, error) {\n\tvar clusterId models.ClusterId\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"deleteCluster\",\n\t\tOperation: models.Mutation,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: clusterId,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &clusterId)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &clusterId, resp, err\n}",
"func (client ContainerEngineClient) deleteCluster(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/clusters/{clusterId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteClusterResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/containerengine/20180222/Cluster/DeleteCluster\"\n\t\terr = common.PostProcessServiceError(err, \"ContainerEngine\", \"DeleteCluster\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func (a *Actuator) Delete(cluster *clusterv1.Cluster) error {\n\ta.log.Info(\"Deleting cluster\", \"cluster-name\", cluster.Name, \"cluster-namespace\", cluster.Namespace)\n\n\tscope, err := scope.NewClusterScope(scope.ClusterScopeParams{\n\t\tCluster: cluster,\n\t\tClient: a.Client,\n\t\tLogger: a.log,\n\t})\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to create scope: %+v\", err)\n\t}\n\n\tdefer scope.Close()\n\n\tec2svc := ec2.NewService(scope)\n\telbsvc := elb.NewService(scope)\n\n\tif err := elbsvc.DeleteLoadbalancers(); err != nil {\n\t\treturn errors.Errorf(\"unable to delete load balancers: %+v\", err)\n\t}\n\n\tif err := ec2svc.DeleteBastion(); err != nil {\n\t\treturn errors.Errorf(\"unable to delete bastion: %+v\", err)\n\t}\n\n\tif err := ec2svc.DeleteNetwork(); err != nil {\n\t\ta.log.Error(err, \"Error deleting cluster\", \"cluster-name\", cluster.Name, \"cluster-namespace\", cluster.Namespace)\n\t\treturn &controllerError.RequeueAfterError{\n\t\t\tRequeueAfter: 5 * time.Second,\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (w *worker) deleteCluster(cluster *chop.ChiCluster) error {\n\tw.a.V(2).M(cluster).S().P()\n\tdefer w.a.V(2).M(cluster).E().P()\n\n\tw.a.V(1).\n\t\tWithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteStarted).\n\t\tWithStatusAction(cluster.CHI).\n\t\tM(cluster).F().\n\t\tInfo(\"Delete cluster %s/%s - started\", cluster.Address.Namespace, cluster.Name)\n\n\t// Delete all shards\n\tcluster.WalkShards(func(index int, shard *chop.ChiShard) error {\n\t\treturn w.deleteShard(shard)\n\t})\n\n\t// Delete Cluster Service\n\t_ = w.c.deleteServiceCluster(cluster)\n\n\tw.a.V(1).\n\t\tWithEvent(cluster.CHI, eventActionDelete, eventReasonDeleteCompleted).\n\t\tWithStatusAction(cluster.CHI).\n\t\tM(cluster).F().\n\t\tInfo(\"Delete cluster %s/%s - completed\", cluster.Address.Namespace, cluster.Name)\n\n\treturn nil\n}",
"func DeleteCluster(c *cli.Context) error {\n\n\tclusters, err := getClusters(c.Bool(\"all\"), c.String(\"name\"))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(clusters) == 0 {\n\t\tif !c.IsSet(\"all\") && !c.IsSet(\"name\") {\n\t\t\treturn fmt.Errorf(\"No cluster with name '%s' found (You can add `--all` and `--name <CLUSTER-NAME>` to delete other clusters)\", c.String(\"name\"))\n\t\t}\n\t\treturn fmt.Errorf(\"No cluster(s) found\")\n\t}\n\n\t// remove clusters one by one instead of appending all names to the docker command\n\t// this allows for more granular error handling and logging\n\tfor _, cluster := range clusters {\n\t\tlog.Printf(\"Removing cluster [%s]\", cluster.name)\n\t\tif len(cluster.workers) > 0 {\n\t\t\t// TODO: this could be done in goroutines\n\t\t\tlog.Printf(\"...Removing %d workers\\n\", len(cluster.workers))\n\t\t\tfor _, worker := range cluster.workers {\n\t\t\t\tif err := removeContainer(worker.ID); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdeleteClusterDir(cluster.name)\n\t\tlog.Println(\"...Removing server\")\n\t\tif err := removeContainer(cluster.server.ID); err != nil {\n\t\t\treturn fmt.Errorf(\" Couldn't remove server for cluster %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tif err := disconnectRegistryFromNetwork(cluster.name, c.IsSet(\"keep-registry-volume\")); err != nil {\n\t\t\tlog.Warningf(\"Couldn't disconnect Registry from network %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tif c.IsSet(\"prune\") {\n\t\t\t// disconnect any other container that is connected to the k3d network\n\t\t\tnid, err := getClusterNetwork(cluster.name)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warningf(\"Couldn't get the network for cluster %q\\n%+v\", cluster.name, err)\n\t\t\t}\n\t\t\tcids, err := getContainersInNetwork(nid)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warningf(\"Couldn't get the list of containers connected to network %q\\n%+v\", nid, err)\n\t\t\t}\n\t\t\tfor _, cid := range cids {\n\t\t\t\terr := disconnectContainerFromNetwork(cid, nid)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warningf(\"Couldn't disconnect container %q from network %q\", cid, nid)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"...%q has been forced to disconnect from %q's network\", cid, cluster.name)\n\t\t\t}\n\t\t}\n\n\t\tif err := deleteClusterNetwork(cluster.name); err != nil {\n\t\t\tlog.Warningf(\"Couldn't delete cluster network for cluster %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tlog.Println(\"...Removing docker image volume\")\n\t\tif err := deleteImageVolume(cluster.name); err != nil {\n\t\t\tlog.Warningf(\"Couldn't delete image docker volume for cluster %s\\n%+v\", cluster.name, err)\n\t\t}\n\n\t\tlog.Infof(\"Removed cluster [%s]\", cluster.name)\n\t}\n\n\treturn nil\n}",
"func (adm Admin) DropCluster(cluster string) error {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Disconnect()\n\n\tkb := KeyBuilder{cluster}\n\tc := kb.cluster()\n\n\treturn conn.DeleteTree(c)\n}",
"func (d *Dao) RemoveCluster(ctx context.Context, cname string) (jobid string, err error) {\n\tsub, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tvar (\n\t\tappids []*etcd.Node\n\t)\n\n\tappids, err = d.e.LS(sub, fmt.Sprintf(\"%s/%s/appids\", etcd.ClusterDir, cname))\n\tif client.IsKeyNotFound(err) {\n\t\terr = nil\n\t} else if err != nil {\n\t\treturn\n\t}\n\n\tif len(appids) > 0 {\n\t\terr = ErrClusterAssigned\n\t\treturn\n\t}\n\n\tj, err := d.createDestroyClusterJob(ctx, cname)\n\tif err == nil {\n\t\tjobid, err = d.saveJob(ctx, j)\n\t\treturn\n\t}\n\tif !client.IsKeyNotFound(err) {\n\t\tlog.Errorf(\"create destroy cluster job fail %v\", err)\n\t\treturn\n\t}\n\n\tlog.Warn(\"cluster has no info, destroy directly, job should be deleted manually\")\n\t// clear etcd info\n\t// NOTE: same as scheduler.destroyCluster\n\tvar nodes []*etcd.Node\n\tnodes, err = d.e.LS(ctx, fmt.Sprintf(etcd.ClusterInstancesDir, cname))\n\tif err != nil {\n\t\tlog.Errorf(\"get cluster(%s) nodes info err %v\", cname, err)\n\t}\n\td.e.RMDir(ctx, fmt.Sprintf(\"%s/%s\", etcd.ClusterDir, cname))\n\tfor _, node := range nodes {\n\t\terr = d.e.RMDir(ctx, etcd.InstanceDirPrefix+\"/\"+node.Value)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"rm instance dir (%s) fail err %v\", node.Value, err)\n\t\t} else {\n\t\t\tlog.Infof(\"instance dir (%s) removed\", node.Value)\n\t\t}\n\t}\n\treturn \"\", nil\n}",
"func (adm Admin) DropCluster(cluster string) error {\n\tconn := newConnection(adm.zkSvr)\n\tif err := conn.Connect(); err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Disconnect()\n\n\tkb := keyBuilder{clusterID: cluster}\n\treturn conn.DeleteTree(kb.cluster())\n}",
"func DeleteBKCluster(t *testing.T, k8client client.Client, b *bkapi.BookkeeperCluster) error {\n\tlog.Printf(\"deleting bookkeeper cluster: %s\", b.Name)\n\terr := k8client.Delete(goctx.TODO(), b)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"failed to delete CR: %v\", err)\n\t}\n\n\tlog.Printf(\"deleted bookkeeper cluster: %s\", b.Name)\n\treturn nil\n}",
"func (adm Admin) DropCluster(cluster string) error {\n\tkb := KeyBuilder{cluster}\n\tc := kb.cluster()\n\n\treturn adm.zkClient.DeleteTree(c)\n}",
"func (m *Monitor) deleteCluster(managedCluster *clusterv1.ManagedCluster) {\n\tglog.V(2).Info(\"Processing Cluster Delete.\")\n\n\tclusterToDelete := managedCluster.GetName()\n\tfor clusterIdx, cluster := range m.ManagedClusterInfo {\n\t\tif clusterToDelete == cluster.Namespace {\n\t\t\tglog.Infof(\"Removing %s from Insights cluster list\", clusterToDelete)\n\t\t\tm.ManagedClusterInfo = append(m.ManagedClusterInfo[:clusterIdx], m.ManagedClusterInfo[clusterIdx+1:]...)\n\t\t}\n\t}\n}",
"func (w *ServerInterfaceWrapper) DeleteCluster(ctx echo.Context) error {\n\tvar err error\n\t// ------------- Path parameter \"clusterId\" -------------\n\tvar clusterId ClusterIdParameter\n\n\terr = runtime.BindStyledParameterWithLocation(\"simple\", false, \"clusterId\", runtime.ParamLocationPath, ctx.Param(\"clusterId\"), &clusterId)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter clusterId: %s\", err))\n\t}\n\n\tctx.Set(BearerAuthScopes, []string{\"\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.DeleteCluster(ctx, clusterId)\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GenCerts generates a CA cert, or a server cert signed by that CA. If isCA is true, the output is a CA cert/key pair named ca-cert.pem/ca-key.pem; if isCA is false, the files are named after outname, for example outfile-cert.pem and outfile-key.pem
|
func GenCerts(hosts []string, outname string, isCA bool) (err error) {
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return fmt.Errorf("GenerateKey: %v", err)
}
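	// template shared by CA and server certs; valid for roughly ten years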
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
Organization: []string{"Acme Co"},
},
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Hour * 24 * 3650),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
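	// cakey/cacrt hold the existing CA material when signing a server cert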
var (
cakey *ecdsa.PrivateKey
cacrt *x509.Certificate
derBytes []byte
)
	if isCA {
template.IsCA = true
template.KeyUsage |= x509.KeyUsageCertSign
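		// force the output name so the CA files land in ca-cert.pem/ca-key.pem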
outname = "ca"
derBytes, err = x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv)
if err != nil {
return fmt.Errorf("Failed to create certificate: %v", err)
}
	} else {
		// the server cert is valid for these names
		for _, h := range hosts {
if ip := net.ParseIP(h); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, h)
}
}
		// ca key file
		ca_data, err := os.ReadFile("ca-key.pem")
		if err != nil {
			return fmt.Errorf("Read ca-key.pem: %v", err)
		}
		block, _ := pem.Decode(ca_data)
		if block == nil {
			return fmt.Errorf("ca-key.pem: no PEM data found")
		}
		cakey, err = x509.ParseECPrivateKey(block.Bytes)
		if err != nil {
			return fmt.Errorf("ParseECPrivateKey: %v", err)
		}
		// ca cert file
		ca_data, err = os.ReadFile("ca-cert.pem")
		if err != nil {
			return fmt.Errorf("Read ca-cert.pem: %v", err)
		}
		block, _ = pem.Decode(ca_data)
		if block == nil {
			return fmt.Errorf("ca-cert.pem: no PEM data found")
		}
		cacrt, err = x509.ParseCertificate(block.Bytes)
		if err != nil {
			return fmt.Errorf("ParseCertificate: %v", err)
		}
// generate C2 server certificate, signed by our CA
derBytes, err = x509.CreateCertificate(rand.Reader, &template, cacrt, publicKey(priv), cakey)
if err != nil {
return fmt.Errorf("Failed to create certificate: %v", err)
}
}
// output to pem files
out := &bytes.Buffer{}
outcert := fmt.Sprintf("%s-cert.pem", outname)
outkey := fmt.Sprintf("%s-key.pem", outname)
// cert
pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
err = os.WriteFile(outcert, out.Bytes(), 0600)
if err != nil {
return fmt.Errorf("Write %s: %v", outcert, err)
}
out.Reset()
// key
pem.Encode(out, pemBlockForKey(priv))
err = os.WriteFile(outkey, out.Bytes(), 0600)
if err != nil {
return fmt.Errorf("Write %s: %v", outkey, err)
}
return
}
|
[
"func genCerts(date time.Time) ([]byte, []byte, error) {\n\t// Create ca signing key\n\tca := &x509.Certificate{\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"I Can Haz Expired Certs\"},\n\t\t},\n\t\tSerialNumber: big.NewInt(42),\n\t\tNotBefore: date.Truncate(8760 * time.Hour),\n\t\tNotAfter: date,\n\t\tIsCA: true,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},\n\t\tKeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tBasicConstraintsValid: true,\n\t}\n\n\t// Create a private key\n\tkey, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Could not generate rsa key - %s\", err)\n\t}\n\n\t// Use ca key to sign a CSR and create a public Cert\n\tcsr := &key.PublicKey\n\tcert, err := x509.CreateCertificate(rand.Reader, ca, ca, csr, key)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Could not generate certificate - %s\", err)\n\t}\n\n\t// Convert keys into []byte\n\tc := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: cert})\n\tk := pem.EncodeToMemory(&pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(key)})\n\treturn c, k, nil\n}",
"func GenerateCryptoCerts(input networkspec.Config, kubeConfigPath string) error {\n\n\tconfigPath := filepath.Join(input.ArtifactsLocation, \"crypto-config\")\n\terr := client.ExecuteCommand(\"cryptogen\", \"generate\", \"--config=./../configFiles/crypto-config.yaml\", fmt.Sprintf(\"--output=%v\", configPath))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif kubeConfigPath == \"\" {\n\t\tfor i := 0; i < len(input.OrdererOrganizations); i++ {\n\t\t\torg := input.OrdererOrganizations[i]\n\t\t\terr = changeKeyName(input.ArtifactsLocation, \"orderer\", org.Name, \"ca\", org.NumCA)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = changeKeyName(input.ArtifactsLocation, \"orderer\", org.Name, \"tlsca\", org.NumCA)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfor i := 0; i < len(input.PeerOrganizations); i++ {\n\t\t\torg := input.PeerOrganizations[i]\n\t\t\terr = changeKeyName(input.ArtifactsLocation, \"peer\", org.Name, \"ca\", org.NumCA)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = changeKeyName(input.ArtifactsLocation, \"peer\", org.Name, \"tlsca\", org.NumCA)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (am *admissionManager) generateCerts(create bool) (\n\tserverCertificate, serverPrivateKey, caCertificate []byte,\n\terr error) {\n\tvar caPrivateKey []byte\n\tcaPrivateKey, err = utils.SetUpCaKey()\n\tif err != nil {\n\t\tklog.Errorf(\"set up ca key failed %v\", err)\n\t\treturn nil, nil, nil, err\n\t}\n\tcaCertificate, err = utils.SetUpCaCert(webhookconstants.ComponentName, caPrivateKey)\n\tif err != nil {\n\t\tklog.Errorf(\"set up ca cert failed %v\", err)\n\t\treturn nil, nil, nil, err\n\t}\n\tnamespace := utils.GetCurrentNamespace()\n\tdomains, ips := subjectAltNames(namespace, am.externalService)\n\tserverCertificate, serverPrivateKey, err = utils.SetUpSignedCertAndKey(domains, ips,\n\t\twebhookconstants.ComponentName,\n\t\tcaPrivateKey, caCertificate, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth})\n\tif err != nil {\n\t\tklog.Errorf(\"set up server cert error %v\", err)\n\t\treturn nil, nil, nil, err\n\t}\n\tif create {\n\t\t// try to create a new secret to save certificate and privateKey and ca certificate.\n\t\t_, err = am.kubeClient.CoreV1().Secrets(namespace).Create(context.Background(),\n\t\t\t&corev1.Secret{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: certsSecretName,\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t},\n\t\t\t\tData: map[string][]byte{\n\t\t\t\t\tserverCert: serverCertificate,\n\t\t\t\t\tserverKey: serverPrivateKey,\n\t\t\t\t\tcaCert: caCertificate,\n\t\t\t\t},\n\t\t\t}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"create new certificate secret error %v\", err)\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t} else {\n\t\t// try to update an old secret to save certificate and privateKey and ca certificate.\n\t\tif err = utils.UpdateSecret(am.kubeClient, namespace, certsSecretName,\n\t\t\tfunc(secret *corev1.Secret) {\n\t\t\t\tsecret.Data = map[string][]byte{\n\t\t\t\t\tserverCert: serverCertificate,\n\t\t\t\t\tserverKey: serverPrivateKey,\n\t\t\t\t\tcaCert: caCertificate,\n\t\t\t\t}\n\t\t\t}); err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t}\n\treturn caCertificate, serverCertificate, serverPrivateKey, nil\n}",
"func genCertsIfMIssing(\n\tt *testing.T,\n\tcapem string,\n\tcakey string,\n) error {\n\t_, err := os.Stat(capem)\n\tif err == nil {\n\t\t_, err = os.Stat(cakey)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to generate serial number: %s\", err)\n\t}\n\n\tcaTemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCountry: []string{\"US\"},\n\t\t\tOrganization: []string{\"elastic\"},\n\t\t\tOrganizationalUnit: []string{\"beats\"},\n\t\t},\n\t\tIssuer: pkix.Name{\n\t\t\tCountry: []string{\"US\"},\n\t\t\tOrganization: []string{\"elastic\"},\n\t\t\tOrganizationalUnit: []string{\"beats\"},\n\t\t\tLocality: []string{\"locality\"},\n\t\t\tProvince: []string{\"province\"},\n\t\t\tStreetAddress: []string{\"Mainstreet\"},\n\t\t\tPostalCode: []string{\"12345\"},\n\t\t\tSerialNumber: \"23\",\n\t\t\tCommonName: \"*\",\n\t\t},\n\t\tIPAddresses: []net.IP{\n\t\t\tnet.IP{127, 0, 0, 1},\n\t\t},\n\n\t\tSignatureAlgorithm: x509.SHA512WithRSA,\n\t\tPublicKeyAlgorithm: x509.ECDSA,\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(10, 0, 0),\n\t\tSubjectKeyId: []byte(\"12345\"),\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{\n\t\t\tx509.ExtKeyUsageClientAuth,\n\t\t\tx509.ExtKeyUsageServerAuth},\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment |\n\t\t\tx509.KeyUsageDigitalSignature |\n\t\t\tx509.KeyUsageCertSign,\n\t}\n\n\t// generate keys\n\tpriv, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to generate ca private key: %v\", err)\n\t}\n\tpub := &priv.PublicKey\n\n\t// generate certificate\n\tcaBytes, err := x509.CreateCertificate(\n\t\trand.Reader,\n\t\t&caTemplate,\n\t\t&caTemplate,\n\t\tpub, priv)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to generate ca certificate: %v\", err)\n\t}\n\n\t// write key file\n\tkeyOut, err := os.OpenFile(cakey, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to open key file for writing: %v\", err)\n\t}\n\tpem.Encode(keyOut, &pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(priv)})\n\tkeyOut.Close()\n\n\t// write certificate\n\tcertOut, err := os.Create(capem)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to open cert.pem for writing: %s\", err)\n\t}\n\tpem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: caBytes})\n\tcertOut.Close()\n\n\treturn nil\n}",
"func OutputCerts(config *certgenConfig, kubeclient *kubernetes.Clientset, certs *certs.Certificates) error {\n\tvar secrets []*corev1.Secret\n\tvar errs []error\n\n\tforce := certgen.NoOverwrite\n\tif config.Overwrite {\n\t\tforce = certgen.Overwrite\n\t}\n\n\tif config.OutputYAML || config.OutputKube {\n\t\tswitch config.Format {\n\t\tcase \"legacy\":\n\t\t\tsecrets, errs = certgen.AsLegacySecrets(config.Namespace, config.NameSuffix, certs)\n\t\tcase \"compact\":\n\t\t\tsecrets, errs = certgen.AsSecrets(config.Namespace, config.NameSuffix, certs)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unsupported Secrets format %q\", config.Format)\n\t\t}\n\n\t\tif len(errs) > 0 {\n\t\t\treturn utilerrors.NewAggregate(errs)\n\t\t}\n\t}\n\n\tif config.OutputPEM {\n\t\tfmt.Printf(\"Writing certificates to PEM files in %s/\\n\", config.OutputDir)\n\t\tif err := certgen.WriteCertsPEM(config.OutputDir, certs, force); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write certificates to %q: %w\", config.OutputDir, err)\n\t\t}\n\t}\n\n\tif config.OutputYAML {\n\t\tfmt.Printf(\"Writing %q format Secrets to YAML files in %s/\\n\", config.Format, config.OutputDir)\n\t\tif err := certgen.WriteSecretsYAML(config.OutputDir, secrets, force); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write Secrets to %q: %w\", config.OutputDir, err)\n\t\t}\n\t}\n\n\tif config.OutputKube {\n\t\tfmt.Printf(\"Writing %q format Secrets to namespace %q\\n\", config.Format, config.Namespace)\n\t\tif err := certgen.WriteSecretsKube(kubeclient, secrets, force); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write certificates to %q: %w\", config.Namespace, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile, org string, bits int) error {\n\ttemplate, err := newCertificate(org)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// client\n\tif len(hosts) == 1 && hosts[0] == \"\" {\n\t\ttemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}\n\t\ttemplate.KeyUsage = x509.KeyUsageDigitalSignature\n\t} else { // server\n\t\ttemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}\n\t\tfor _, h := range hosts {\n\t\t\tif ip := net.ParseIP(h); ip != nil {\n\t\t\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\n\t\t\t} else {\n\t\t\t\ttemplate.DNSNames = append(template.DNSNames, h)\n\t\t\t}\n\t\t}\n\t}\n\n\ttlsCert, err := tls.LoadX509KeyPair(caFile, caKeyFile)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tpriv, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tx509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, x509Cert, &priv.PublicKey, tlsCert.PrivateKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertOut, err := os.Create(certFile)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tpem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tcertOut.Close()\n\n\tkeyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tpem.Encode(keyOut, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)})\n\tkeyOut.Close()\n\n\treturn nil\n}",
"func (s *Server) GenerateCertificates(certDir, pubKeyFile, privKeyFile, caFile string) (err error) {\n\tif s.insecure {\n\t\treturn nil\n\t}\n\n\tvar ca *servertls.Certificate\n\tif passphrase := os.Getenv(EnvTLSServerPassword); passphrase != \"\" {\n\t\tdefaultOpts := servertls.DefaultCertificateOpts(s.Name+\"-ca\", certDir)\n\t\tdefaultOpts.Passphrase = passphrase\n\t\tca = servertls.NewCertificate(s.Name+\"-ca\", defaultOpts).GenerateCertificateAuthority()\n\t} else {\n\t\tca = servertls.GenerateCertificateAuthority(s.Name+\"-ca\", certDir)\n\t}\n\ts.ui.Log.Infof(\"saving the CA certificates to %s and %s\", ca.Opts.PrivateKeyFile, ca.Opts.CertificateFile) // s.caKeyPair.CertFile, s.caKeyPair.KeyFile)\n\tif err := ca.Persist().Error(); err != nil {\n\t\treturn fmt.Errorf(\"failed to generate the CA certificate. %s\", err)\n\t}\n\n\t// Append the CA Cert into the cert pool\n\tif s.certPool == nil {\n\t\ts.certPool = x509.NewCertPool()\n\t}\n\tif ok := s.certPool.AppendCertsFromPEM(ca.CertificatePEM()); !ok {\n\t\treturn fmt.Errorf(\"failed to append CA cert\")\n\t}\n\ts.caCert = ca\n\n\tserverCert := servertls.GenerateSignedCertificate(s.Name, certDir, ca, s.host).Persist()\n\ts.ui.Log.Infof(\"saving the server certificates to %s and %s\", serverCert.Opts.PrivateKeyFile, serverCert.Opts.CertificateFile) //s.keyPair.CertFile, s.keyPair.KeyFile)\n\tif err := serverCert.Error(); err != nil {\n\t\treturn fmt.Errorf(\"failed to generate the server certificate. %s\", err)\n\t}\n\ts.serverCert = serverCert\n\n\tif s.certificate, err = tls.X509KeyPair(serverCert.CertificatePEM(), serverCert.PrivateKeyPEM()); err != nil {\n\t\treturn err\n\t}\n\n\tclientCert := servertls.GenerateSignedClientCertificate(s.Name+\"-client\", certDir, ca, s.host).Persist()\n\ts.ui.Log.Infof(\"saving the client certificates to %s and %s\", clientCert.Opts.PrivateKeyFile, clientCert.Opts.CertificateFile) //s.keyPair.CertFile, s.keyPair.KeyFile)\n\tif err := clientCert.Error(); err != nil {\n\t\treturn fmt.Errorf(\"failed to generate the server certificate. %s\", err)\n\t}\n\n\treturn nil\n}",
"func GenCert(certf string, keyf string, certtype bool, addHosts bool) error {\n\t/* Create the basenames if needed */\n\tdir := filepath.Dir(certf)\n\terr := os.MkdirAll(dir, 0750)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdir = filepath.Dir(keyf)\n\terr = os.MkdirAll(dir, 0750)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertBytes, keyBytes, err := GenerateMemCert(certtype, addHosts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcertOut, err := os.Create(certf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open %s for writing: %w\", certf, err)\n\t}\n\n\t_, err = certOut.Write(certBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to write cert file: %w\", err)\n\t}\n\n\terr = certOut.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to close cert file: %w\", err)\n\t}\n\n\tkeyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open %s for writing: %w\", keyf, err)\n\t}\n\n\t_, err = keyOut.Write(keyBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to write key file: %w\", err)\n\t}\n\n\terr = keyOut.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to close key file: %w\", err)\n\t}\n\n\treturn nil\n}",
"func createCertificates(_ *testing.T) error {\n\tvar err error\n\tvar srcCaCrt *os.File\n\tvar srcTLSCrt *os.File\n\tvar srcTLSKey *os.File\n\tvar destCaCrt *os.File\n\tvar destTLSCrt *os.File\n\tvar destTLSKey *os.File\n\n\tdir := \"/tmp/k8s-webhook-server/serving-certs\"\n\n\t// create directory if not existing yet\n\t_ = os.Mkdir(\"/tmp/k8s-webhook-server\", os.ModePerm)\n\t_ = os.Mkdir(dir, os.ModePerm)\n\n\t// open src files\n\tif srcCaCrt, err = os.Open(\"../../test/certs/ca.crt\"); err != nil {\n\t\treturn err\n\t}\n\tdefer srcCaCrt.Close()\n\tif srcTLSCrt, err = os.Open(\"../../test/certs/tls.crt\"); err != nil {\n\t\treturn err\n\t}\n\tdefer srcTLSCrt.Close()\n\tif srcTLSKey, err = os.Open(\"../../test/certs/tls.key\"); err != nil {\n\t\treturn err\n\t}\n\tdefer srcTLSKey.Close()\n\n\t// open dest files\n\tif destCaCrt, err = os.Create(fmt.Sprintf(\"%s/%s\", dir, \"ca.crt\")); err != nil {\n\t\treturn err\n\t}\n\tdefer destCaCrt.Close()\n\tif destTLSCrt, err = os.Create(fmt.Sprintf(\"%s/%s\", dir, \"tls.crt\")); err != nil {\n\t\treturn err\n\t}\n\tdefer destTLSCrt.Close()\n\tif destTLSKey, err = os.Create(fmt.Sprintf(\"%s/%s\", dir, \"tls.key\")); err != nil {\n\t\treturn err\n\t}\n\tdefer destTLSKey.Close()\n\n\t// copy ca.crt\n\tif _, err := io.Copy(destCaCrt, srcCaCrt); err != nil {\n\t\treturn err\n\t}\n\t// copy tls.crt\n\tif _, err := io.Copy(destTLSCrt, srcTLSCrt); err != nil {\n\t\treturn err\n\t}\n\t// copy tls.key\n\tif _, err := io.Copy(destTLSKey, srcTLSKey); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (Dev) Certificates() error {\n\tif _, err := os.Stat(\"key.pem\"); err == nil {\n\t\tif _, err := os.Stat(\"cert.pem\"); err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn execGo(\"run\", path.Join(runtime.GOROOT(), \"src\", \"crypto\", \"tls\", \"generate_cert.go\"), \"-ca\", \"-host\", \"localhost,*.localhost\")\n}",
"func generateCertificates(sans []string) (rootCA, leafCert *generator.Certificate, err error) {\n\trootCA, err = generator.GenerateRootCA(\"secret-agent\")\n\tif err != nil {\n\t\treturn\n\t}\n\tleafCert, err = generator.GenerateSignedCert(rootCA, v1alpha1.ECDSAWithSHA256, \"\", sans)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func generateMultiCert(t *testing.T) string {\n\ttempFile, err := ioutil.TempFile(\"/tmp\", \"cert-test\")\n\tdefer tempFile.Close()\n\tassert.NoError(t, err)\n\n\trsaKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tassert.NoError(t, err)\n\tecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tassert.NoError(t, err)\n\n\tfor _, key := range []crypto.Signer{rsaKey, ecKey} {\n\t\tcert, err := cryptoservice.GenerateTestingCertificate(key, \"gun\")\n\t\tassert.NoError(t, err)\n\n\t\tpemBytes := trustmanager.CertToPEM(cert)\n\t\tnBytes, err := tempFile.Write(pemBytes)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, nBytes, len(pemBytes))\n\t}\n\treturn tempFile.Name()\n}",
"func generateRSAMutualAuthCerts(host string, caCertOut, serverKeyOut, serverCertOut, clientKeyOut, clientCertOut io.Writer) error {\n\tnotBefore := time.Now()\n\tnotAfter := notBefore.Add(validFor)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate serial number: %s\", err)\n\t}\n\n\t// Generate the CA key and CA cert\n\tcaKey, err := rsa.GenerateKey(rand.Reader, rsaBits)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate key: %v\", err)\n\t}\n\n\tcaTemplate := x509.Certificate{\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: host + \"-ca\",\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t},\n\t\tSerialNumber: serialNumber,\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tcaTemplate.IsCA = true\n\tcaTemplate.KeyUsage |= x509.KeyUsageCertSign\n\n\tcaBytes, err := x509.CreateCertificate(rand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create certificate: %s\", err)\n\t}\n\tif err := pem.Encode(caCertOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: caBytes}); err != nil {\n\t\treturn fmt.Errorf(\"failed creating cert: %v\", err)\n\t}\n\n\t// Generate the Server Key and CSR for the server\n\tserverKey, err := rsa.GenerateKey(rand.Reader, rsaBits)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate key: %v\", err)\n\t}\n\n\t// Create the server cert and sign with the csr\n\tserialNumber, err = rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate serial number: %s\", err)\n\t}\n\n\tserverTemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: host,\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},\n\t}\n\n\tserverBytes, err := x509.CreateCertificate(rand.Reader, &serverTemplate, &caTemplate, &serverKey.PublicKey, caKey)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create certificate: %s\", err)\n\t}\n\tif err := pem.Encode(serverCertOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: serverBytes}); err != nil {\n\t\treturn fmt.Errorf(\"failed creating cert: %v\", err)\n\t}\n\tif err := pem.Encode(serverKeyOut, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(serverKey)}); err != nil {\n\t\treturn fmt.Errorf(\"failed creating key: %v\", err)\n\t}\n\n\t// Create the client key and certificate\n\tclientKey, err := rsa.GenerateKey(rand.Reader, rsaBits)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate key: %v\", err)\n\t}\n\n\tserialNumber, err = rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to generate serial number: %s\", err)\n\t}\n\tclientTemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: host + \"-client\",\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | 
x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},\n\t}\n\n\tclientBytes, err := x509.CreateCertificate(rand.Reader, &clientTemplate, &caTemplate, &clientKey.PublicKey, caKey)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create certificate: %s\", err)\n\t}\n\tif err := pem.Encode(clientCertOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: clientBytes}); err != nil {\n\t\treturn fmt.Errorf(\"failed creating cert: %v\", err)\n\t}\n\tif err := pem.Encode(clientKeyOut, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(clientKey)}); err != nil {\n\t\treturn fmt.Errorf(\"failed creating key: %v\", err)\n\t}\n\n\treturn nil\n}",
"func GenCerts(ID, addr string) (keyname, certname string) {\n\tgpath := os.Getenv(\"GOPATH\")\n\tvar rebuild string\n\tvar kdzpath string\n\tif strings.Contains(gpath, \"\\\\\") == true {\n\t\tgsep := strings.Split(gpath, \"\\\\\")\n\t\trebuild = \"/\" + strings.Join(gsep[1:], \"/\")\n\t\tkdzpath = \"/mnt/c\" + rebuild + \"/src/github.com/TerminalJockey/Kudzu/certs/\"\n\n\t\tsrvkeycmd := exec.Command(\"wsl\", \"openssl\", \"req\", \"-new\", \"-newkey\", \"rsa:2048\", \"-nodes\",\n\t\t\t\"-days\", \"365\", \"-x509\", \"-addext\", \"subjectAltName = IP:\"+addr, \"-subj\", \"/CN=test\",\n\t\t\t\"-keyout\", kdzpath+ID+\".key\", \"-out\", kdzpath+ID+\".cert\")\n\t\terr := srvkeycmd.Run()\n\t\tif err != nil {\n\t\t\tif strings.HasSuffix(err.Error(), `executable file not found in %PATH%`) == true {\n\t\t\t\tfmt.Println(\"install wls for tls support on Windows (or implement your own!)\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\treturn ID + \".key\", ID + \".cert\"\n\t} else {\n\t\trebuild = \"/\" + gpath\n\t\tkdzpath = rebuild + \"/github.com/TerminalJockey/Kudzu/certs/\"\n\t\tsrvkeycmd := exec.Command(\"openssl\", \"req\", \"-new\", \"-newkey\", \"rsa:2048\", \"-nodes\",\n\t\t\t\"-days\", \"365\", \"-x509\", \"-addext\", \"subjectAltName = IP:\"+addr, \"-subj\", \"/CN=test\",\n\t\t\t\"-keyout\", kdzpath+ID+\".key\", \"-out\", kdzpath+ID+\".cert\")\n\t\terr := srvkeycmd.Run()\n\t\tif err != nil {\n\t\t\tif strings.HasSuffix(err.Error(), `executable file not found in %PATH%`) == true {\n\t\t\t\tfmt.Println(\"install wls for tls support (or implement your own!)\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\treturn ID + \".key\", ID + \".cert\"\n\t}\n}",
"func GenerateCert() (path string, err error) {\n\tpath, err = ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// generate a key\n\tprivKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// create a certificate template\n\ttmpl := &x509.Certificate{\n\t\tSerialNumber: big.NewInt(1),\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: \"jsonnet-controller\",\n\t\t\tOrganization: []string{\"pelotech\"},\n\t\t},\n\t\tDNSNames: []string{\n\t\t\t\"jsonnet-controller\",\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().Add(time.Hour * 24 * 365),\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\t// self-sign the certificate\n\tderBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &privKey.PublicKey, privKey)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Write files to disk\n\tcertPath := filepath.Join(path, \"tls.crt\")\n\tkeyPath := filepath.Join(path, \"tls.key\")\n\n\tcf, err := os.Create(certPath)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer cf.Close()\n\tif err = pem.Encode(cf, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}); err != nil {\n\t\treturn\n\t}\n\n\tkf, err := os.Create(keyPath)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer kf.Close()\n\tif err = pem.Encode(kf, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privKey)}); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func registerCertGen(app *kingpin.Application) (*kingpin.CmdClause, *certgenConfig) {\n\tvar certgenConfig certgenConfig\n\tcertgenApp := app.Command(\"certgen\", \"Generate new TLS certs for bootstrapping gRPC over TLS.\")\n\tcertgenApp.Arg(\"outputdir\", \"Directory to write output files into (default \\\"certs\\\").\").Default(\"certs\").StringVar(&certgenConfig.OutputDir)\n\n\t// NOTE: --certificate-lifetime can be used to accept Duration string once certificate rotation is supported.\n\tcertgenApp.Flag(\"certificate-lifetime\", \"Generated certificate lifetime (in days).\").Default(strconv.Itoa(certs.DefaultCertificateLifetime)).UintVar(&certgenConfig.Lifetime)\n\tcertgenApp.Flag(\"incluster\", \"Use in cluster configuration.\").BoolVar(&certgenConfig.InCluster)\n\tcertgenApp.Flag(\"kube\", \"Apply the generated certs directly to the current Kubernetes cluster.\").BoolVar(&certgenConfig.OutputKube)\n\tcertgenApp.Flag(\"kubeconfig\", \"Path to kubeconfig (if not in running inside a cluster).\").Default(filepath.Join(os.Getenv(\"HOME\"), \".kube\", \"config\")).StringVar(&certgenConfig.KubeConfig)\n\tcertgenApp.Flag(\"namespace\", \"Kubernetes namespace, used for Kube objects.\").Default(certs.DefaultNamespace).Envar(\"CONTOUR_NAMESPACE\").StringVar(&certgenConfig.Namespace)\n\tcertgenApp.Flag(\"overwrite\", \"Overwrite existing files or Secrets.\").BoolVar(&certgenConfig.Overwrite)\n\tcertgenApp.Flag(\"pem\", \"Render the generated certs as individual PEM files to the current directory.\").BoolVar(&certgenConfig.OutputPEM)\n\tcertgenApp.Flag(\"secrets-format\", \"Specify how to format the generated Kubernetes Secrets.\").Default(\"legacy\").StringVar(&certgenConfig.Format)\n\tcertgenApp.Flag(\"secrets-name-suffix\", \"Specify a suffix to be appended to the generated Kubernetes secrets' names.\").StringVar(&certgenConfig.NameSuffix)\n\tcertgenApp.Flag(\"yaml\", \"Render the generated certs as Kubernetes Secrets in YAML form to the current directory.\").BoolVar(&certgenConfig.OutputYAML)\n\n\treturn certgenApp, &certgenConfig\n}",
"func (c *Config) GenerateFiles(dir string) (err error) {\n\tcertFile := dir + string(os.PathSeparator) + \"cert.pem\"\n\tkeyFile := dir + string(os.PathSeparator) + \"key.pem\"\n\tcert, err := os.OpenFile(certFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)\n\tdefer cert.Close()\n\tkey, err := os.OpenFile(keyFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)\n\tdefer key.Close()\n\terr = c.Generate(cert, key)\n\treturn\n}",
"func makeCA() (ca *x509.Certificate, caKey *rsa.PrivateKey) {\n\tca = &x509.Certificate{\n\t\tSerialNumber: big.NewInt(1000),\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Demo CA Issuer\"},\n\t\t\tCountry: []string{\"US\"},\n\t\t\tLocality: []string{\"Denver\"},\n\t\t\tStreetAddress: []string{\"100 Any Street\"},\n\t\t\tPostalCode: []string{\"80001\"},\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(0, 0, 2),\n\t\tIsCA: true,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\tcaPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)\n\tcheckErr(\"failed generating CA key\", err)\n\n\tcaBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey)\n\tcheckErr(\"failed to create (self sign) CA certificate\", err)\n\n\t// encode as PEM and write to file the CA and CA's key\n\t//\n\twritePem(CAFILE, &pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: caBytes,\n\t})\n\tlog.Printf(\"successfully wrote CA to '%s'\", CAFILE)\n\n\twritePem(CAKEYFILE, &pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(caPrivKey),\n\t})\n\tlog.Printf(\"successfully wrote CA key to '%s'\", CAKEYFILE)\n\n\t// return the machine readable (PEM _decoded) CA + CA-Key\n\treturn ca, caPrivKey\n}",
"func GenerateCertificateFiles() (string, string, error) {\n\tcert, certKey, err := GenerateCertificate()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tcertFile, err := ioutil.TempFile(\"\", \"cert\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\t_, err = certFile.Write(cert)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\t_ = certFile.Close()\n\n\tcertKeyFile, err := ioutil.TempFile(\"\", \"certKey\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t_, err = certKeyFile.Write(certKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\t_ = certKeyFile.Close()\n\treturn certFile.Name(), certKeyFile.Name(), err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NamesInCert finds the domain names and IPs in a server certificate
|
func NamesInCert(cert_file string) (names []string) {
cert, err := ParseCertPemFile(cert_file)
if err != nil {
log.Printf("ParseCert %s: %v", cert_file, err)
return
}
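	// IP SANs come back as net.IP values; render them as strings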
for _, netip := range cert.IPAddresses {
ip := netip.String()
names = append(names, ip)
}
	// DNS SANs are already strings
	names = append(names, cert.DNSNames...)
return
}
|
[
"func getNameservers(resolvConf []byte) []string {\n\tnameservers := []string{}\n\tfor _, line := range getLines(resolvConf) {\n\t\tns := nsRegexp.FindSubmatch(line)\n\t\tif len(ns) > 0 {\n\t\t\tnameservers = append(nameservers, string(ns[1]))\n\t\t}\n\t}\n\treturn nameservers\n}",
"func GetNameservers(resolvConf []byte) []string {\n\tnameservers := []string{}\n\tfor _, line := range getLines(resolvConf, []byte(\"#\")) {\n\t\tns := nsRegexp.FindSubmatch(line)\n\t\tif len(ns) > 0 {\n\t\t\tnameservers = append(nameservers, string(ns[1]))\n\t\t}\n\t}\n\treturn nameservers\n}",
"func getAltNames(cfgAltNames []string, hostname, dnsdomain string, svcSubnet *net.IPNet) certutil.AltNames {\n\taltNames := certutil.AltNames{\n\t\tDNSNames: []string{\n\t\t\thostname,\n\t\t\t\"kubernetes\",\n\t\t\t\"kubernetes.default\",\n\t\t\t\"kubernetes.default.svc\",\n\t\t\tfmt.Sprintf(\"kubernetes.default.svc.%s\", dnsdomain),\n\t\t},\n\t}\n\n\t// Populate IPs/DNSNames from AltNames\n\tfor _, altname := range cfgAltNames {\n\t\tif ip := net.ParseIP(altname); ip != nil {\n\t\t\taltNames.IPs = append(altNames.IPs, ip)\n\t\t} else if len(validation.IsDNS1123Subdomain(altname)) == 0 {\n\t\t\taltNames.DNSNames = append(altNames.DNSNames, altname)\n\t\t}\n\t}\n\n\t// and lastly, extract the internal IP address for the API server\n\tinternalAPIServerVirtualIP, err := ipallocator.GetIndexedIP(svcSubnet, 1)\n\tif err != nil {\n\t\tfmt.Printf(\"[certs] WARNING: Unable to get first IP address from the given CIDR (%s): %v\\n\", svcSubnet.String(), err)\n\t}\n\taltNames.IPs = append(altNames.IPs, internalAPIServerVirtualIP)\n\treturn altNames\n}",
"func getDNSServers() []string {\n\tfile, err := ioutil.ReadFile(\"/etc/resolv.conf\")\n\tif err != nil {\n\t\treturn []string{}\n\t}\n\n\t// Lines of the form \"nameserver 1.2.3.4\" accumulate.\n\tnameservers := []string{}\n\n\tlines := strings.Split(string(file), \"\\n\")\n\tfor l := range lines {\n\t\ttrimmed := strings.TrimSpace(lines[l])\n\t\tif strings.HasPrefix(trimmed, \"#\") {\n\t\t\tcontinue\n\t\t}\n\t\tfields := strings.Fields(trimmed)\n\t\tif len(fields) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif fields[0] == \"nameserver\" {\n\t\t\tnameservers = append(nameservers, fields[1:]...)\n\t\t}\n\t}\n\n\tglog.V(3).Infof(\"nameservers to use: %v\", nameservers)\n\treturn nameservers\n}",
"func GetNameservers(resolvConf []byte, kind int) []string {\n\tvar nameservers []string\n\tfor _, line := range getLines(resolvConf, []byte(\"#\")) {\n\t\tvar ns [][]byte\n\t\tif kind == IP {\n\t\t\tns = nsRegexp.FindSubmatch(line)\n\t\t} else if kind == IPv4 {\n\t\t\tns = nsIPv4Regexpmatch.FindSubmatch(line)\n\t\t} else if kind == IPv6 {\n\t\t\tns = nsIPv6Regexpmatch.FindSubmatch(line)\n\t\t}\n\t\tif len(ns) > 0 {\n\t\t\tnameservers = append(nameservers, string(ns[1]))\n\t\t}\n\t}\n\treturn nameservers\n}",
"func lookupDomain(domain string) *[]string {\n\n\t// Split the domain by dots\n\tsplitDomain := strings.Split(domain, \".\")\n\trootDomain := strings.Join(splitDomain[len(splitDomain)-2:], \".\")\n\n\t// Check the cache for a existing lookup first\n\tresult, exists := cache[rootDomain]\n\tif exists {\n\t\treturn &result\n\t}\n\n\t// Connect to [tld].whois-server.net on port 43\n\tconn, err := net.DialTimeout(\n\t\t\"tcp\", \n\t\tnet.JoinHostPort(splitDomain[len(splitDomain)-1] + \".whois-servers.net\", \"43\"), \n\t\ttime.Second * 10,\n\t)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// Send a query for the root domain\n\tconn.Write([]byte(\"domain \" + rootDomain + \"\\r\\n\"))\n\tvar buffer []byte\n\tbuffer, err = ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// Cleanup\n\tconn.Close()\n\n\t// Save the result in cache\n\tresponse := string(buffer[:])\n\n\t// Look for a \"Status:\" line\n\tstatusRe := regexp.MustCompile(`Status:(.*)\\n`)\n\tstatus := statusRe.FindStringSubmatch(response)\n\n\t// If no match, or status is \"free\", probably not registered\n\tif status == nil || strings.TrimSpace(status[1]) == \"free\" {\n\t\treturn nil\n\t}\n\n\t// Else, grab the name servers\n\tnsRe := regexp.MustCompile(`(Name Server|Nserver|Nameserver):(.*)\\n`)\n\tns := nsRe.FindAllStringSubmatch(response, -1)\n\n\t// Extract the actual nameserver values\n\tservers := make([]string, len(ns))\n\tfor i, server := range ns {\n\n\t\t// Cleanup the name and add it to the list\n\t\tservers[i] = strings.ToLower(strings.TrimSpace(server[2]))\n\n\t}\n\t\n\t// Save it in the cache for later\n\tcache[rootDomain] = servers\n\n\t// Return the list\n\treturn &servers\n\n}",
"func ExtractDomains(spec *api.CertificateSpec) ([]string, error) {\n\tvar err error\n\tcn := spec.CommonName\n\tif cn == nil || *cn == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing common name\")\n\t}\n\tdnsNames := spec.DNSNames\n\tif spec.CommonName != nil {\n\t\tif spec.CSR != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot specify both commonName and csr\")\n\t\t}\n\t\tif len(spec.DNSNames) >= 100 {\n\t\t\treturn nil, fmt.Errorf(\"invalid number of DNS names: %d (max 99)\", len(spec.DNSNames))\n\t\t}\n\t\tcount := utf8.RuneCount([]byte(*spec.CommonName))\n\t\tif count > 64 {\n\t\t\treturn nil, fmt.Errorf(\"the Common Name is limited to 64 characters (X.509 ASN.1 specification), but first given domain %s has %d characters\", *spec.CommonName, count)\n\t\t}\n\t} else {\n\t\tif spec.CSR == nil {\n\t\t\treturn nil, fmt.Errorf(\"either domains or csr must be specified\")\n\t\t}\n\t\tcn, dnsNames, err = ExtractCommonNameAnDNSNames(spec.CSR)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn append([]string{*cn}, dnsNames...), nil\n}",
"func NamesFromCertificateRequest(req *x509.CertificateRequest) []string {\n\tvar names []string\n\n\tif req.Subject.CommonName != \"\" {\n\t\tnames = append(names, req.Subject.CommonName)\n\t}\n\n\tfor _, n := range req.DNSNames {\n\t\tif req.Subject.CommonName == n {\n\t\t\tcontinue\n\t\t}\n\t\tnames = append(names, n)\n\t}\n\n\treturn names\n}",
"func subjectAltNames(namespace, svcName string) ([]string, []net.IP) {\n\treturn []string{\n\t\t\"localhost\",\n\t\tsvcName,\n\t\tfmt.Sprintf(\"%v.%v.svc\", svcName, namespace),\n\t\tfmt.Sprintf(\"%v.%v.svc.cluster.local\", svcName, namespace),\n\t}, []net.IP{net.ParseIP(\"127.0.0.1\")}\n}",
"func GetNameserversAsCIDR(resolvConf []byte) []string {\n\tvar nameservers []string\n\tfor _, nameserver := range GetNameservers(resolvConf, IP) {\n\t\tvar address string\n\t\t// If IPv6, strip zone if present\n\t\tif strings.Contains(nameserver, \":\") {\n\t\t\taddress = strings.Split(nameserver, \"%\")[0] + \"/128\"\n\t\t} else {\n\t\t\taddress = nameserver + \"/32\"\n\t\t}\n\t\tnameservers = append(nameservers, address)\n\t}\n\treturn nameservers\n}",
"func getDNSNameservers(resolvConfPath string) ([]string, error) {\n\tif resolvConfPath == \"\" {\n\t\tresolvConfPath = defaultResolvConfPath\n\t}\n\n\tfile, err := os.Open(resolvConfPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Could not open '%s'.\", resolvConfPath)\n\t}\n\tdefer mustClose(file)\n\n\tscanner := bufio.NewScanner(file)\n\n\tvar servers []string\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tmatch := resolvConfNameserverPattern.FindStringSubmatch(line)\n\t\tif len(match) == 2 {\n\t\t\tservers = append(servers, match[1])\n\t\t}\n\t}\n\n\tif err = scanner.Err(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Could not read '%s'.\", resolvConfPath)\n\t}\n\n\tif len(servers) == 0 {\n\t\treturn nil, errors.Errorf(\"No nameservers found in '%s'.\", resolvConfPath)\n\t}\n\n\treturn servers, nil\n}",
"func GetAllCertnames(c *puppetdb.Client) []string {\n\tcertnames := []string{}\n\tnodes, err := c.Nodes()\n\tif err == nil {\n\t\tfor _, n := range nodes {\n\t\t\tif !stringInSlice(n.Certname, certnames) {\n\t\t\t\tcertnames = append(certnames, n.Certname)\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn certnames\n}",
"func parseNameServer() ([]net.IP, error) {\n\tfile, err := os.Open(\"/etc/resolv.conf\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening /etc/resolv.conf: %v\", err)\n\t}\n\tdefer file.Close()\n\n\tscan := bufio.NewScanner(file)\n\tscan.Split(bufio.ScanLines)\n\n\tip := make([]net.IP, 0)\n\n\tfor scan.Scan() {\n\t\tserverString := scan.Text()\n\t\tif strings.Contains(serverString, \"nameserver\") {\n\t\t\ttmpString := strings.Replace(serverString, \"nameserver\", \"\", 1)\n\t\t\tnameserver := strings.TrimSpace(tmpString)\n\t\t\tsip := net.ParseIP(nameserver)\n\t\t\tif sip != nil && !sip.Equal(config.Config.ListenIP) {\n\t\t\t\tip = append(ip, sip)\n\t\t\t}\n\t\t}\n\t}\n\tif len(ip) == 0 {\n\t\treturn nil, fmt.Errorf(\"there is no nameserver in /etc/resolv.conf\")\n\t}\n\treturn ip, nil\n}",
"func (c *Client) DNSNameservers(ctx context.Context) ([]string, error) {\n\tconst uriFmt = \"/api/v2/domain/%v/dns/nameservers\"\n\n\treq, err := c.buildRequest(ctx, http.MethodGet, fmt.Sprintf(uriFmt, c.domain), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp DomainDNSNameservers\n\tif err = c.performRequest(req, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.DNS, nil\n}",
"func getDNSConf() []string {\n\tservers := []string{}\n\t_, err := os.Stat(\"/etc/resolv.conf\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tj, _ := dns.ClientConfigFromFile(\"/etc/resolv.conf\")\n\n\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[0]))\n\tif len(servers) < 2 {\n\t\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[0]))\n\t} else {\n\t\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[1]))\n\t}\n\n\treturn servers\n\n}",
"func (m *MacOSEnterpriseWiFiConfiguration) GetTrustedServerCertificateNames()([]string) {\n val, err := m.GetBackingStore().Get(\"trustedServerCertificateNames\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}",
"func ExtractCommonNameAnDNSNames(csr []byte) (cn *string, san []string, err error) {\n\tcertificateRequest, err := extractCertificateRequest(csr)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"parsing CSR failed: %w\", err)\n\t\treturn\n\t}\n\tcnvalue := certificateRequest.Subject.CommonName\n\tcn = &cnvalue\n\tsan = certificateRequest.DNSNames[:]\n\tfor _, ip := range certificateRequest.IPAddresses {\n\t\tsan = append(san, ip.String())\n\t}\n\treturn\n}",
"func (v *Validator) FindNameservers(ctx context.Context, qname string) (nss []string, err error) {\n\tfor ok := true; ok; qname, ok = parent(qname) {\n\t\trrs, err := v.resolver.ResolveCtx(ctx, qname, \"NS\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, rr := range rrs {\n\t\t\tif rr.Type == \"NS\" {\n\t\t\t\tnss = append(nss, rr.Value)\n\t\t\t}\n\t\t}\n\t\tif len(nss) > 0 {\n\t\t\treturn nss, nil\n\t\t}\n\t}\n\n\treturn nil, errors.New(\"not found\")\n}",
"func getSearchDomains(resolvConf []byte) []string {\n\tdomains := []string{}\n\tfor _, line := range getLines(resolvConf) {\n\t\tmatch := searchRegexp.FindSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\t\tdomains = strings.Fields(string(match[1]))\n\t}\n\treturn domains\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ParseCertPemFile reads a PEM file and returns the parsed cert
|
func ParseCertPemFile(cert_file string) (cert *x509.Certificate, err error) {
cert_data, err := os.ReadFile(cert_file)
if err != nil {
err = fmt.Errorf("Read ca-cert.pem: %v", err)
return
}
return ParsePem(cert_data)
}
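
// A minimal usage sketch, again with the hypothetical path "server-cert.pem";
// NotBefore and NotAfter are standard x509.Certificate fields.
func printCertValidity() {
    cert, err := ParseCertPemFile("server-cert.pem")
    if err != nil {
        log.Printf("parse: %v", err)
        return
    }
    fmt.Printf("valid from %s until %s\n", cert.NotBefore, cert.NotAfter)
}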
|
[
"func parseCertificate(path string) (cert *x509.Certificate, err error) {\n var pemData []byte\n\n pemData, err = ioutil.ReadFile(path)\n if err != nil {\n return\n }\n block, rest := pem.Decode(pemData)\n if block == nil || len(rest) != 0 {\n err = errors.New(\"Failed to decode the PEM certificate\")\n return\n }\n cert, err = x509.ParseCertificate(block.Bytes)\n\n return\n}",
"func parsePEMCert(certData []byte) (*x509.Certificate, error) {\n\tpemBlock, trailingData := pem.Decode(certData)\n\tif pemBlock == nil {\n\t\treturn nil, fmt.Errorf(\"invalid PEM data\")\n\t}\n\tif len(trailingData) != 0 {\n\t\treturn nil, fmt.Errorf(\"trailing data after first PEM block\")\n\t}\n\treturn x509.ParseCertificate(pemBlock.Bytes)\n}",
"func ParseCertPEM(certPem []byte) (output []*x509.Certificate, err error) {\n\tfor len(certPem) > 0 {\n\t\tvar block *pem.Block\n\t\tblock, certPem = pem.Decode(certPem)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\tif block.Type != BlockTypeCertificate || len(block.Headers) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tcert, certErr := x509.ParseCertificate(block.Bytes)\n\t\tif certErr != nil {\n\t\t\terr = ex.New(certErr)\n\t\t\treturn\n\t\t}\n\t\toutput = append(output, cert)\n\t}\n\n\treturn\n}",
"func getCertFromPem(pemBytes []byte) (*x509.Certificate, error) {\n\tpemCert, _ := pem.Decode(pemBytes)\n\tif pemCert == nil || pemCert.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"decoding pem bytes: %v\", pemBytes)\n\t}\n\n\tcert, err := x509.ParseCertificate(pemCert.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing x509 cert: %v\", err)\n\t}\n\n\treturn cert, nil\n}",
"func getCertFromPem(pemBytes []byte) (*x509.Certificate, error) {\n\tpemCert, _ := pem.Decode(pemBytes)\n\tif pemCert == nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode pem bytes: %v\", pemBytes)\n\t}\n\n\tcert, err := x509.ParseCertificate(pemCert.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse x509 cert: %v\", err)\n\t}\n\n\treturn cert, nil\n}",
"func ParseCert(pemcert []byte) (*x509.Certificate, error) {\n\tblock, _ := pem.Decode(pemcert)\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cert, nil\n}",
"func parseCertificate(path string) *x509.Certificate {\n\tcertBytes, err := os.ReadFile(path)\n\tExpect(err).NotTo(HaveOccurred())\n\tpemBlock, _ := pem.Decode(certBytes)\n\tcert, err := x509.ParseCertificate(pemBlock.Bytes)\n\tExpect(err).NotTo(HaveOccurred())\n\treturn cert\n}",
"func LoadPemEncodedCertificate(name string) (certificate *x509.Certificate, err error) {\n\tvar content []byte\n\n\tif content, err = Load(name); err != nil {\n\t\tlog.Fatalf(\"%s\\n\", err)\n\t} else {\n\t\tcertificate, err = tlsutil.ParsePEMEncodedCACert(content)\n\t\tif err != nil {\n\t\t\tlog.Println(\"LoadPemEncodedCertificate\", name)\n\t\t\tpanic(err)\n\t\t}\n\t\tif app.Debug {\n\t\t\tfmt.Println(Jsonify(certificate))\n\t\t}\n\t}\n\treturn certificate, err\n}",
"func loadCaCertPem(in io.Reader) ([]byte, error) {\n\tcaCertPemBytes, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblock, _ := pem.Decode(caCertPemBytes)\n\tif block == nil {\n\t\treturn nil, errors.New(\"could not decode pem\")\n\t}\n\tif block.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"ca bundle contains wrong pem type: %q\", block.Type)\n\t}\n\tif _, err := x509.ParseCertificate(block.Bytes); err != nil {\n\t\treturn nil, fmt.Errorf(\"ca bundle contains invalid x509 certificate: %v\", err)\n\t}\n\treturn caCertPemBytes, nil\n}",
"func parseCert(crtPEM []byte) (*bcx509.Certificate, error) {\n\tcertBlock, _ := pem.Decode(crtPEM)\n\tif certBlock == nil {\n\t\treturn nil, fmt.Errorf(\"decode pem failed, invalid certificate\")\n\t}\n\n\tcert, err := bcx509.ParseCertificate(certBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"x509 parse cert failed, %s\", err)\n\t}\n\n\treturn cert, nil\n}",
"func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) {\n\tok := false\n\tcerts := []*x509.Certificate{}\n\tfor len(pemCerts) > 0 {\n\t\tvar block *pem.Block\n\t\tblock, pemCerts = pem.Decode(pemCerts)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\t// Only use PEM \"CERTIFICATE\" blocks without extra headers\n\t\tif block.Type != \"CERTIFICATE\" || len(block.Headers) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn certs, err\n\t\t}\n\n\t\tcerts = append(certs, cert)\n\t\tok = true\n\t}\n\n\tif !ok {\n\t\treturn certs, errors.New(\"could not read any certificates\")\n\t}\n\treturn certs, nil\n}",
"func parsePem(data []byte) (*pem.Block, error) {\n\tblock, remainder := pem.Decode(data)\n\tif block == nil {\n\t\treturn nil, errors.New(\"invalid format\")\n\t}\n\tif len(remainder) > 0 {\n\t\treturn nil, errors.New(\"additional information in pem, file should contain a single public or private key\")\n\t}\n\treturn block, nil\n}",
"func loadPem(name, expectedType string) ([]byte, error) {\n\tpems, err := ioutil.ReadFile(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbin, rest := pem.Decode(pems)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif bin.Type != expectedType {\n\t\treturn nil, fmt.Errorf(\"Expecting BEGIN %s\", expectedType)\n\t}\n\tif len(rest) != 0 {\n\t\treturn nil, errors.New(\"Extraneous file data after certificate\")\n\t}\n\n\treturn bin.Bytes, nil\n}",
"func ParsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err error) {\n\t// Read certificate file.\n\tvar data []byte\n\tif data, err = ioutil.ReadFile(certFile); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t// Trimming leading and tailing white spaces.\n\tdata = bytes.TrimSpace(data)\n\n\t// Parse all certs in the chain.\n\tcurrent := data\n\tfor len(current) > 0 {\n\t\tvar pemBlock *pem.Block\n\t\tif pemBlock, current = pem.Decode(current); pemBlock == nil {\n\t\t\treturn nil, fmt.Errorf(\"could not read PEM block from file %s\", certFile)\n\t\t}\n\n\t\tvar x509Cert *x509.Certificate\n\t\tif x509Cert, err = x509.ParseCertificate(pemBlock.Bytes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tx509Certs = append(x509Certs, x509Cert)\n\t}\n\n\tif len(x509Certs) == 0 {\n\t\treturn nil, fmt.Errorf(\"empty public certificate file %s\", certFile)\n\t}\n\n\treturn x509Certs, nil\n}",
"func decodePEMFile(filePath string) ([]byte, error) {\n\tf, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tbuf, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp, _ := pem.Decode(buf)\n\tif p == nil {\n\t\treturn nil, fmt.Errorf(\"no pem block found\")\n\t}\n\treturn p.Bytes, nil\n}",
"func readCert(t *testing.T) []byte {\n\tcert, err := ioutil.ReadFile(\"testdata/root.pem\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading cert: %s\", err.Error())\n\t}\n\treturn cert\n}",
"func ParseCertificate(pemBytes []byte) (*x509.Certificate, error) {\n\tblock, _ := pem.Decode(pemBytes)\n\tif block == nil {\n\t\treturn nil, errors.Wrap(ErrInvalidPEMBlock, \"failed to decode certificate\")\n\t}\n\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\treturn cert, errors.Wrap(err, \"failed to parse certificate\")\n}",
"func parseCertificate(cert []byte) (*x509.Certificate, error) {\n\tblock, _ := pem.Decode(cert)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse PEM certificate\")\n\t}\n\n\tx509Cert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse certificate: %w\", err)\n\t}\n\n\treturn x509Cert, nil\n}",
"func ReadCert(filename string) (*x509.Certificate, error) {\n\tblock, err := ReadBlock(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := IsType(block, certPEMType); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse certificate: %w\", err)\n\t}\n\n\treturn cert, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetFingerprint return SHA256 fingerprint of a cert
|
func GetFingerprint(cert_file string) string {
cert, err := ParseCertPemFile(cert_file)
if err != nil {
log.Printf("GetFingerprint: ParseCert %s: %v", cert_file, err)
return ""
}
return SHA256SumRaw(cert.Raw)
}
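
// A minimal certificate-pinning sketch built on the function above; the
// expected fingerprint is a hypothetical value supplied by the caller.
func fingerprintMatches(cert_file, expected string) bool {
    fp := GetFingerprint(cert_file)
    return fp != "" && fp == expected
}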
|
[
"func Fingerprint(certificate []byte) FingerprintBytes {\n\treturn sha256.Sum256(certificate)\n}",
"func GetFingerPrint(signer ssh.Signer) (string, string) {\n\treturn ssh.FingerprintLegacyMD5(signer.PublicKey()),\n\t\tssh.FingerprintSHA256(signer.PublicKey())\n}",
"func getFingerprint(fn string) (fingerprint string, err kv.Error) {\n\tdata, errGo := ioutil.ReadFile(fn)\n\tif errGo != nil {\n\t\treturn \"\", kv.Wrap(errGo).With(\"filename\", fn).With(\"stack\", stack.Trace().TrimRuntime())\n\t}\n\n\tkey, err := extractPubKey(data)\n\tif err != nil {\n\t\treturn \"\", err.With(\"filename\", fn)\n\t}\n\n\treturn ssh.FingerprintSHA256(key), nil\n}",
"func HandleCertFingerprintRequest(w http.ResponseWriter, req *http.Request) {\n\t//logger.Info.Printf(\"Cert Hash\")\n\tfingerprint, err := GetCertificateFingerprint()\n\tif err != nil {\n\t\tlogger.Error.Printf(\"%s\\n\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(fingerprint)\n}",
"func spkiFingerprint(cert string) (fingerprint, error) {\n\tprivateKeyFile, err := os.Open(cert)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %w\", cert, err)\n\t}\n\tdefer privateKeyFile.Close()\n\n\tpemFileInfo, err := privateKeyFile.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar size int64 = pemFileInfo.Size()\n\tpemBytes := make([]byte, size)\n\tbuffer := bufio.NewReader(privateKeyFile)\n\t_, err = buffer.Read(pemBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Get first block of PEM file\n\tdata, rest := pem.Decode([]byte(pemBytes))\n\tconst certificateBlock = \"CERTIFICATE\"\n\tif data.Type != certificateBlock {\n\t\tfor len(rest) > 0 {\n\t\t\tdata, rest = pem.Decode(rest)\n\t\t\tif data.Type == certificateBlock {\n\t\t\t\t// Sign the CERTIFICATE block with SHA1\n\t\t\t\th := sha1.New()\n\t\t\t\t_, err := h.Write(data.Bytes)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn fingerprint(h.Sum(nil)), nil\n\t\t\t}\n\t\t}\n\t\treturn nil, errors.New(\"Cannot find CERTIFICATE in file\")\n\t}\n\th := sha1.New()\n\t_, err = h.Write(data.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fingerprint(h.Sum(nil)), nil\n}",
"func (cs *cs3a) fingerprint() (string, string) {\n\treturn fmt.Sprintf(\"%x\", cs.id), fmt.Sprintf(\"%x\", cs.fingerprintBin)\n}",
"func (socket *Socket) CertFP() (string, error) {\n\tvar tlsConn, isTLS = socket.conn.(*tls.Conn)\n\tif !isTLS {\n\t\treturn \"\", errNotTLS\n\t}\n\n\t// ensure handehake is performed, and timeout after a few seconds\n\ttlsConn.SetDeadline(time.Now().Add(handshakeTimeout))\n\terr := tlsConn.Handshake()\n\ttlsConn.SetDeadline(time.Time{})\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpeerCerts := tlsConn.ConnectionState().PeerCertificates\n\tif len(peerCerts) < 1 {\n\t\treturn \"\", errNoPeerCerts\n\t}\n\n\trawCert := sha256.Sum256(peerCerts[0].Raw)\n\tfingerprint := hex.EncodeToString(rawCert[:])\n\n\treturn fingerprint, nil\n}",
"func TestRequestFingerprint(t *testing.T) {\n\n\treq, err := http.NewRequest(\"GET\", \"http://example.com\", nil)\n\texpect(t, err, nil)\n\n\tfp := getRequestFingerprint(req)\n\n\texpect(t, fp, \"92a65ed4ca2b7100037a4cba9afd15ea\")\n\n}",
"func fingerprint(b []byte) string {\n\tvar buf bytes.Buffer\n\n\ths := fmt.Sprintf(\"%x\", sha256.Sum256(b))\n\n\tfor i, c := range hs {\n\t\tbuf.WriteByte(byte(c))\n\t\tif (i+1)%2 == 0 && i != len(hs)-1 {\n\t\t\tbuf.WriteByte(byte(':'))\n\t\t}\n\t}\n\n\treturn buf.String()\n}",
"func fingerprintKey(s string) (fingerprint string, err error) {\n\tdata, err := base64.StdEncoding.DecodeString(s)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Can't base64 decode original key\")\n\t}\n\tsha256 := sha256.New()\n\tsha256.Write(data)\n\tb64 := base64.StdEncoding.EncodeToString(sha256.Sum(nil))\n\treturn strings.TrimRight(b64, \"=\"), nil\n}",
"func (a *AuthorizationRequest) GetFingerprint() string {\n\tif a == nil || a.Fingerprint == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.Fingerprint\n}",
"func flattenCertificateCertificateDescriptionCertFingerprint(c *Client, i interface{}, res *Certificate) *CertificateCertificateDescriptionCertFingerprint {\n\tm, ok := i.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tr := &CertificateCertificateDescriptionCertFingerprint{}\n\n\tif dcl.IsEmptyValueIndirect(i) {\n\t\treturn EmptyCertificateCertificateDescriptionCertFingerprint\n\t}\n\tr.Sha256Hash = dcl.FlattenString(m[\"sha256Hash\"])\n\n\treturn r\n}",
"func (a *Authorization) GetFingerprint() string {\n\tif a == nil || a.Fingerprint == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.Fingerprint\n}",
"func fingerprint(str []byte) uint64 {\n\tvar hi = hash32(str, 0, len(str), 0)\n\tvar lo = hash32(str, 0, len(str), 102072)\n\tif (hi == 0) && (lo == 0 || lo == 1) {\n\t\t// Turn 0/1 into another fingerprint\n\t\thi ^= 0x130f9bef\n\t\tlo ^= 0x94a0a928\n\t}\n\treturn (uint64(hi) << 32) | uint64(lo&0xffffffff)\n}",
"func expandCertificateCertificateDescriptionCertFingerprint(c *Client, f *CertificateCertificateDescriptionCertFingerprint, res *Certificate) (map[string]interface{}, error) {\n\tif dcl.IsEmptyValueIndirect(f) {\n\t\treturn nil, nil\n\t}\n\n\tm := make(map[string]interface{})\n\tif v := f.Sha256Hash; !dcl.IsEmptyValueIndirect(v) {\n\t\tm[\"sha256Hash\"] = v\n\t}\n\n\treturn m, nil\n}",
"func (pk *PublicKey) Fingerprint() []byte {\n\tb := pk.Serialize(nil)\n\th := sha1.New()\n\th.Write(b[2:])\n\treturn h.Sum(nil)\n}",
"func GetThumbprint(cert string) ([sha1.Size]byte, error) {\n\tcertStr := strings.ReplaceAll(cert, \"-----BEGIN CERTIFICATE-----\", \"\")\n\tcertStr = strings.ReplaceAll(certStr, \"-----END CERTIFICATE-----\", \"\")\n\tcertStr = strings.ReplaceAll(certStr, \"\\n\", \"\")\n\n\tdata, err := base64.StdEncoding.DecodeString(certStr)\n\tif err != nil {\n\t\treturn [20]byte{}, err\n\t}\n\n\treturn sha1.Sum(data), nil\n}",
"func (pr *PkgDecoder) Fingerprint() [8]byte {\n\tvar fp [8]byte\n\tcopy(fp[:], pr.elemData[len(pr.elemData)-8:])\n\treturn fp\n}",
"func (keyRing *KeyRing) GetFingerprint() (string, error) {\n\tfor _, entity := range keyRing.entities {\n\t\tfp := entity.PrimaryKey.Fingerprint\n\t\treturn hex.EncodeToString(fp[:]), nil\n\t}\n\treturn \"\", errors.New(\"can't find public key\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RequestBindIPToNatgateway in Aliyun does not need to check the EIP again, which differs from SManagerResongDriver.RequestBindIPToNatgateway, because ieip.Associate will fail if the EIP has already been associated
|
func (self *SAliyunRegionDriver) RequestBindIPToNatgateway(ctx context.Context, task taskman.ITask, natgateway *models.SNatGateway,
eipId string) error {
taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) {
model, err := models.ElasticipManager.FetchById(eipId)
if err != nil {
return nil, err
}
lockman.LockObject(ctx, model)
defer lockman.ReleaseObject(ctx, model)
eip := model.(*models.SElasticip)
iregion, err := natgateway.GetIRegion()
if err != nil {
return nil, err
}
ieip, err := iregion.GetIEipById(eip.GetExternalId())
if err != nil {
return nil, errors.Wrap(err, "fetch eip failed")
}
conf := &cloudprovider.AssociateConfig{
InstanceId: natgateway.GetExternalId(),
Bandwidth: eip.Bandwidth,
AssociateType: api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY,
}
err = ieip.Associate(conf)
if err != nil {
return nil, errors.Wrap(err, "fail to bind eip to natgateway")
}
err = cloudprovider.WaitStatus(ieip, api.EIP_STATUS_READY, 5*time.Second, 100*time.Second)
if err != nil {
return nil, err
}
        // persist the new association in the database
_, err = db.Update(eip, func() error {
eip.AssociateType = api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY
eip.AssociateId = natgateway.GetId()
return nil
})
if err != nil {
return nil, errors.Wrapf(err, "fail to update eip '%s' in database", eip.Id)
}
return nil, nil
})
return nil
}
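
// The associate-then-wait core of the task above, reduced to a sketch. The
// helper name is hypothetical; only identifiers already used by the function
// above appear here, and the interface name is assumed from GetIEipById's
// return type.
func associateEipToNat(ieip cloudprovider.ICloudEIP, natExternalId string, bandwidth int) error {
    conf := &cloudprovider.AssociateConfig{
        InstanceId:    natExternalId,
        Bandwidth:     bandwidth,
        AssociateType: api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY,
    }
    if err := ieip.Associate(conf); err != nil {
        return errors.Wrap(err, "fail to bind eip to natgateway")
    }
    // Block until the cloud reports the EIP ready, mirroring the task body.
    return cloudprovider.WaitStatus(ieip, api.EIP_STATUS_READY, 5*time.Second, 100*time.Second)
}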
|
[
"func (as *SCIONLabAS) BindIP(isVPN bool, connectionIP string) string {\n\tif isVPN {\n\t\treturn connectionIP\n\t}\n\treturn as.ServerIP()\n}",
"func (service *HTTPRestService) reserveIPAddress(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"[Azure CNS] reserveIPAddress\")\n\n\tvar req cns.ReserveIPAddressRequest\n\treturnMessage := \"\"\n\treturnCode := 0\n\taddr := \"\"\n\taddress := \"\"\n\terr := service.Listener.Decode(w, r, &req)\n\n\tlog.Request(service.Name, &req, err)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif req.ReservationID == \"\" {\n\t\treturnCode = ReservationNotFound\n\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. ReservationId is empty\")\n\t}\n\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tic := service.ipamClient\n\n\t\tifInfo, err := service.imdsClient.GetPrimaryInterfaceInfoFromMemory()\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetPrimaryIfaceInfo failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\tasID, err := ic.GetAddressSpace()\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetAddressSpace failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\tpoolID, err := ic.GetPoolID(asID, ifInfo.Subnet)\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetPoolID failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\taddr, err = ic.ReserveIPAddress(poolID, req.ReservationID)\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] ReserveIpAddress failed with %+v\", err.Error())\n\t\t\treturnCode = AddressUnavailable\n\t\t\tbreak\n\t\t}\n\n\t\taddressIP, _, err := net.ParseCIDR(addr)\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] ParseCIDR failed with %+v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\t\taddress = addressIP.String()\n\n\tdefault:\n\t\treturnMessage = \"[Azure CNS] Error. ReserveIP did not receive a POST.\"\n\t\treturnCode = InvalidParameter\n\n\t}\n\n\tresp := cns.Response{\n\t\tReturnCode: returnCode,\n\t\tMessage: returnMessage,\n\t}\n\n\treserveResp := &cns.ReserveIPAddressResponse{Response: resp, IPAddress: address}\n\terr = service.Listener.Encode(w, &reserveResp)\n\tlog.Response(service.Name, reserveResp, resp.ReturnCode, ReturnCodeToString(resp.ReturnCode), err)\n}",
"func (c *Client) BindIPStrategy(request *BindIPStrategyRequest) (response *BindIPStrategyResponse, err error) {\n if request == nil {\n request = NewBindIPStrategyRequest()\n }\n response = NewBindIPStrategyResponse()\n err = c.Send(request, response)\n return\n}",
"func configureApInterface(ip string, iface string) error {\n\tcmd := exec.Command(\"ip\", \"addr\", \"add\", ip, \"dev\", iface)\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\treturn nil\n\t}\n\n\treturn nil\n}",
"func (rndr *Renderer) AddIPSecIPinIPVpnTunnel(serviceInfo *common.ServiceInfo, sp *sasemodel.IPSecVpnTunnel, reSync bool) error {\n\tvppIPIPTunnel := &vpp_interfaces.IPIPLink{\n\t\tTunnelMode: vpp_interfaces.IPIPLink_POINT_TO_POINT,\n\t\tSrcAddr: sp.TunnelSourceIp,\n\t\tDstAddr: sp.TunnelDestinationIp,\n\t}\n\n\tvppIPinIPInterface := &vpp_interfaces.Interface{\n\t\tName: sp.TunnelName,\n\t\tType: vpp_interfaces.Interface_IPIP_TUNNEL,\n\t\tEnabled: true,\n\t\tLink: &vpp_interfaces.Interface_Ipip{\n\t\t\tIpip: vppIPIPTunnel,\n\t\t},\n\t}\n\n\t// Check for Tunnel Interface IP configuration\n\tif sp.InterfaceType == config.UnnumberedIP {\n\t\tintfName := rndr.GetInterfaceNameWithIP(serviceInfo, sp.TunnelSourceIp)\n\t\trndr.Log.Debug(\"AddIPSecIPinIPVpnTunnel: unnummbered Interface: \", intfName)\n\t\tif intfName != config.Invalid {\n\t\t\tvppIPinIPInterface.Unnumbered = &vpp_interfaces.Interface_Unnumbered{\n\t\t\t\tInterfaceWithIp: intfName,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvppIPinIPInterface.IpAddresses = append(vppIPinIPInterface.IpAddresses, sp.TunnelSourceIp)\n\t}\n\n\trndr.Log.Info(\"AddIPSecIPinIPVpnTunnel: vppIPinIPInterface: \", vppIPinIPInterface)\n\n\t// Test Purpose\n\tif rndr.MockTest {\n\t\treturn renderer.MockCommit(serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface, config.Add)\n\t}\n\n\t// Commit is for local base vpp vswitch\n\tif serviceInfo.GetServicePodLabel() == common.GetBaseServiceLabel() {\n\t\trndr.Log.Info(\" AddIPSecIPinIPVpnTunnel: Post txn to local vpp agent\",\n\t\t\t\"Key: \", vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), \"Value: \", vppIPinIPInterface)\n\t\tif reSync == true {\n\t\t\ttxn := rndr.ResyncTxnFactory()\n\t\t\ttxn.Put(vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface)\n\t\t} else {\n\t\t\ttxn := rndr.UpdateTxnFactory(fmt.Sprintf(\"IPinIPVpnTunnel %s\", vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name)))\n\t\t\ttxn.Put(vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface)\n\t\t}\n\t} else {\n\t\trenderer.Commit(rndr.RemoteDB, serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface, config.Add)\n\t}\n\n\t// Get Security Association Information from the SA Name reference\n\tsa, err := rndr.CacheSAConfigGet(sp.SecurityAssociation)\n\tif err != nil {\n\t\trndr.Log.Debug(\"AddIPSecIPinIPVpnTunnel: Security Association Not Found: \", sp.SecurityAssociation)\n\n\t\t// Add the dependency in the pending tunnel protect list\n\t\trndr.AddTunnelToPendingTunnelProtectList(sp.TunnelName, sp.SecurityAssociation)\n\t\treturn nil\n\t}\n\n\tvar saIn, saOut []uint32\n\tsaIn = append(saIn, uint32(sa.InboundID))\n\tsaOut = append(saOut, uint32(sa.OutboundID))\n\n\trndr.Log.Info(\"AddIPSecIPinIPVpnTunnel: Protect the Tunnel with SA: \")\n\treturn rndr.IPSecTunnelProtectionAdd(serviceInfo, sp.TunnelName, saIn, saOut, reSync)\n}",
"func maybeUpdateBridgeIPAddr(\n\tctx *zedrouterContext,\n\tifname string) {\n\n\tstatus := getSwitchNetworkInstanceUsingPort(ctx, ifname)\n\tif status == nil {\n\t\treturn\n\t}\n\tlog.Infof(\"maybeUpdateBridgeIPAddr: found \"+\n\t\t\"NetworkInstance %s\", status.DisplayName)\n\n\tif !status.Activated {\n\t\tlog.Errorf(\"maybeUpdateBridgeIPAddr: \"+\n\t\t\t\"network instance %s not activated\\n\", status.DisplayName)\n\t\treturn\n\t}\n\tupdateBridgeIPAddr(ctx, status)\n\treturn\n}",
"func (b *Bridge) setIP() error {\n\tif b.IP == nil {\n\t\treturn nil\n\t}\n\tcmd := exec.Command(\"ifconfig\", b.Device, b.IP.String(), \"netmask\", fmt.Sprintf(\"0x%s\", b.Netmask.String()))\n\tfmt.Printf(\"cmd: %s\\n\", strings.Join(cmd.Args, \" \"))\n\treturn cmd.Run()\n}",
"func (nm *networkManager) applyIPConfig(extIf *externalInterface, targetIf *net.Interface) error {\n\t// Add IP addresses.\n\tfor _, addr := range extIf.IPAddresses {\n\t\tlog.Printf(\"[net] Adding IP address %v to interface %v.\", addr, targetIf.Name)\n\n\t\terr := netlink.AddIpAddress(targetIf.Name, addr.IP, addr)\n\t\tif err != nil && !strings.Contains(strings.ToLower(err.Error()), \"file exists\") {\n\t\t\tlog.Printf(\"[net] Failed to add IP address %v: %v.\", addr, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Add IP routes.\n\tfor _, route := range extIf.Routes {\n\t\troute.LinkIndex = targetIf.Index\n\n\t\tlog.Printf(\"[net] Adding IP route %+v.\", route)\n\n\t\terr := netlink.AddIpRoute((*netlink.Route)(route))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[net] Failed to add IP route %v: %v.\", route, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (rndr *Renderer) AddIPinIPTunnel(serviceInfo *common.ServiceInfo, sp *sasemodel.IPSecVpnTunnel, reSync bool) error {\n\tvppIPIPTunnel := &vpp_interfaces.IPIPLink{\n\t\tTunnelMode: vpp_interfaces.IPIPLink_POINT_TO_POINT,\n\t\tSrcAddr: sp.TunnelSourceIp,\n\t\tDstAddr: sp.TunnelDestinationIp,\n\t}\n\n\tvppIPinIPInterface := &vpp_interfaces.Interface{\n\t\tName: sp.TunnelName,\n\t\tType: vpp_interfaces.Interface_IPIP_TUNNEL,\n\t\tEnabled: true,\n\t\tLink: &vpp_interfaces.Interface_Ipip{\n\t\t\tIpip: vppIPIPTunnel,\n\t\t},\n\t}\n\n\t// Check for Tunnel Interface IP configuration\n\tif sp.InterfaceType == config.UnnumberedIP {\n\t\tintfName := rndr.GetInterfaceNameWithIP(serviceInfo, sp.TunnelSourceIp)\n\t\trndr.Log.Debug(\"AddIPinIPTunnel: unnummbered Interface: \", intfName)\n\t\tif intfName != config.Invalid {\n\t\t\tvppIPinIPInterface.Unnumbered = &vpp_interfaces.Interface_Unnumbered{\n\t\t\t\tInterfaceWithIp: intfName,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvppIPinIPInterface.IpAddresses = append(vppIPinIPInterface.IpAddresses, sp.TunnelSourceIp)\n\t}\n\n\trndr.Log.Info(\"AddIPinIPTunnel: vppIPinIPInterface: \", vppIPinIPInterface)\n\n\t// Test Purpose\n\tif rndr.MockTest {\n\t\treturn renderer.MockCommit(serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface, config.Add)\n\t}\n\n\t// Commit is for local base vpp vswitch\n\tif serviceInfo.GetServicePodLabel() == common.GetBaseServiceLabel() {\n\t\trndr.Log.Info(\" AddIPinIPTunnel: Post txn to local vpp agent\",\n\t\t\t\"Key: \", vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), \"Value: \", vppIPinIPInterface)\n\t\tif reSync == true {\n\t\t\ttxn := rndr.ResyncTxnFactory()\n\t\t\ttxn.Put(vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface)\n\t\t} else {\n\t\t\ttxn := rndr.UpdateTxnFactory(fmt.Sprintf(\"IPinIPTunnel %s\", vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name)))\n\t\t\ttxn.Put(vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface)\n\t\t}\n\t} else {\n\t\treturn renderer.Commit(rndr.RemoteDB, serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface, config.Add)\n\t}\n\n\treturn nil\n}",
"func (sh *DeviceInteractor) ReserveIPPair(ctx context.Context, FabricID uint, DeviceOneID uint, DeviceTwoID uint, IPType string,\n\tipaddressOne string, ipaddressTwo string, InterfaceOneID uint, InterfaceTwoID uint) error {\n\tLOG := appcontext.Logger(ctx)\n\n\tLOG.Infof(\"Reserve IP (%s %s) for %d IPType %s\", ipaddressOne, ipaddressTwo, FabricID, IPType)\n\tvar ipDB domain.IPPairAllocationPool\n\tvar ipCount int64\n\n\t//if Requested IP in the Pool,then move to Used IP Pair\n\tif ipCount, ipDB = sh.GetIPPairCountInPool(ctx, FabricID, ipaddressOne, ipaddressTwo, IPType); ipCount == 1 {\n\t\t//If the Requested IP is in the available and there are interfaces are UsedIP then release the existing IP Address\n\t\teIP1, eIP2, eerr := sh.GetAlreadyAllocatedIPPair(ctx, FabricID, DeviceOneID, DeviceTwoID, IPType, InterfaceOneID, InterfaceTwoID)\n\t\tif eerr == nil {\n\t\t\tsh.ReleaseIPPair(ctx, FabricID, DeviceOneID, DeviceTwoID, IPType, eIP1, eIP2, InterfaceOneID, InterfaceTwoID)\n\t\t}\n\t\tLOG.Infof(\"IP available in IP Pair Pool %s %s for IPType %s\", ipaddressOne, ipaddressTwo, IPType)\n\t\tsh.moveReservedFromIPPairPoolToUsedIPPair(ctx, FabricID, DeviceOneID, DeviceTwoID, IPType, ipaddressOne, ipaddressTwo, InterfaceOneID, InterfaceTwoID, ipDB)\n\t\treturn nil\n\t}\n\n\t//check if its already be assigned to this Interface pair\n\t_, err := sh.Db.GetUsedIPPairOnDeviceInterfaceIDIPAddresssAndType(FabricID, DeviceOneID, DeviceTwoID, ipaddressOne, ipaddressTwo, IPType, InterfaceOneID, InterfaceTwoID)\n\tif err != nil {\n\t\tstatusMsg := fmt.Sprintf(\"IPPair(%s,%s) not present in the Used IP Table for Fabric %d Device (%d,%d)\", ipaddressOne, ipaddressTwo, FabricID, DeviceOneID, DeviceTwoID)\n\t\treturn errors.New(statusMsg)\n\t}\n\treturn nil\n}",
"func (s *BasejossListener) EnterFuncIp(ctx *FuncIpContext) {}",
"func updateBridgeIPAddr(\n\tctx *zedrouterContext,\n\tstatus *types.NetworkInstanceStatus) {\n\n\tlog.Infof(\"updateBridgeIPAddr(%s)\\n\", status.Key())\n\n\told := status.BridgeIPAddr\n\terr := setBridgeIPAddr(ctx, status)\n\tif err != nil {\n\t\tlog.Infof(\"updateBridgeIPAddr: %s\\n\", err)\n\t\treturn\n\t}\n\tif status.BridgeIPAddr != old && status.BridgeIPAddr != \"\" {\n\t\tlog.Infof(\"updateBridgeIPAddr(%s) restarting dnsmasq\\n\",\n\t\t\tstatus.Key())\n\t\trestartDnsmasq(status)\n\t}\n}",
"func SetTunIP(tunName string, mtu uint32, subnetIP global.Address, subnetMask uint8, client bool) error {\n\tip, subnet, err := ParseCIDR(subnetIP, subnetMask)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tip = ip.To4()\n\tif ip[3]%2 == 0 {\n\t\treturn errors.New(\"Invalid ip address.\")\n\t}\n\n\tpeer := net.IP(make([]byte, 4))\n\tcopy([]byte(peer), []byte(ip))\n\tpeer[3]++\n\n\tsargs := fmt.Sprintf(\"%s %s %s up\", tunName, ip, peer)\n\targs := strings.Split(sargs, \" \")\n\tcmd := exec.Command(\"ifconfig\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\treturn errors.New(fmt.Sprintf(\"ifconfig %v err:%v\", sargs, err))\n\t} else {\n\t\tlog.Infof(\"ifconfig %s\", sargs)\n\t}\n\n\tsargs = fmt.Sprintf(\"link set dev %s up mtu %d qlen 100\", tunName, mtu)\n\targs = strings.Split(sargs, \" \")\n\tcmd = exec.Command(\"ip\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\treturn errors.New(fmt.Sprintf(\"ip %v err:%v\", sargs, err))\n\t} else {\n\t\tlog.Infof(\"ip %s\", sargs)\n\t}\n\n\tsargs = fmt.Sprintf(\"route add %s via %s dev %s\", subnet, peer, tunName)\n\targs = strings.Split(sargs, \" \")\n\tcmd = exec.Command(\"ip\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\treturn errors.New(fmt.Sprintf(\"ip %v err:%v\", sargs, err))\n\t} else {\n\t\tlog.Infof(\"ip %s\", sargs)\n\t}\n\n\tif client { // for client\n\t\tpeerStr := fmt.Sprintf(\"%d.%d.%d.%d\", peer[0], peer[1], peer[2], peer[3])\n\t\tif err := RedirectGateway(tunName, peerStr); nil != err {\n\t\t\tlog.Errorf(\"%v\", err)\n\t\t}\n\t} else { // for server\n\t\tsargs = \"net.ipv4.ip_forward=1\"\n\t\targs = strings.Split(sargs, \" \")\n\t\tcmd = exec.Command(\"sysctl\", args...)\n\t\tif err := cmd.Run(); nil != err {\n\t\t\tlog.Errorf(\"sysctl %v err:%v\", sargs, err)\n\t\t}\n\n\t\tsargs = \"-t nat -A POSTROUTING -j MASQUERADE\"\n\t\targs = strings.Split(sargs, \" \")\n\t\tcmd = exec.Command(\"iptables\", args...)\n\t\tif err := cmd.Run(); nil != err {\n\t\t\tlog.Errorf(\"iptables %v err:%v\", sargs, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (_class PIFClass) ReconfigureIP(sessionID SessionRef, self PIFRef, mode IPConfigurationMode, ip string, netmask string, gateway string, dns string) (_err error) {\n\t_method := \"PIF.reconfigure_ip\"\n\t_sessionIDArg, _err := convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_selfArg, _err := convertPIFRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"self\"), self)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_modeArg, _err := convertEnumIPConfigurationModeToXen(fmt.Sprintf(\"%s(%s)\", _method, \"mode\"), mode)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_ipArg, _err := convertStringToXen(fmt.Sprintf(\"%s(%s)\", _method, \"IP\"), ip)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_netmaskArg, _err := convertStringToXen(fmt.Sprintf(\"%s(%s)\", _method, \"netmask\"), netmask)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_gatewayArg, _err := convertStringToXen(fmt.Sprintf(\"%s(%s)\", _method, \"gateway\"), gateway)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_dnsArg, _err := convertStringToXen(fmt.Sprintf(\"%s(%s)\", _method, \"DNS\"), dns)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_, _err = _class.client.APICall(_method, _sessionIDArg, _selfArg, _modeArg, _ipArg, _netmaskArg, _gatewayArg, _dnsArg)\n\treturn\n}",
"func (nu NetworkUtils) EnableIPForwarding(ifName string) error {\n\t// Enable ip forwading on linux vm.\n\t// sysctl -w net.ipv4.ip_forward=1\n\tcmd := fmt.Sprint(enableIPForwardCmd)\n\t_, err := nu.plClient.ExecuteCommand(cmd)\n\tif err != nil {\n\t\tlog.Printf(\"[net] Enable ipforwarding failed with: %v\", err)\n\t\treturn err\n\t}\n\n\t// Append a rule in forward chain to allow forwarding from bridge\n\tif err := iptables.AppendIptableRule(iptables.V4, iptables.Filter, iptables.Forward, \"\", iptables.Accept); err != nil {\n\t\tlog.Printf(\"[net] Appending forward chain rule: allow traffic coming from snatbridge failed with: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (o Netscaler) EnableNsip(req model.NsipEnable) (err error) {\r\n\tresp := model.Response{}\r\n\terr = o.Session.Post(BaseURI+\"nsip?action=enable\", req, &resp)\r\n\tif err != nil {\r\n\t\treturn\r\n\t}\r\n\tif resp.Errorcode != 0 {\r\n\t\terr = errors.New(resp.Message)\r\n\t}\r\n\treturn\r\n}",
"func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) {\n\t// Check if the ip address exists\n\tintName := runner.GetInterfaceToAddIP()\n\targsShowAddress := []string{\n\t\t\"interface\", \"ipv4\", \"show\", \"address\",\n\t\t\"name=\" + intName,\n\t}\n\n\tipToCheck := ip.String()\n\n\texists, _ := checkIPExists(ipToCheck, argsShowAddress, runner)\n\tif exists == true {\n\t\tglog.V(4).Infof(\"not adding IP address %q as it already exists\", ipToCheck)\n\t\treturn true, nil\n\t}\n\n\t// IP Address is not already added, add it now\n\tglog.V(4).Infof(\"running netsh interface ipv4 add address %v\", args)\n\tout, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()\n\n\tif err == nil {\n\t\t// Once the IP Address is added, it takes a bit to initialize and show up when querying for it\n\t\t// Query all the IP addresses and see if the one we added is present\n\t\t// PS: We are using netsh interface ipv4 show address here to query all the IP addresses, instead of\n\t\t// querying net.InterfaceAddrs() as it returns the IP address as soon as it is added even though it is uninitialized\n\t\tglog.V(3).Infof(\"Waiting until IP: %v is added to the network adapter\", ipToCheck)\n\t\tfor {\n\t\t\tif exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner); exists {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t}\n\t}\n\tif ee, ok := err.(utilexec.ExitError); ok {\n\t\t// netsh uses exit(0) to indicate a success of the operation,\n\t\t// as compared to a malformed commandline, for example.\n\t\tif ee.Exited() && ee.ExitStatus() != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn false, fmt.Errorf(\"error adding ipv4 address: %v: %s\", err, out)\n}",
"func (d *InterfaceAddressDescriptor) Create(key string, _ *interfaces.Interface) (metadata interface{}, err error) {\n\tiface, addr, source, _, _ := interfaces.ParseInterfaceAddressKey(key)\n\tif source == netalloc_api.IPAddressSource_EXISTING {\n\t\t// already exists, nothing to do\n\t\treturn nil, nil\n\t}\n\n\tifMeta, found := d.intfIndex.LookupByName(iface)\n\tif !found {\n\t\terr = errors.Errorf(\"failed to find interface %s\", iface)\n\t\td.log.Error(err)\n\t\treturn nil, err\n\t}\n\n\tipAddr, err := d.addrAlloc.GetOrParseIPAddress(addr, iface, netalloc_api.IPAddressForm_ADDR_WITH_MASK)\n\tif err != nil {\n\t\td.log.Error(err)\n\t\treturn nil, err\n\t}\n\n\t// switch to the namespace with the interface\n\tnsCtx := nslinuxcalls.NewNamespaceMgmtCtx()\n\trevert, err := d.nsPlugin.SwitchToNamespace(nsCtx, ifMeta.Namespace)\n\tif err != nil {\n\t\td.log.Error(err)\n\t\treturn nil, err\n\t}\n\tdefer revert()\n\n\tif ipAddr.IP.To4() == nil {\n\t\t// Enable IPv6 for loopback \"lo\" and the interface being configured\n\t\tfor _, iface := range [2]string{\"lo\", ifMeta.HostIfName} {\n\t\t\tipv6SysctlValueName := fmt.Sprintf(DisableIPv6SysctlTemplate, iface)\n\n\t\t\t// Read current sysctl value\n\t\t\tvalue, err := getSysctl(ipv6SysctlValueName)\n\t\t\tif err != nil || value == \"0\" {\n\t\t\t\tif err != nil {\n\t\t\t\t\td.log.Warnf(\"could not read sysctl value for %v: %v\",\n\t\t\t\t\t\tifMeta.HostIfName, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Write sysctl to enable IPv6\n\t\t\t_, err = setSysctl(ipv6SysctlValueName, \"0\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to enable IPv6 (%s=%s): %v\",\n\t\t\t\t\tipv6SysctlValueName, value, err)\n\t\t\t}\n\t\t}\n\t}\n\n\terr = d.ifHandler.AddInterfaceIP(ifMeta.HostIfName, ipAddr)\n\n\t// an attempt to add already assigned IP is not considered as error\n\tif errors.Cause(err) == syscall.EEXIST {\n\t\terr = nil\n\t}\n\treturn nil, err\n}",
"func setupIpvlanInRemoteNs(netNs ns.NetNS, srcIfName, dstIfName string) (*ebpf.Map, error) {\n\trl := unix.Rlimit{\n\t\tCur: unix.RLIM_INFINITY,\n\t\tMax: unix.RLIM_INFINITY,\n\t}\n\n\terr := unix.Setrlimit(unix.RLIMIT_MEMLOCK, &rl)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to increase rlimit: %s\", err)\n\t}\n\n\tm, err := ebpf.NewMap(&ebpf.MapSpec{\n\t\tType: ebpf.ProgramArray,\n\t\tKeySize: 4,\n\t\tValueSize: 4,\n\t\tMaxEntries: 1,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create root BPF map for %q: %s\", dstIfName, err)\n\t}\n\n\terr = netNs.Do(func(_ ns.NetNS) error {\n\t\tvar err error\n\n\t\tif srcIfName != dstIfName {\n\t\t\terr = link.Rename(srcIfName, dstIfName)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to rename ipvlan from %q to %q: %s\", srcIfName, dstIfName, err)\n\t\t\t}\n\t\t}\n\n\t\tipvlan, err := netlink.LinkByName(dstIfName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to lookup ipvlan device %q: %s\", dstIfName, err)\n\t\t}\n\n\t\tqdiscAttrs := netlink.QdiscAttrs{\n\t\t\tLinkIndex: ipvlan.Attrs().Index,\n\t\t\tHandle: netlink.MakeHandle(0xffff, 0),\n\t\t\tParent: netlink.HANDLE_CLSACT,\n\t\t}\n\t\tqdisc := &netlink.GenericQdisc{\n\t\t\tQdiscAttrs: qdiscAttrs,\n\t\t\tQdiscType: \"clsact\",\n\t\t}\n\t\tif err = netlink.QdiscAdd(qdisc); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create clsact qdisc on %q: %s\", dstIfName, err)\n\t\t}\n\n\t\tprog, err := ebpf.NewProgram(&ebpf.ProgramSpec{\n\t\t\tType: ebpf.SchedCLS,\n\t\t\tInstructions: getEntryProgInstructions(m.FD()),\n\t\t\tLicense: \"ASL2\",\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to load root BPF prog for %q: %s\", dstIfName, err)\n\t\t}\n\n\t\tfilterAttrs := netlink.FilterAttrs{\n\t\t\tLinkIndex: ipvlan.Attrs().Index,\n\t\t\tParent: netlink.HANDLE_MIN_EGRESS,\n\t\t\tHandle: netlink.MakeHandle(0, 1),\n\t\t\tProtocol: 3,\n\t\t\tPriority: 1,\n\t\t}\n\t\tfilter := &netlink.BpfFilter{\n\t\t\tFilterAttrs: filterAttrs,\n\t\t\tFd: prog.FD(),\n\t\t\tName: \"polEntry\",\n\t\t\tDirectAction: true,\n\t\t}\n\t\tif err = netlink.FilterAdd(filter); err != nil {\n\t\t\tprog.Close()\n\t\t\treturn fmt.Errorf(\"failed to create cls_bpf filter on %q: %s\", dstIfName, err)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tm.Close()\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewGobEncoderLight returns a new lock-free encoder
|
func NewGobEncoderLight() *GobEncoderLight {
ret := &GobEncoderLight{
bytes: &bytes.Buffer{},
}
ret.encoder = gob.NewEncoder(ret.bytes)
return ret
}
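
// A sketch of what the constructor wires together, using only the standard
// library: a buffer plus a gob.Encoder writing into it. The helper name is
// hypothetical.
func gobEncodeToBytes(v interface{}) ([]byte, error) {
    buf := &bytes.Buffer{}
    if err := gob.NewEncoder(buf).Encode(v); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}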
|
[
"func NewGobDecoderLight() *GobDecoderLight {\n\tret := &GobDecoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.decoder = gob.NewDecoder(ret.bytes)\n\treturn ret\n}",
"func NewEncoder() Encoder { return Encoder{} }",
"func NewGOBCodec() *GOBCodec {\n\tr := GOBCodec(0)\n\treturn &r\n}",
"func NewGobTranscoder() *GobTranscoder {\n\tret := &GobTranscoder{\n\t\tinBytes: &bytes.Buffer{},\n\t\toutBytes: &bytes.Buffer{},\n\t\tencoderMut: &sync.Mutex{},\n\t\tdecoderMut: &sync.Mutex{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.outBytes)\n\tret.decoder = gob.NewDecoder(ret.inBytes)\n\treturn ret\n}",
"func NewEncoder() Encoder {\n return &encoder{}\n}",
"func NewEncoder() *Encoder {\n\tselect {\n\tcase enc := <-encObjPool:\n\t\treturn enc\n\tdefault:\n\t\treturn &Encoder{}\n\t}\n}",
"func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}",
"func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}",
"func New() (codec.Encoder, codec.Decoder) {\r\n\tc := &rawCodec{}\r\n\treturn c, c\r\n}",
"func newKeccak512() cipher.Sponge { return &sponge{rate: 136} }",
"func (b *blockEnc) initNewEncode() {\n\tb.recentOffsets = [3]uint32{1, 4, 8}\n\tb.litEnc.Reuse = huff0.ReusePolicyNone\n\tb.coders.setPrev(nil, nil, nil)\n}",
"func New() (Grypt, error) {\n\treturn newTinkGrypt()\n}",
"func New(b []byte, sampleRate int) (*GME, error) {\n\tvar g GME\n\tdata := unsafe.Pointer(&b[0])\n\tcerror := C.gme_open_data(data, C.long(len(b)), &g.emu, C.int(sampleRate))\n\tif err := gmeError(cerror); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &g, nil\n}",
"func NewEncoder() Encoder {\n\treturn &encoder{}\n}",
"func NewBinaryCodec() *BinaryCodec {\n var c *BinaryCodec = &BinaryCodec{}\n c.buf = &bytes.Buffer{}\n return c\n}",
"func NewGCM(b cipher.Block, tagSizeInBits int, iv []byte) (GaloisCounterMode, error) {\n if b.BlockSize() != 16 && b.BlockSize() != 18 && b.BlockSize() != 24 {\n return nil, errors.New(\"Block cipher MUST have a 128-bit block size\")\n }\n\n if tagSizeInBits <= 0 {\n tagSizeInBits = 128\n }\n\n h := make([]byte, 16)\n b.Encrypt(h, zeroes[:16])\n\n return &gcm{\n b: b,\n blockSize: b.BlockSize(),\n iv: dup(iv),\n h: h,\n tagSize: tagSizeInBits / 8,\n tmp: make([]byte, b.BlockSize()),\n }, nil\n}",
"func newKeccak768() cipher.Sponge { return &sponge{rate: 104} }",
"func newKeccak256() cipher.Sponge { return &sponge{rate: 168} }",
"func NewCodec() *Codec {\n return &Codec{}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewGobDecoderLight returns a new lock-free decoder
|
func NewGobDecoderLight() *GobDecoderLight {
ret := &GobDecoderLight{
bytes: &bytes.Buffer{},
}
ret.decoder = gob.NewDecoder(ret.bytes)
return ret
}
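
// The decoding mirror of the sketch above, also standard library only; the
// helper name is hypothetical.
func gobDecodeFromBytes(data []byte, v interface{}) error {
    return gob.NewDecoder(bytes.NewReader(data)).Decode(v)
}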
|
[
"func NewGobEncoderLight() *GobEncoderLight {\n\tret := &GobEncoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.bytes)\n\treturn ret\n}",
"func NewDecoder() *Decoder {\n\treturn &Decoder{\n\t\tbuffer: []byte{},\n\t\tcache: make(map[string]struct{}),\n\t}\n}",
"func New(b []byte, sampleRate int) (*GME, error) {\n\tvar g GME\n\tdata := unsafe.Pointer(&b[0])\n\tcerror := C.gme_open_data(data, C.long(len(b)), &g.emu, C.int(sampleRate))\n\tif err := gmeError(cerror); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &g, nil\n}",
"func NewWithLIB(libID bstream.BlockRef, h bstream.Handler) { return }",
"func GetNewOsmpbfDecoder(r io.Reader, fsize int64) (*osmpbf.Decoder, *pb.ProgressBar, error) {\n\tbar := pb.New(int(fsize)).SetUnits(pb.U_BYTES)\n\tbar.Start()\n\n\td := osmpbf.NewDecoder(bar.NewProxyReader(r))\n\td.SetBufferSize(osmpbf.MaxBlobSize)\n\terr := d.Start(runtime.GOMAXPROCS(-1))\n\tif err != nil {\n\t\tbar.Finish()\n\t\treturn nil, nil, err\n\t}\n\treturn d, bar, nil\n}",
"func newLightFetcher(h *clientHandler) *lightFetcher {\n\tf := &lightFetcher{\n\t\thandler: h,\n\t\tchain: h.backend.blockchain,\n\t\tpeers: make(map[*peer]*fetcherPeerInfo),\n\t\tdeliverChn: make(chan fetchResponse, 100),\n\t\trequested: make(map[uint64]fetchRequest),\n\t\ttimeoutChn: make(chan uint64),\n\t\trequestTrigger: make(chan struct{}, 1),\n\t\tsyncDone: make(chan *peer),\n\t\tcloseCh: make(chan struct{}),\n\t\tmaxConfirmedTd: big.NewInt(0),\n\t}\n\th.backend.peers.notify(f)\n\n\tf.wg.Add(1)\n\tgo f.syncLoop()\n\treturn f\n}",
"func NewGobTranscoder() *GobTranscoder {\n\tret := &GobTranscoder{\n\t\tinBytes: &bytes.Buffer{},\n\t\toutBytes: &bytes.Buffer{},\n\t\tencoderMut: &sync.Mutex{},\n\t\tdecoderMut: &sync.Mutex{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.outBytes)\n\tret.decoder = gob.NewDecoder(ret.inBytes)\n\treturn ret\n}",
"func NewDecoder(src blob.Fetcher) *Decoder {\n\treturn &Decoder{src: src}\n}",
"func NewDecoder(flags Flag) (*Decoder, error) {\n\tdb := C.magic_open(C.int(0))\n\tif db == nil {\n\t\treturn nil, errors.New(\"error opening magic\")\n\t}\n\td := &Decoder{db: db}\n\tif code := C.magic_setflags(db, C.int(flags)); code != 0 {\n\t\td.Close()\n\t\treturn nil, errors.New(C.GoString(C.magic_error(d.db)))\n\t}\n\n\tif code := C.magic_load(db, nil); code != 0 {\n\t\td.Close()\n\t\treturn nil, errors.New(C.GoString(C.magic_error(d.db)))\n\t}\n\treturn d, nil\n}",
"func NewGOBCodec() *GOBCodec {\n\tr := GOBCodec(0)\n\treturn &r\n}",
"func NewDecoder(opts DecoderOptions) (*Decoder, error) {\n\tvar d Decoder\n\tif err := opts.validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"imaging: error validating decoder options: %w\", err)\n\t}\n\tif opts.ConcurrencyLevel > 0 {\n\t\td.sem = make(chan struct{}, opts.ConcurrencyLevel)\n\t}\n\td.opts = opts\n\treturn &d, nil\n}",
"func New() *Gerbil {\n\tnewg := Gerbil{}\n\tnewg.Connections = make(map[string]*GerbCon)\n\tnewg.EventHandler = NewEventHandler()\n\tnewg.Responses = make(chan *GerbilResponse, 500)\n\tnewg.Broadcasts = make(chan *GerbilBroadcast, 500)\n\tgo newg.ResponseHandler()\n\tgo newg.BroadcastHandler()\n\treturn &newg\n}",
"func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}",
"func NewDecoder(r io.Reader) io.Reader {}",
"func New() (codec.Encoder, codec.Decoder) {\r\n\tc := &rawCodec{}\r\n\treturn c, c\r\n}",
"func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) {\n\trd, err := newRangeDecoder(br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td = &decoder{\n\t\tState: state,\n\t\tDict: dict,\n\t\trd: rd,\n\t\tsize: size,\n\t\tstart: dict.pos(),\n\t}\n\treturn d, nil\n}",
"func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}",
"func New() Framer {\n\tf := &framer{\n\t\tbufLock: &sync.RWMutex{},\n\t\tbuffer: make([]byte, 0),\n\t}\n\n\treturn f\n}",
"func New(client *steam.Client) *TF2 {\n\tt := &TF2{client}\n\tclient.GC.RegisterPacketHandler(t)\n\treturn t\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewGobTranscoder will return a newly initialised transcoder to help with the mundane encoding/decoding operations
|
func NewGobTranscoder() *GobTranscoder {
ret := &GobTranscoder{
inBytes: &bytes.Buffer{},
outBytes: &bytes.Buffer{},
encoderMut: &sync.Mutex{},
decoderMut: &sync.Mutex{},
}
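	// Bind the gob encoder/decoder to dedicated buffers so each call reuses the same stream state.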
ret.encoder = gob.NewEncoder(ret.outBytes)
ret.decoder = gob.NewDecoder(ret.inBytes)
return ret
}
|
[
"func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}",
"func NewGOBCodec() *GOBCodec {\n\tr := GOBCodec(0)\n\treturn &r\n}",
"func FromGob(data []byte, dst interface{}) error {\n\treturn NewGobber().From(data, dst)\n}",
"func (_BondingManager *BondingManagerCaller) GetTranscoder(opts *bind.CallOpts, _transcoder common.Address) (struct {\n\tLastRewardRound *big.Int\n\tBlockRewardCut *big.Int\n\tFeeShare *big.Int\n\tPricePerSegment *big.Int\n\tPendingBlockRewardCut *big.Int\n\tPendingFeeShare *big.Int\n\tPendingPricePerSegment *big.Int\n}, error) {\n\tret := new(struct {\n\t\tLastRewardRound *big.Int\n\t\tBlockRewardCut *big.Int\n\t\tFeeShare *big.Int\n\t\tPricePerSegment *big.Int\n\t\tPendingBlockRewardCut *big.Int\n\t\tPendingFeeShare *big.Int\n\t\tPendingPricePerSegment *big.Int\n\t})\n\tout := ret\n\terr := _BondingManager.contract.Call(opts, out, \"getTranscoder\", _transcoder)\n\treturn *ret, err\n}",
"func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}",
"func (_BondingManager *BondingManagerCallerSession) GetTranscoder(_transcoder common.Address) (struct {\n\tLastRewardRound *big.Int\n\tBlockRewardCut *big.Int\n\tFeeShare *big.Int\n\tPricePerSegment *big.Int\n\tPendingBlockRewardCut *big.Int\n\tPendingFeeShare *big.Int\n\tPendingPricePerSegment *big.Int\n}, error) {\n\treturn _BondingManager.Contract.GetTranscoder(&_BondingManager.CallOpts, _transcoder)\n}",
"func NewGobSerializer() gbus.Serializer {\n\treturn &Gob{\n\t\tlock: &sync.Mutex{},\n\t\tregisteredSchemas: make(map[string]reflect.Type),\n\t}\n}",
"func NewEncoder() Encoder { return Encoder{} }",
"func NewFromGob(byt []byte) (*Smokering, error) {\n\tsr := New()\n\n\tsr.Lock()\n\tdefer sr.Unlock()\n\td := ringdata{}\n\n\tbuf := bytes.NewBuffer(byt)\n\tdecoder := gob.NewDecoder(buf)\n\n\tif err := decoder.Decode(&d.M); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, kd := range d.M {\n\t\tif _, err := sr.keyFromGob(kd, true); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn sr, nil\n}",
"func (d *DFA) GobEncode() ([]byte, error) {\n\tbuffer := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(buffer)\n\tif err := encoder.Encode(d.initial); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode initial state\")\n\t}\n\tif err := encoder.Encode(d.table); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not GOB encode sparse table\")\n\t}\n\treturn buffer.Bytes(), nil\n}",
"func (_BondingManager *BondingManagerSession) Transcoder(_blockRewardCut *big.Int, _feeShare *big.Int, _pricePerSegment *big.Int) (*types.Transaction, error) {\n\treturn _BondingManager.Contract.Transcoder(&_BondingManager.TransactOpts, _blockRewardCut, _feeShare, _pricePerSegment)\n}",
"func (_BondingManager *BondingManagerTransactorSession) Transcoder(_blockRewardCut *big.Int, _feeShare *big.Int, _pricePerSegment *big.Int) (*types.Transaction, error) {\n\treturn _BondingManager.Contract.Transcoder(&_BondingManager.TransactOpts, _blockRewardCut, _feeShare, _pricePerSegment)\n}",
"func (d *DFA) GobDecode(bs []byte) error {\n\tbuffer := bytes.NewBuffer(bs)\n\tdecoder := gob.NewDecoder(buffer)\n\tvar initial State\n\tvar table []Cell\n\tif err := decoder.Decode(&initial); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode initial state\")\n\t}\n\tif err := decoder.Decode(&table); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode sparse table\")\n\t}\n\td.initial = initial\n\td.table = table\n\treturn nil\n}",
"func NewGobDecoderLight() *GobDecoderLight {\n\tret := &GobDecoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.decoder = gob.NewDecoder(ret.bytes)\n\treturn ret\n}",
"func NewEncoder() Encoder {\n return &encoder{}\n}",
"func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}",
"func (e *Exchange) FromGob(g []byte) error {\n\t// start := time.Now()\n\tgob.Register(time.Time{})\n\tgob.Register(TableRows{})\n\t// gob.Register(Dialogs{})\n\tb := &bytes.Buffer{}\n\tb.Write(g)\n\tdec := gob.NewDecoder(b)\n\terr := dec.Decode(e)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed screen gob decode %v\", err)\n\t}\n\t// end := time.Now()\n\t// centerr.InfoLog.Printf(\"fromgob: time difference %v\", end.Sub(start))\n\treturn nil\n}",
"func NewCoder(buf []byte) *Coder {\n\tret := new(Coder)\n\n\tret.buf = buf\n\t// Figure 15.\n\tret.pos = 2\n\t// Figure 14.\n\tret.low = uint16(buf[0])<<8 | uint16(buf[1])\n\t// Figure 13.\n\tret.rng = 0xFF00\n\tret.cur_byte = -1\n\tif ret.low >= ret.rng {\n\t\tret.low = ret.rng\n\t\tret.pos = len(buf) - 1\n\t}\n\n\t// 3.8.1.3. Initial Values for the Context Model\n\tret.SetTable(DefaultStateTransition)\n\n\treturn ret\n}",
"func (rnn *RNN) GobDecode(b []byte) error {\n\tinput := bytes.NewBuffer(b)\n\tdec := gob.NewDecoder(input) // Will read from network.\n\n\tvar backup bkp\n\terr := dec.Decode(&backup)\n\trnn.bh = make([]float64, len(backup.Bh))\n\trnn.by = make([]float64, len(backup.By))\n\trnn.hprev = make([]float64, len(backup.Hprev))\n\tif err == nil {\n\t\trnn.whh = backup.Whh\n\t\trnn.why = backup.Why\n\t\trnn.wxh = backup.Wxh\n\t\trnn.config = backup.Config\n\t\tcopy(rnn.bh, backup.Bh)\n\t\tcopy(rnn.by, backup.By)\n\t\tcopy(rnn.hprev, backup.Hprev)\n\t}\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DecodeType will attempt to decode the buffer into the pointer outT
|
func (g *GobTranscoder) DecodeType(buf []byte, outT interface{}) error {
g.decoderMut.Lock()
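	// Reset the shared input buffer and release the lock once decoding finishes.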
defer func() {
g.inBytes.Reset()
g.decoderMut.Unlock()
}()
reader := bytes.NewReader(buf)
if _, err := io.Copy(g.inBytes, reader); err != nil {
return err
}
return g.decoder.Decode(outT)
}
|
[
"func (g *GobDecoderLight) DecodeType(buf []byte, outT interface{}) error {\n\tdefer func() {\n\t\tg.bytes.Reset()\n\t}()\n\treader := bytes.NewReader(buf)\n\tif _, err := io.Copy(g.bytes, reader); err != nil {\n\t\treturn err\n\t}\n\treturn g.decoder.Decode(outT)\n}",
"func (dec *Decoder) decodeType(isInterface bool) Code {\n\treturn 0\n}",
"func decodeType(t byte) (byte, byte) { return t >> 2, t & 3 }",
"func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, indent int) error {\n\tws := strings.Repeat(\" \", indent)\n\n\tunmarshalerIface := reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"(\"+out+\").UnmarshalEasyJSON(in)\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.Raw(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalJSON(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.UnsafeBytes(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalText(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\terr := g.genTypeDecoderNoCheck(t, out, tags, indent)\n\treturn err\n}",
"func (self *Decoder) Decode(val interface{}) error {\n vv := rt.UnpackEface(val)\n vp := vv.Value\n\n /* check for nil type */\n if vv.Type == nil {\n return &json.InvalidUnmarshalError{}\n }\n\n /* must be a non-nil pointer */\n if vp == nil || vv.Type.Kind() != reflect.Ptr {\n return &json.InvalidUnmarshalError{Type: vv.Type.Pack()}\n }\n\n /* create a new stack, and call the decoder */\n sb, etp := newStack(), rt.PtrElem(vv.Type)\n nb, err := decodeTypedPointer(self.s, self.i, etp, vp, sb, self.f)\n\n /* return the stack back */\n self.i = nb\n freeStack(sb)\n\n /* avoid GC ahead */\n runtime.KeepAlive(vv)\n return err\n}",
"func (t *LegacyTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode binary in a byte array or interface\")\n\t\t}\n\t} else if valueType == gocbcore.StringType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *string:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode a string in a string or interface\")\n\t\t}\n\t} else if valueType == gocbcore.JSONType {\n\t\terr := json.Unmarshal(bytes, &out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}",
"func (caller *B) Decode(encoding []byte) {\n\tcaller.ByteMember = decodeByte(encoding[4:]) //first 4 bytes are the TypeId\n\t//so use what's left to get the byte\n}",
"func Decode(e core.Value, out interface{}) {\n\tv := reflect.ValueOf(out)\n\tdecode(e, v.Elem())\n}",
"func (avp *avp) decode() (interface{}, error) {\n\tswitch avp.payload.dataType {\n\tcase avpDataTypeEmpty:\n\t\treturn nil, nil\n\tcase avpDataTypeUint16:\n\t\treturn avp.payload.toUint16()\n\tcase avpDataTypeUint32:\n\t\treturn avp.payload.toUint32()\n\tcase avpDataTypeUint64:\n\t\treturn avp.payload.toUint64()\n\tcase avpDataTypeString:\n\t\treturn avp.payload.toString()\n\tcase avpDataTypeBytes:\n\t\treturn avp.payload.data, nil\n\tcase avpDataTypeResultCode:\n\t\treturn avp.payload.toResultCode()\n\tcase avpDataTypeMsgID:\n\t\tv, err := avp.payload.toUint16()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn avpMsgType(v), nil\n\t}\n\treturn nil, fmt.Errorf(\"unhandled AVP data type\")\n}",
"func decodeMsgPack(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}",
"func (d *Decoder) Peek() Type {\n\tdefer func() { d.lastCall = peekCall }()\n\tif d.lastCall == readCall {\n\t\td.value, d.err = d.Read()\n\t}\n\treturn d.value.typ\n}",
"func interfaceDecode(dec *gob.Decoder) Pythagoras {\n\t//The decode will fail unless the concrete type on the wire has been registered.\n\t//we registered it in the calling function\n\tvar p Pythagoras\n\terr := dec.Decode(&p)\n\tif err != nil {\n\t\tlog.Fatal(\"Decode:\", err)\n\t}\n\treturn p\n}",
"func (t *JSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\treturn errors.New(\"binary datatype is not supported by JSONTranscoder\")\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"string datatype is not supported by JSONTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\terr := json.Unmarshal(bytes, &out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}",
"func (d *Decoder) Decode(r io.Reader, t *dials.Type) (reflect.Value, error) {\n\ttomlBytes, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn reflect.Value{}, fmt.Errorf(\"error reading TOML: %s\", err)\n\t}\n\n\t// Use the TagCopyingMangler to copy over TOML tags from dials tags if TOML\n\t// tags aren't specified.\n\ttfmr := transform.NewTransformer(t.Type(),\n\t\t&tagformat.TagCopyingMangler{\n\t\t\tSrcTag: common.DialsTagName, NewTag: TOMLTagName})\n\tval, tfmErr := tfmr.Translate()\n\tif tfmErr != nil {\n\t\treturn reflect.Value{}, fmt.Errorf(\"failed to convert tags: %s\", tfmErr)\n\t}\n\n\t// Get a pointer to our value, so we can pass that.\n\tinstance := val.Addr().Interface()\n\terr = tomlparser.Unmarshal(tomlBytes, instance)\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\n\tunmangledVal, unmangleErr := tfmr.ReverseTranslate(val)\n\tif unmangleErr != nil {\n\t\treturn reflect.Value{}, unmangleErr\n\t}\n\n\treturn unmangledVal, nil\n}",
"func (dec *Decoder) Decode(e interface{}) error {\n\tif e == nil {\n\t\treturn dec.DecodeValue(reflect.Value{})\n\t}\n\tvalue := reflect.ValueOf(e)\n\t// If e represents a value as opposed to a pointer, the answer won't\n\t// get back to the caller. Make sure it's a pointer.\n\tif value.Type().Kind() != reflect.Ptr {\n\t\tdec.err = errors.New(\"binpack: attempt to decode into a non-pointer\")\n\t\treturn dec.err\n\t}\n\treturn dec.DecodeValue(value)\n}",
"func Decode(data []byte) any {\n\tvar buffer = new(protocol.ByteBuffer)\n\tbuffer.WriteUBytes(data)\n\tvar packet = protocol.Read(buffer)\n\treturn packet\n}",
"func interfaceDecode(dec *gob.Decoder) Pythagoras {\n\t// The decode will fail unless the concrete type on the wire has been\n\t// registered. We registered it in the calling function.\n\tvar p Pythagoras\n\terr := dec.Decode(&p)\n\tif err != nil {\n\t\tlog.Fatal(\"decode:\", err)\n\t}\n\treturn p\n}",
"func (d *Decoder) Decode(ctx context.Context, b []byte) (interface{}, error) {\n\tnv := reflect.New(d.typ).Interface()\n\tif err := d.fn(ctx, b, nv); err != nil {\n\t\treturn nil, err\n\t}\n\tptr := reflect.ValueOf(nv)\n\treturn ptr.Elem().Interface(), nil\n}",
"func (t *RawBinaryTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode binary in a byte array or interface\")\n\t\t}\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"only binary datatype is supported by RawBinaryTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\treturn errors.New(\"only binary datatype is supported by RawBinaryTranscoder\")\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DecodeType will attempt to decode the buffer into the pointer outT
|
func (g *GobDecoderLight) DecodeType(buf []byte, outT interface{}) error {
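	// No mutex here, so a GobDecoderLight is not safe for concurrent use; the buffer is reset after each call.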
defer func() {
g.bytes.Reset()
}()
reader := bytes.NewReader(buf)
if _, err := io.Copy(g.bytes, reader); err != nil {
return err
}
return g.decoder.Decode(outT)
}
|
[
"func (g *GobTranscoder) DecodeType(buf []byte, outT interface{}) error {\n\tg.decoderMut.Lock()\n\tdefer func() {\n\t\tg.inBytes.Reset()\n\t\tg.decoderMut.Unlock()\n\t}()\n\treader := bytes.NewReader(buf)\n\tif _, err := io.Copy(g.inBytes, reader); err != nil {\n\t\treturn err\n\t}\n\treturn g.decoder.Decode(outT)\n}",
"func (dec *Decoder) decodeType(isInterface bool) Code {\n\treturn 0\n}",
"func decodeType(t byte) (byte, byte) { return t >> 2, t & 3 }",
"func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, indent int) error {\n\tws := strings.Repeat(\" \", indent)\n\n\tunmarshalerIface := reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"(\"+out+\").UnmarshalEasyJSON(in)\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.Raw(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalJSON(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.UnsafeBytes(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalText(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\terr := g.genTypeDecoderNoCheck(t, out, tags, indent)\n\treturn err\n}",
"func (self *Decoder) Decode(val interface{}) error {\n vv := rt.UnpackEface(val)\n vp := vv.Value\n\n /* check for nil type */\n if vv.Type == nil {\n return &json.InvalidUnmarshalError{}\n }\n\n /* must be a non-nil pointer */\n if vp == nil || vv.Type.Kind() != reflect.Ptr {\n return &json.InvalidUnmarshalError{Type: vv.Type.Pack()}\n }\n\n /* create a new stack, and call the decoder */\n sb, etp := newStack(), rt.PtrElem(vv.Type)\n nb, err := decodeTypedPointer(self.s, self.i, etp, vp, sb, self.f)\n\n /* return the stack back */\n self.i = nb\n freeStack(sb)\n\n /* avoid GC ahead */\n runtime.KeepAlive(vv)\n return err\n}",
"func (t *LegacyTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode binary in a byte array or interface\")\n\t\t}\n\t} else if valueType == gocbcore.StringType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *string:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = string(bytes)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode a string in a string or interface\")\n\t\t}\n\t} else if valueType == gocbcore.JSONType {\n\t\terr := json.Unmarshal(bytes, &out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}",
"func (caller *B) Decode(encoding []byte) {\n\tcaller.ByteMember = decodeByte(encoding[4:]) //first 4 bytes are the TypeId\n\t//so use what's left to get the byte\n}",
"func Decode(e core.Value, out interface{}) {\n\tv := reflect.ValueOf(out)\n\tdecode(e, v.Elem())\n}",
"func (avp *avp) decode() (interface{}, error) {\n\tswitch avp.payload.dataType {\n\tcase avpDataTypeEmpty:\n\t\treturn nil, nil\n\tcase avpDataTypeUint16:\n\t\treturn avp.payload.toUint16()\n\tcase avpDataTypeUint32:\n\t\treturn avp.payload.toUint32()\n\tcase avpDataTypeUint64:\n\t\treturn avp.payload.toUint64()\n\tcase avpDataTypeString:\n\t\treturn avp.payload.toString()\n\tcase avpDataTypeBytes:\n\t\treturn avp.payload.data, nil\n\tcase avpDataTypeResultCode:\n\t\treturn avp.payload.toResultCode()\n\tcase avpDataTypeMsgID:\n\t\tv, err := avp.payload.toUint16()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn avpMsgType(v), nil\n\t}\n\treturn nil, fmt.Errorf(\"unhandled AVP data type\")\n}",
"func decodeMsgPack(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}",
"func (d *Decoder) Peek() Type {\n\tdefer func() { d.lastCall = peekCall }()\n\tif d.lastCall == readCall {\n\t\td.value, d.err = d.Read()\n\t}\n\treturn d.value.typ\n}",
"func interfaceDecode(dec *gob.Decoder) Pythagoras {\n\t//The decode will fail unless the concrete type on the wire has been registered.\n\t//we registered it in the calling function\n\tvar p Pythagoras\n\terr := dec.Decode(&p)\n\tif err != nil {\n\t\tlog.Fatal(\"Decode:\", err)\n\t}\n\treturn p\n}",
"func (t *JSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\treturn errors.New(\"binary datatype is not supported by JSONTranscoder\")\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"string datatype is not supported by JSONTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\terr := json.Unmarshal(bytes, &out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}",
"func (d *Decoder) Decode(r io.Reader, t *dials.Type) (reflect.Value, error) {\n\ttomlBytes, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn reflect.Value{}, fmt.Errorf(\"error reading TOML: %s\", err)\n\t}\n\n\t// Use the TagCopyingMangler to copy over TOML tags from dials tags if TOML\n\t// tags aren't specified.\n\ttfmr := transform.NewTransformer(t.Type(),\n\t\t&tagformat.TagCopyingMangler{\n\t\t\tSrcTag: common.DialsTagName, NewTag: TOMLTagName})\n\tval, tfmErr := tfmr.Translate()\n\tif tfmErr != nil {\n\t\treturn reflect.Value{}, fmt.Errorf(\"failed to convert tags: %s\", tfmErr)\n\t}\n\n\t// Get a pointer to our value, so we can pass that.\n\tinstance := val.Addr().Interface()\n\terr = tomlparser.Unmarshal(tomlBytes, instance)\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\n\tunmangledVal, unmangleErr := tfmr.ReverseTranslate(val)\n\tif unmangleErr != nil {\n\t\treturn reflect.Value{}, unmangleErr\n\t}\n\n\treturn unmangledVal, nil\n}",
"func (dec *Decoder) Decode(e interface{}) error {\n\tif e == nil {\n\t\treturn dec.DecodeValue(reflect.Value{})\n\t}\n\tvalue := reflect.ValueOf(e)\n\t// If e represents a value as opposed to a pointer, the answer won't\n\t// get back to the caller. Make sure it's a pointer.\n\tif value.Type().Kind() != reflect.Ptr {\n\t\tdec.err = errors.New(\"binpack: attempt to decode into a non-pointer\")\n\t\treturn dec.err\n\t}\n\treturn dec.DecodeValue(value)\n}",
"func Decode(data []byte) any {\n\tvar buffer = new(protocol.ByteBuffer)\n\tbuffer.WriteUBytes(data)\n\tvar packet = protocol.Read(buffer)\n\treturn packet\n}",
"func interfaceDecode(dec *gob.Decoder) Pythagoras {\n\t// The decode will fail unless the concrete type on the wire has been\n\t// registered. We registered it in the calling function.\n\tvar p Pythagoras\n\terr := dec.Decode(&p)\n\tif err != nil {\n\t\tlog.Fatal(\"decode:\", err)\n\t}\n\treturn p\n}",
"func (d *Decoder) Decode(ctx context.Context, b []byte) (interface{}, error) {\n\tnv := reflect.New(d.typ).Interface()\n\tif err := d.fn(ctx, b, nv); err != nil {\n\t\treturn nil, err\n\t}\n\tptr := reflect.ValueOf(nv)\n\treturn ptr.Elem().Interface(), nil\n}",
"func (t *RawBinaryTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {\n\tvalueType, compression := gocbcore.DecodeCommonFlags(flags)\n\n\t// Make sure compression is disabled\n\tif compression != gocbcore.NoCompression {\n\t\treturn errors.New(\"unexpected value compression\")\n\t}\n\n\t// Normal types of decoding\n\tif valueType == gocbcore.BinaryType {\n\t\tswitch typedOut := out.(type) {\n\t\tcase *[]byte:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tcase *interface{}:\n\t\t\t*typedOut = bytes\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.New(\"you must encode binary in a byte array or interface\")\n\t\t}\n\t} else if valueType == gocbcore.StringType {\n\t\treturn errors.New(\"only binary datatype is supported by RawBinaryTranscoder\")\n\t} else if valueType == gocbcore.JSONType {\n\t\treturn errors.New(\"only binary datatype is supported by RawBinaryTranscoder\")\n\t}\n\n\treturn errors.New(\"unexpected expectedFlags value\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
InitDB removes any existing db file and creates a new DB with an ip table
|
func InitDB() {
os.Remove("./threat_analyser.db")
var err error
db, err = sql.Open("sqlite3", "./threat_analyser.db")
if err != nil {
log.Fatal(err)
}
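	// Schema: one row per IP address keyed by ip_address, with audit timestamps and the last response code.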
createCmd := `
create table ip (ip_address TEXT PRIMARY KEY,
uuid TEXT,
created_at DATETIME,
updated_at DATETIME,
response_code TEXT);
`
	_, err = db.Exec(createCmd)
	if err != nil {
		log.Fatal("Error creating DB table: ", err)
	}
}
|
[
"func (i *API) InitDB(purge bool) error {\n\tif purge {\n\t\ti.purgeDB()\n\t}\n\treturn i.openDB()\n}",
"func InitDatabase(dbName *string, dst ...interface{}) {\n\tlog.Info().Msgf(\"Loading database %v\", *dbName)\n\tvar err error\n\tdbFile = sqlite.Open(fmt.Sprintf(\"%v.db\", *dbName))\n\tdatastore, err = gorm.Open(dbFile, &gorm.Config{\n\t\tDisableForeignKeyConstraintWhenMigrating: true,\n\t})\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\n\t// Migrate the schema\n\terr = datastore.AutoMigrate(dst...)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Migration failed! Please check the logs!\")\n\t}\n}",
"func InitDB(config string) error {\n\treturn connectToDB(\"./files.sqlite3\")\n}",
"func initDb() error {\n\n\tvar err error\n\n\t// Start by checking if the DB exists\n\tdbExists := checkDbExists()\n\tif !dbExists && !userIsRoot() {\n\t\treturn errors.New(\"can't create a new database if not root\")\n\t}\n\n\t// Open/create the database\n\tdb, err = sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif db == nil {\n\t\treturn errors.New(\"can't access the database\")\n\t}\n\n\t// Check that we're all good\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// At this point, the DB is open\n\tdbOpen = true\n\n\t// If new DB and root, create table and set chmod rights\n\tif !dbExists {\n\t\t_, err = db.Exec(dbSchema)\n\n\t\terr = setDbRights()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func InitDBFile(config Config) {\n\tif !FileExists(config.StorePath) {\n\t\terr := os.MkdirAll(config.StorePath, 0755)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tif !FileExists(path.Join(config.StorePath, DefaultDBName)) {\n\t\terr := ioutil.WriteFile(path.Join(config.StorePath, DefaultDBName), []byte(\"\"), 0755)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}",
"func InitDb(appConfig *AppConfig) {\n\tlog.Info(\"Initialize database connection\")\n\tDbs = fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s\",\n\t\tappConfig.Db.Host,\n\t\tappConfig.Db.Port,\n\t\tappConfig.Db.User,\n\t\tappConfig.Db.Password,\n\t\tappConfig.Db.DbName,\n\t\tappConfig.Db.SSLMode,\n\t)\n\tlog.Info(\"Successfully initialize database connection\")\n\tdb := GetDB()\n\tlog.Info(\"Start table migrations\")\n\tdb.AutoMigrate(\n\t\t&Session{},\n\t)\n\tlog.Info(\"Table migrations achieved\")\n}",
"func InitDb(conf config.Config, reset bool) error {\n\tif !IsOpen() {\n\t\tif err := openAdapter(conf); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn adp.CreateDb(reset)\n}",
"func (p *Pool) initDB() error {\n\t// Create and open the database.\n\tdb, err := database.OpenDB(p.cfg.DBFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.db = db\n\terr = database.CreateBuckets(p.db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check if the pool mode changed since the last run.\n\tvar switchMode bool\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tpbkt := tx.Bucket(database.PoolBkt)\n\t\tif pbkt == nil {\n\t\t\treturn err\n\t\t}\n\n\t\tv := pbkt.Get(database.SoloPool)\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tspMode := binary.LittleEndian.Uint32(v) == 1\n\t\tif p.cfg.SoloPool != spMode {\n\t\t\tswitchMode = true\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the pool mode changed, backup the current database and purge all data\n\t// for a clean slate with the updated pool mode.\n\tif switchMode {\n\t\tpLog.Info(\"Pool mode changed, backing up database before purge.\")\n\t\terr := database.Backup(p.db)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = database.Purge(p.db)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// If the pool mode did not change, upgrade the database if there is a\n\t// pending upgrade.\n\tif !switchMode {\n\t\terr = database.Upgrade(p.db)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (sql *SqlConnection) InitDB() error {\n\n\tvar err error\n\n\t// open a db connection //\n\tsql.Db, err = gorm.Open(\"sqlite3\", \"/var/tmp/tennis.db\")\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect database : \", err.Error())\n\t}\n\tsql.Db.LogMode(true)\n\n\treturn err\n}",
"func InitDb() {\n\t// get connection info from environment\n\thost = os.Getenv(\"POSTGRES_HOST\")\n\tuser = os.Getenv(\"POSTGRES_USER\")\n\tpassword = os.Getenv(\"POSTGRES_PASS\")\n\tdbname = os.Getenv(\"POSTGRES_DBNAME\")\n\tsslMode = os.Getenv(\"POSTGRES_SSL_MODE\")\n\n\t// parse the port\n\tportVal, err := strconv.ParseUint(os.Getenv(\"POSTGRES_PORT\"), 10, 16)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tport = uint16(portVal)\n\n\t// construct a connection string\n\tconnStr := fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s\",\n\t\thost, port, user, password, dbname, sslMode)\n\n\t// open connection to the database\n\tdb, err = sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// check the connection\n\tif err := db.Ping(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// create tables\n\tCreateTables()\n}",
"func InitDB(ctx *cli.Context) error {\n\tchainID := ctx.String(\"chainID\")\n\tregDb := ConnectToDB(ctx.String(\"dataport\"), ctx.String(\"passwd\"), ctx.Int(\"database\"))\n\tif Exists(regDb, \"chainConfig\") {\n\t\tresult := Get(regDb, \"chainConfig\")\n\t\tchainConfig := new(Identity)\n\t\tif err := json.Unmarshal([]byte(result), &chainConfig); err != nil {\n\t\t\tutils.Fatalf(\"Failed to initialise database: %v\", err)\n\t\t}\n\t\tif chainConfig.ID == chainID {\n\t\t\tfmt.Println(\"Database has been initialised by chainID\", chainID, \"sometimes before\")\n\t\t} else {\n\t\t\tutils.Fatalf(\"Database has been initialised by chainID \" + chainConfig.ID)\n\t\t}\n\t} else {\n\t\terr := Set(regDb, \"chainConfig\", &Identity{\n\t\t\tName: \"\",\n\t\t\tID: chainID,\n\t\t\tHashky: \"\",\n\t\t\tExtInfo: \"\",\n\t\t})\n\t\tif err != nil {\n\t\t\tutils.Fatalf(\"Failed to initialise database: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (db *DB) Init() (dbCreated bool, err error) {\n\tdbCreated = false\n\n\t// Ensure root exists\n\terr = os.MkdirAll(db.Root, 0755)\n\tif err != nil {\n\t\treturn dbCreated, err\n\t}\n\n\t// Ensure database exists\n\t_, err = os.Stat(db.DBPath)\n\tif err == nil {\n\t\treturn dbCreated, nil // exists\n\t}\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn dbCreated, err // unexpected error\n\t}\n\n\t// Database does not exist - create\n\tfh, err := os.Create(db.DBPath)\n\tfh.Close()\n\tif err != nil {\n\t\treturn dbCreated, err\n\t}\n\tdbCreated = true\n\n\t// Ensure configfile exists\n\t_, err = os.Stat(db.ConfigPath)\n\tif err == nil {\n\t\t_, err := db.readConfig()\n\t\tif err != nil {\n\t\t\tif err != ErrNotFound {\n\t\t\t\treturn dbCreated, err\n\t\t\t}\n\t\t}\n\t\terr = db.appendConfigString(db.configPlaceholder())\n\t\tif err != nil {\n\t\t\treturn dbCreated, err\n\t\t}\n\t} else {\n\t\t// Create a placeholder config file for domain\n\t\terr = db.writeConfigString(db.configPlaceholder())\n\t\tif err != nil {\n\t\t\treturn dbCreated, err\n\t\t}\n\t}\n\n\treturn dbCreated, nil\n}",
"func InitDatabase(dbPath string, dbFile string, force bool) {\n\tif dbPath == \"\" {\n\t\tDbPath = defaultDbPath\n\t} else {\n\t\tDbPath = dbPath\n\t}\n\tif dbFile == \"\" {\n\t\tDbFile = defaultDbFile\n\t} else {\n\t\tDbFile = dbFile\n\t}\n\n\texisted, err := pathutil.DirExists(DbPath)\n\tcheckError(err)\n\tif existed {\n\t\tif force {\n\t\t\tos.RemoveAll(DbPath)\n\t\t\tlog.Info(\"Remove old dbPath and recreate: %s\", DbPath)\n\t\t\tos.MkdirAll(DbPath, os.ModePerm)\n\t\t} else {\n\t\t\tlog.Info(\"Database directory (%s) existed. Nothing happended. Use --force to reinit\", DbPath)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tos.MkdirAll(DbPath, os.ModePerm)\n\t}\n}",
"func InitDb() {\n\tdbConnection.MustExec(schema)\n}",
"func (c *PostgresClient) InitDB(models []interface{}) (*gorm.DB, error) {\n\tc.LogConfig()\n\terr := c.Connect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.DB.LogMode(c.LogMode)\n\tc.CreateDBExtensions()\n\tc.Migrate(models)\n\treturn c.DB, nil\n}",
"func InitDB(path string) error {\n\tdb, err := sql.Open(\"sqlite3\", path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\tfor _, tableSQL := range SQLCreateTables() {\n\t\tif _, err := db.Exec(tableSQL); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Use Write Ahead Logging which improves SQLite concurrency.\n\t// Requires SQLite >= 3.7.0\n\tif _, err := db.Exec(\"PRAGMA journal_mode = WAL\"); err != nil {\n\t\treturn err\n\t}\n\n\t// Check if the WAL mode was set correctly\n\tvar journalMode string\n\tif err = db.QueryRow(\"PRAGMA journal_mode\").Scan(&journalMode); err != nil {\n\t\tlog.Fatalf(\"Unable to determine sqlite3 journal_mode: %v\", err)\n\t}\n\tif journalMode != \"wal\" {\n\t\tlog.Fatal(\"SQLite Write Ahead Logging (introducted in v3.7.0) is required. See http://perkeep.org/issue/114\")\n\t}\n\n\t_, err = db.Exec(fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, SchemaVersion()))\n\treturn err\n}",
"func initDB(db *bolt.DB) {\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\tlogrus.WithField(\"bucket\", BucketKey).Debug(\"Trying to create the mount bucket\")\n\t\ttx.CreateBucketIfNotExists([]byte(BucketKey))\n\t\treturn nil\n\t})\n}",
"func DBInit() {\n\t// Mode = \"PRODUCTION\"\n\t// if Mode == \"PRODUCTION\" {\n\t// \tDatabaseURL = \"test.sqlite3\"\n\t// \tDatabaseName = \"sqlite3\"\n\t// } else if Mode == \"DEPLOY\" {\n\tDatabaseURL = os.Getenv(\"DATABASE_URL\")\n\tDatabaseName = \"postgres\"\n\t// }\n\n\tdb, err := gorm.Open(DatabaseName, DatabaseURL)\n\tif err != nil {\n\t\tpanic(\"We can't open database!(dbInit)\")\n\t}\n\t//残りのモデルはまだ入れてない。\n\tdb.AutoMigrate(&model.Post{})\n\tdb.AutoMigrate(&model.User{})\n\tdb.AutoMigrate(&model.Room{})\n\tdefer db.Close()\n}",
"func InitDatabase(db *sql.DB) {\n\tcreateLinksTableSQL := `CREATE TABLE IF NOT EXISTS links (\n\t\t\"id\" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\"url\" TEXT,\n\t\t\"created_at\" TEXT\n\t);`\n\n\tstatement, err := db.Prepare(createLinksTableSQL)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating links table: %v\\n\", err)\n\t}\n\tstatement.Exec()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SaveIp writes a model.IP to the database, or updates it if it already exists
|
func (sqlDb *SqliteDB) SaveIp(ip *model.IP) error {
	upsert, err := db.Prepare("INSERT OR REPLACE INTO ip (ip_address, uuid, created_at, updated_at, response_code) VALUES (?, ?, ?, ?, ?)")
	if err != nil {
		return fmt.Errorf("ERROR preparing db insert statement: %w", err)
	}
	// Defer only after the error check: a failed Prepare returns a nil statement, and closing it would panic.
	defer upsert.Close()
_, err = upsert.Exec(ip.IPAddress, ip.UUID, ip.CreatedAt, ip.UpdatedAt, ip.ResponseCode)
	if err != nil {
		return fmt.Errorf("ERROR executing DB insert: %w", err)
	}
return nil
}
|
[
"func (ic *IPCreate) Save(ctx context.Context) (*IP, error) {\n\tif _, ok := ic.mutation.IP(); !ok {\n\t\treturn nil, errors.New(\"ent: missing required field \\\"ip\\\"\")\n\t}\n\tvar (\n\t\terr error\n\t\tnode *IP\n\t)\n\tif len(ic.hooks) == 0 {\n\t\tnode, err = ic.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*IPMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tic.mutation = mutation\n\t\t\tnode, err = ic.sqlSave(ctx)\n\t\t\treturn node, err\n\t\t})\n\t\tfor i := len(ic.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = ic.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, ic.mutation); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn node, err\n}",
"func SaveIPAddr(ipaddr string, amount int, ref db.DBClient) {\n\tjst, _ := time.LoadLocation(\"Asia/Tokyo\")\n\tnow := time.Now().In(jst).Format(\"2006-01-02 15:04:05 -0700 MST\")\n\tuser := User{\n\t\tIPAddr: ipaddr,\n\t\tTime: now,\n\t\tAmount: amount,\n\t}\n\terr := ref.Push(user)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}",
"func (m mgoIPInterfacer) Set(ip IPInfo) error {\n\tctx, _ := context.WithTimeout(context.Background(), 5*time.Second)\n\tc := m.mgo(MongoIPCollection)\n\n\tu, err := c.UpdateOne(ctx, bson.M{\"ip\": ip.IP}, bson.M{\"$set\": ip},\n\t\t(&options.UpdateOptions{}).SetUpsert(true))\n\tif u.UpsertedCount == 0 && u.ModifiedCount == 0 {\n\t\tlog.Println(\"no change made to db\", err)\n\t}\n\treturn err\n}",
"func (test *Test) CreateOrUpdateIP(projectName string, ip models.IP) error {\n\treturn nil\n}",
"func (obj *Sys) VipSave(input CoSysVip, _opt ...map[string]string) (output Result, err error) {\n\tctx := context.Background()\n\treturn obj.VipSaveWithContext(ctx, input, _opt...)\n}",
"func UpdateIP(username, ip string) {\n\torm := get_DBFront()\n\tt := make(map[string]interface{})\n\tt[\"lastip\"] = ip\n\t_, err := orm.SetTable(\"user\").Where(\"username=?\", username).Update(t)\n\tif !check_err(err) {\n\t\tLog(Log_Struct{\"error\", \"DB_Error_Line_309\", err})\n\t}\n}",
"func writeIp(ip string) {\n\terr := ioutil.WriteFile(\"ip.txt\", []byte(ip), 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func InsertIP(db *sql.DB, ip string, name string) {\n\tstmt, err := db.Prepare(\"INSERT INTO raspberrypis (ip, name) VALUES (?, ?)\")\n\tcheckErr(err)\n\t\n\tstmt.Exec(ip, name)\n}",
"func (obj *Sys) BindingVipSave(input CoSysBindingVip, _opt ...map[string]string) (output CoSysBindingVip, err error) {\n\tctx := context.Background()\n\treturn obj.BindingVipSaveWithContext(ctx, input, _opt...)\n}",
"func (w *IPWriter) WriteIP(ip *IP) error {\n\trootUint := (*w).uintTree[0]\n\tipTree, err := w.MakeIPTree(ip, rootUint, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\trootRst, err := w.readRecordset(0)\n\tif err != nil {\n\t\tif err != apperror.ErrNotFound {\n\t\t\treturn err\n\t\t}\n\t\trootRst = &Recordset{\n\t\t\tValue: ipTree[0].Value,\n\t\t}\n\t\terr = w.writeRecordset(rootRst, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = w.bypass(ipTree[0], rootRst, w.writeIfNotFound)\n\treturn err\n}",
"func (o *NetworkingProjectNetadpCreate) SetIp(v []string) {\n\to.Ip = v\n}",
"func (n *hostOnlyNetwork) SaveIPv4(vbox VBoxManager) error {\n\tif n.IPv4.IP != nil && n.IPv4.Mask != nil {\n\t\tif err := vbox.vbm(\"hostonlyif\", \"ipconfig\", n.Name, \"--ip\", n.IPv4.IP.String(), \"--netmask\", net.IP(n.IPv4.Mask).String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func updateIP(username string, ip string) error {\n\t_, err := db.Exec(\"UPDATE users SET ip=? WHERE username=?\", ip, username)\n\treturn err\n}",
"func (obj *User) UserVipSave(input CoUserVip, _opt ...map[string]string) (output CoUserVip, err error) {\n\tctx := context.Background()\n\treturn obj.UserVipSaveWithContext(ctx, input, _opt...)\n}",
"func CreateIP(userID string, ip string, lastUsed time.Time) (IP, error) {\n\tipStruct := IP{\n\t\tUserID: userID,\n\t\tIP: ip,\n\t\tLastUsed: lastUsed,\n\t}\n\t_, err := db.Exec(\"INSERT INTO web_ips(user_id, ip, last_used) VALUES (?, ?, ?)\", userID, ip, lastUsed)\n\tif err != nil {\n\t\treturn IP{}, err\n\t}\n\n\treturn ipStruct, nil\n}",
"func (n *hostOnlyNetwork) Save(vbox VBoxManager) error {\n\tif err := n.SaveIPv4(vbox); err != nil {\n\t\treturn err\n\t}\n\n\tif n.DHCP {\n\t\tvbox.vbm(\"hostonlyif\", \"ipconfig\", n.Name, \"--dhcp\") // not implemented as of VirtualBox 4.3\n\t}\n\n\treturn nil\n}",
"func (o *WafEventNetwork) SetIp(v string) {\n\to.Ip = &v\n}",
"func (nm *networkManager) saveIPConfig(hostIf *net.Interface, extIf *externalInterface) error {\n\t// Save the default routes on the interface.\n\troutes, err := netlink.GetIpRoute(&netlink.Route{Dst: &net.IPNet{}, LinkIndex: hostIf.Index})\n\tif err != nil {\n\t\tlog.Printf(\"[net] Failed to query routes: %v.\", err)\n\t\treturn err\n\t}\n\n\tfor _, r := range routes {\n\t\tif r.Dst == nil {\n\t\t\tif r.Family == unix.AF_INET {\n\t\t\t\textIf.IPv4Gateway = r.Gw\n\t\t\t} else if r.Family == unix.AF_INET6 {\n\t\t\t\textIf.IPv6Gateway = r.Gw\n\t\t\t}\n\t\t}\n\n\t\textIf.Routes = append(extIf.Routes, (*route)(r))\n\t}\n\n\t// Save global unicast IP addresses on the interface.\n\taddrs, err := hostIf.Addrs()\n\tfor _, addr := range addrs {\n\t\tipAddr, ipNet, err := net.ParseCIDR(addr.String())\n\t\tipNet.IP = ipAddr\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !ipAddr.IsGlobalUnicast() {\n\t\t\tcontinue\n\t\t}\n\n\t\textIf.IPAddresses = append(extIf.IPAddresses, ipNet)\n\n\t\tlog.Printf(\"[net] Deleting IP address %v from interface %v.\", ipNet, hostIf.Name)\n\n\t\terr = netlink.DeleteIpAddress(hostIf.Name, ipAddr, ipNet)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Printf(\"[net] Saved interface IP configuration %+v.\", extIf)\n\n\treturn err\n}",
"func (handler *Handler) UpdateIP(hostname, currentIP, lastIP string) {\n\n\thandler.updateDNS(lastIP, currentIP, hostname, \"remove\")\n\thandler.updateDNS(lastIP, currentIP, hostname, \"add\")\n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetIp returns a model.IP if the record is stored in the database, or nil if it does not exist
|
func (sqlDb *SqliteDB) GetIp(ipAddr string) (*model.IP, error) {
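	// SELECT * depends on the ip table's column order: ip_address, uuid, created_at, updated_at, response_code.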
row := db.QueryRow("SELECT * FROM ip WHERE ip_address = ?", ipAddr)
ip := model.IP{}
err := row.Scan(&ip.IPAddress, &ip.UUID, &ip.CreatedAt, &ip.UpdatedAt, &ip.ResponseCode)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, nil
		}
		return nil, err
	}
return &ip, nil
}
|
[
"func (test *Test) GetIP(projectName string, ip string) (models.IP, error) {\n\treturn tests.NormalIPs[0], nil\n}",
"func (d *Driver) GetIP() (string, error) {\n\td.connectAPI()\n\treturn d.driver.GetEth0IPv4(d.Node, d.VMID)\n}",
"func (s Store) GetIP(mac net.HardwareAddr) (ip net.IP, err error) {\n\tl := &Lease{}\n\tl, err = s.leases.Mac(mac)\n\tif err != nil {\n\t\tlogger.Error(\"lease error %s\", err)\n\t\treturn nil, err\n\t}\n\tip = net.ParseIP(l.IP)\n\tlogger.Critical(\"Lease IP : %s\", ip)\n\treturn ip, nil\n}",
"func (o *NetworkHyperFlexNetworkAddress) GetIp() string {\n\tif o == nil || o.Ip == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Ip\n}",
"func (na NetAddr) GetIP() (res uint32) {\n\tres = uint32(C.netaddr_get_ip(na.inner))\n\truntime.KeepAlive(na)\n\treturn\n}",
"func (o *WafEventNetwork) GetIp() string {\n\tif o == nil || o.Ip == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Ip\n}",
"func (d *driverMock) GetInstanceIP(ctx context.Context, id string) (string, error) {\n\tif d.GetInstanceIPErr != nil {\n\t\treturn \"\", d.GetInstanceIPErr\n\t}\n\tif d.cfg.UsePrivateIP {\n\t\treturn \"private_ip\", nil\n\t}\n\treturn \"ip\", nil\n}",
"func (m mgoIPInterfacer) Get(ip string) (IPInfo, error) {\n\tctx, _ := context.WithTimeout(context.Background(), 5*time.Second)\n\tc := m.mgo(MongoIPCollection)\n\n\tvar ipInfo IPInfo\n\n\terr := c.FindOne(ctx, bson.M{\"ip\": ip}).Decode(&ipInfo)\n\tlog.Println(\"using mongo as interfacer\")\n\treturn ipInfo, err\n}",
"func (client *SQLClient) GetIP() net.IP {\n\tif colon := strings.Index(client.config.Addr, \":\"); colon != -1 {\n\t\treturn net.ParseIP(client.config.Addr[0:colon])\n\t} else {\n\t\treturn net.ParseIP(client.config.Addr)\n\t}\n}",
"func (m *ServicePrincipalRiskDetection) GetIpAddress()(*string) {\n val, err := m.GetBackingStore().Get(\"ipAddress\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (l Lease) GetIP() (ip net.IP) {\n\treturn net.ParseIP(l.IP)\n}",
"func (ip *IPAddress)GetIpAddress() (ipAddress string, err error){\n\tnetInterfaces, err := net.Interfaces()\n\tif err != nil{\n\t\tlog4go.Error(err)\n\t\treturn\n\t}\n\tLoop:\n\tfor i := 0; i < len(netInterfaces); i++{\n\t\tif(netInterfaces[i].Flags & net.FlagUp) != 0{\n\t\t\taddrs, _ := netInterfaces[i].Addrs()\n\t\t\tfor _, address := range addrs{\n\t\t\t\tif ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback(){\n\t\t\t\t\tif ipnet.IP.To4()!=nil{\n\t\t\t\t\t\tipAddress = (ipnet.IP.String())\n\t\t\t\t\t\tip.IpAddress = ipAddress\n\t\t\t\t\t\tip.IpValid = true\n\t\t\t\t\t\tbreak Loop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func (l *L7) GetIP() string {\n\tif l.fw != nil {\n\t\treturn l.fw.IPAddress\n\t}\n\tif l.fws != nil {\n\t\treturn l.fws.IPAddress\n\t}\n\treturn \"\"\n}",
"func (o *WafEventNetwork) GetIpOk() (*string, bool) {\n\tif o == nil || o.Ip == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Ip, true\n}",
"func (o *IdentityVerificationUserData) GetIpAddress() string {\n\tif o == nil || o.IpAddress.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn *o.IpAddress.Get()\n}",
"func (o *NetworkHyperFlexNetworkAddress) GetIpOk() (*string, bool) {\n\tif o == nil || o.Ip == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Ip, true\n}",
"func (s *Storage) GetOne(value string) (*models.IP, error) {\n\tses := s.GetDBSession()\n\tdefer ses.Close()\n\tt := models.NewIP()\n\terr := ses.DB(s.database).C(s.table).Find(bson.M{\"data\": value}).One(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn t, nil\n}",
"func GetIp() string {\n\n\tout, err := exc.ExecuteWithBash(\"ip route get 1.1.1.1 | grep -oP 'src \\\\K\\\\S+'\")\n\n\tip := strings.TrimSpace(out)\n\tif log.Check(log.WarnLevel, \"Getting RH IP \"+ip, err) {\n\t\treturn \"\"\n\t}\n\n\treturn ip\n}",
"func (_class PIFClass) GetIP(sessionID SessionRef, self PIFRef) (_retval string, _err error) {\n\t_method := \"PIF.get_IP\"\n\t_sessionIDArg, _err := convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_selfArg, _err := convertPIFRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"self\"), self)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_result, _err := _class.client.APICall(_method, _sessionIDArg, _selfArg)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_retval, _err = convertStringToGo(_method + \" -> \", _result.Value)\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
formatUnix formats a Unix timestamp into a human-readable string according to the timeFormat global variable.
|
func formatUnix(unixTime int64) string {
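	// time.Unix treats the value as whole seconds since the Unix epoch (the nanoseconds argument is 0).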
t := time.Unix(unixTime, 0)
return t.Format(timeFormat)
}
|
[
"func FormatUnix(timestamp int64, layout string) string {\n\treturn time.Unix(timestamp, 0).Format(layout)\n}",
"func FormatUnixDateTime(timestamp int64) string {\n\treturn time.Unix(timestamp, 0).Format(DateTimeFormat)\n}",
"func FormatUnixDate(timestamp int64) string {\n\treturn time.Unix(timestamp, 0).Format(DateFormat)\n}",
"func UnixtimeToString(unixTime int64, interval string) string {\n\tt := time.Unix(unixTime, 0)\n\tinterval = strings.ToLower(interval)\n\tswitch interval {\n\tcase \"second\":\n\t\treturn t.Format(\"Jan _2 15:04:05 2006\")\n\tcase \"minute\":\n\t\treturn t.Format(\"Jan _2 15:04 2006\")\n\tcase \"hour\":\n\t\treturn t.Format(\"Jan _2 15 2006\")\n\tcase \"day\":\n\t\treturn t.Format(\"Jan _2 2006\")\n\tcase \"week\":\n\t\tyear, week := t.ISOWeek()\n\t\t//return strconv.FormatInt(int64(year), 10) + \" \" + strconv.FormatInt(int64(week), 10)\n\t\treturn fmt.Sprintf(\"%d %d\", year, week)\n\tcase \"month\":\n\t\treturn t.Format(\"Jan 2006\")\n\tcase \"year\":\n\t\treturn t.Format(\"2006\")\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"provided interval '%s' is not implemented\", interval))\n\t}\n}",
"func (t *TimePeriod) FormatStartUnix() string {\n\treturn strconv.FormatInt(t.Start.Unix(), 10)\n}",
"func NowTimeUnixStr() string { return time.Now().Format(time.UnixDate) }",
"func formatUnixNanoTime(nano int64) string {\n\tt := time.Unix(0, nano)\n\treturn formatTime(t)\n}",
"func parseUnixTimeString(ref *ShapeRef, memName, v string) string {\n\tref.API.AddSDKImport(\"private/protocol\")\n\treturn fmt.Sprintf(\"%s: %s,\\n\", memName, inlineParseModeledTime(protocol.UnixTimeFormatName, v))\n}",
"func UnixTimestamp(sec int64, layout string) string {\n\treturn time.Unix(sec, 0).Format(layout)\n}",
"func parseUnix(str string, def time.Time) (time.Time, error) {\n\tif len(str) == 0 {\n\t\treturn def, nil\n\t}\n\tunix, err := strconv.ParseInt(str, 10, 64)\n\tif err != nil {\n\t\treturn def, err\n\t}\n\treturn time.Unix(unix, 0), nil\n}",
"func getUnixTimeStamp() string {\n\treturn fmt.Sprintf(\"%d\", time.Now().Unix())\n}",
"func daemonUnixTime(c *testing.T) string {\n\tc.Helper()\n\treturn parseEventTime(daemonTime(c))\n}",
"func timeToFormat(t int64, f string) string {\n\tutc, err := time.LoadLocation(\"UTC\")\n\tif err != nil {\n\t\tlog.Println(\"time.LoadLocation failed:\", err)\n\t\treturn \"\"\n\t}\n\tparsedTime := time.Unix(t, 0)\n\treturn escape(parsedTime.In(utc).Format(f))\n}",
"func putUnix(b []byte, sec int64, nsec int32) {\n\tb[11] = byte(nsec)\n\tb[0] = byte(sec >> 56)\n\tb[1] = byte(sec >> 48)\n\tb[2] = byte(sec >> 40)\n\tb[3] = byte(sec >> 32)\n\tb[4] = byte(sec >> 24)\n\tb[5] = byte(sec >> 16)\n\tb[6] = byte(sec >> 8)\n\tb[7] = byte(sec)\n\tb[8] = byte(nsec >> 24)\n\tb[9] = byte(nsec >> 16)\n\tb[10] = byte(nsec >> 8)\n}",
"func Unix(sec int64, nsec int64) Time {}",
"func UnixTime(ts string) int64 {\n\t// time is unix since epoch\n\tif len(ts) == 10 { // unix time\n\t\ttstamp, _ := strconv.ParseInt(ts, 10, 64)\n\t\treturn tstamp\n\t}\n\t// YYYYMMDD, always use 2006 as year 01 for month and 02 for date since it is predefined int Go parser\n\tconst layout = \"20060102\"\n\tt, err := time.Parse(layout, ts)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn int64(t.Unix())\n}",
"func (t Time) Unix() int64 {}",
"func MysqlTimeToUnix(ts string) int64 {\n\tloc, _ := time.LoadLocation(\"Local\")\n\tt, _ := time.ParseInLocation(goMysqlTimeFormat, ts, loc)\n\treturn t.Unix()\n}",
"func NowTimeUnix() uint64 {\n\treturn uint64(time.Now().Unix())\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PrepayBalanceKey turns an address into the key used to get the prepaid balance from the sds store
|
func PrepayBalanceKey(acc []byte) []byte {
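	// The key is the constant PrepayBalancePrefix followed by the raw account address bytes.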
return append(PrepayBalancePrefix, acc...)
}
|
[
"func GetBalanceKey(address, token types.Address) string {\n\treturn states.NewContractDataKeyBuilder(types.AddressSize*3 + 1).\n\t\tPutBytes(common.DexAddress.ToArray()).\n\t\tPutByte(KeyPrefixBalance).\n\t\tPutBytes(address.ToArray()).\n\t\tPutBytes(token.ToArray()).\n\t\tGetKey()\n}",
"func initStateKeyAddr(\n\taccountState AccountState,\n\tprivateKey crypto.PrivateKey,\n\tinitBalance *big.Int,\n\tbc blockchain.Blockchain,\n\tsf factory.Factory,\n) (crypto.PrivateKey, string, error) {\n\tretKey := privateKey\n\tretAddr := \"\"\n\tswitch accountState {\n\tcase AcntCreate:\n\t\taddr := retKey.PublicKey().Address()\n\t\tif addr == nil {\n\t\t\treturn nil, \"\", errors.New(\"failed to get address\")\n\t\t}\n\t\tretAddr = addr.String()\n\n\tcase AcntExist:\n\t\taddr := retKey.PublicKey().Address()\n\t\tif addr == nil {\n\t\t\treturn nil, \"\", errors.New(\"failed to get address\")\n\t\t}\n\t\tretAddr = addr.String()\n\t\tctx := genesis.WithGenesisContext(context.Background(), bc.Genesis())\n\t\texistState, err := accountutil.AccountState(ctx, sf, addr)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tinitBalance.Set(existState.Balance)\n\tcase AcntNotRegistered:\n\t\tsk, err := crypto.GenerateKey()\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\taddr := sk.PublicKey().Address()\n\t\tif addr == nil {\n\t\t\treturn nil, \"\", errors.New(\"failed to get address\")\n\t\t}\n\t\tretAddr = addr.String()\n\t\tretKey = sk\n\tcase AcntBadAddr:\n\t\trand.Seed(time.Now().UnixNano())\n\t\tb := make([]byte, 41)\n\t\tfor i := range b {\n\t\t\tb[i] = byte(65 + rand.Intn(26))\n\t\t}\n\t\tretAddr = string(b)\n\t}\n\treturn retKey, retAddr, nil\n}",
"func GenerateBargainKey(addr string) string {\n\treturn fmt.Sprintf(\"bargain_addr_%s\", addr)\n}",
"func getAddressFromPrivKey(prikey *btcec.PrivateKey, isWitSeg bool) string {\n\tpriKey := btcec.PrivateKey(*prikey)\n\tesdsaPubKey := priKey.PubKey()\n\tpubkeyBytes := esdsaPubKey.SerializeCompressed()\n\n\tvar address string\n\tif isWitSeg {\n\t\taddress = hdwallet.ToBTC(pubkeyBytes, true)\n\t} else {\n\t\taddress = hdwallet.ToBTC(pubkeyBytes, false)\n\t}\n\n\treturn address\n}",
"func AddressFromKey(key []byte) []byte {\n\treturn key[1:] // remove prefix bytes\n}",
"func (rpcServer * RPCServer)transferKeysToPredecessor(){\n\t//open a read transaction\n\trpcServer.boltDB.View(func(tx *bolt.Tx) error {\n\t\tvar cursor *bolt.Cursor\n\t\tcursor = tx.Cursor()\n\t\t\n\t\tvar bucket *bolt.Bucket\n\t\t\n\n\t\t//traverse through all keys\n\t\tfor k, _ := cursor.First(); k != nil; k, _ = cursor.Next() {\n\t\t\tbucket = tx.Bucket(k)\n\t\t\t\n\t\t\t//traverse through all relation and value pairs\n\t\t\tbucket.ForEach(func(relation, value []byte) error {\n\t\t\t\t//create paramter - successor\n\t\t\t\n\t\t\t\t//add to array of interface\n\t\t\t\t\n\t\t\t\tparameterArray := make([]interface{},3)\n\t\t\t\tparameterArray[0] = string(k)\n\t\t\t\tparameterArray[1] = string(relation)\n\t\t\t\tparameterArray[2] = string(value)\n\t\t\t\t\n\t\t\t\t//if hash value less than predecessor value - then only insert\n\t\t\t\tkeyRelationHash := rpcServer.chordNode.GetHashFromKeyAndValue(string(k),string(relation));\n\t\t\t\tif keyRelationHash > rpcServer.chordNode.Predecessor{\n\t\t\t\t\treturn nil\n\t\t\t\t} \n\n\t\t\t\t//create json message\n\t\t\t\tjsonMessage := rpcclient.RequestParameters{}\n\t\t\t\tjsonMessage.Method = \"Insert\";\n\t\t\t\tjsonMessage.Params = parameterArray\n\t\t\t\t\n\t\t\t\tjsonBytes,err :=json.Marshal(jsonMessage)\n\t\t\t\tif err!=nil{\n\t\t\t\t\trpcServer.logger.Println(err)\n\t\t\t\t\treturn err\n\t\t\t\t} \n \n\t\t\t\trpcServer.logger.Println(string(jsonBytes))\n\n\t\t\t\tclientServerInfo,err := rpcServer.chordNode.PrepareClientServerInfo(rpcServer.chordNode.Predecessor)\n\t\t\t\tif err!=nil{\n\t\t\t\t\t\n\t\t\t\t\trpcServer.logger.Println(err)\n\t\t\t\t\treturn nil\n\t\t\t\t\t\n\t\t\t\t}\n\t\t\t\tclient := &rpcclient.RPCClient{}\n\t\t\t\terr, _ = client.RpcCall(clientServerInfo, string(jsonBytes))\n\t\t\t\t\n\t\t\t\tif err != nil {\n\t\t\t\t\trpcServer.logger.Println(err)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t\treturn nil\n\t})\n\n}",
"func createDSAPrivkey(X *big.Int) (k *dsa.PrivateKey) {\n\tif X.Cmp(dsap) == -1 {\n\t\tY := new(big.Int)\n\t\tY.Exp(dsag, X, dsap)\n\t\tk = &dsa.PrivateKey{\n\t\t\tPublicKey: dsa.PublicKey{\n\t\t\t\tParameters: param,\n\t\t\t\tY: Y,\n\t\t\t},\n\t\t\tX: X,\n\t\t}\n\t}\n\treturn\n}",
"func PubKeyToAddress1(pub []byte) (add string, err error) {\n if len(pub) != 33 {//to judge the length of the public key, the default is 33\n //fmt.Println(\"the length of the imported public key is wrong, please check the import\")\n return \"\", errors.New(\"the length of the imported public key is wrong, please check the import\")\n }\n if pub[0]!=0x02 && pub[0]!=0x03 {//to judge whether the public key's format is right\n //fmt.Println(\"the format of the imported public key is wrong, please check the import\")\n return \"\", errors.New(\"the format of the imported public key is wrong, please check the import\")\n }\n pubhash := btcutil.Hash160(pub)//to get the second hash(ripemd160) value of the public key\n return base58.CheckEncode(pubhash[:ripemd160.Size], 000),nil//transform the address' style to string\n}",
"func GenerateSaveCoinKey(\n\tkeybase keyring.Keyring,\n\tkeyName, mnemonic string,\n\toverwrite bool,\n\talgo keyring.SignatureAlgo,\n) (sdk.AccAddress, string, error) {\n\texists := false\n\t_, err := keybase.Key(keyName)\n\tif err == nil {\n\t\texists = true\n\t}\n\n\t// ensure no overwrite\n\tif !overwrite && exists {\n\t\treturn sdk.AccAddress{}, \"\", fmt.Errorf(\"key already exists, overwrite is disabled\")\n\t}\n\n\tif exists {\n\t\tif err := keybase.Delete(keyName); err != nil {\n\t\t\treturn sdk.AccAddress{}, \"\", fmt.Errorf(\"failed to overwrite key\")\n\t\t}\n\t}\n\n\tvar (\n\t\trecord *keyring.Record\n\t\tsecret string\n\t)\n\n\t// generate or recover a new account\n\tif mnemonic != \"\" {\n\t\tsecret = mnemonic\n\t\trecord, err = keybase.NewAccount(keyName, mnemonic, keyring.DefaultBIP39Passphrase, sdk.GetConfig().GetFullBIP44Path(), algo)\n\t} else {\n\t\trecord, secret, err = keybase.NewMnemonic(keyName, keyring.English, sdk.GetConfig().GetFullBIP44Path(), keyring.DefaultBIP39Passphrase, algo)\n\t}\n\tif err != nil {\n\t\treturn sdk.AccAddress{}, \"\", err\n\t}\n\n\taddr, err := record.GetAddress()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\treturn addr, secret, nil\n}",
"func insertKey(\n\tctx context.Context, tx types.StagingQuerier, stagingDB ident.Schema, pemBytes []byte,\n) error {\n\tkeyTable := ident.NewTable(stagingDB, PublicKeysTable)\n\t_, err := tx.Exec(ctx,\n\t\tfmt.Sprintf(\"INSERT INTO %s (public_key) VALUES ($1)\", keyTable),\n\t\tpemBytes,\n\t)\n\treturn errors.WithStack(err)\n}",
"func (client ManagementClient) BackupKeyPreparer(vaultBaseURL string, keyName string) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"vaultBaseUrl\": vaultBaseURL,\n\t}\n\n\tpathParameters := map[string]interface{}{\n\t\t\"key-name\": autorest.Encode(\"path\", keyName),\n\t}\n\n\tconst APIVersion = \"2015-06-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"{vaultBaseUrl}\", urlParameters),\n\t\tautorest.WithPathParameters(\"/keys/{key-name}/backup\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare(&http.Request{})\n}",
"func PbkFromHex(pbk string) (ed25519.PublicKey, error) {\n\tk, err := hex.DecodeString(pbk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ed25519.PublicKey(k), nil\n}",
"func AddressToPubkey(address string) (pubkey []byte, err error) {\n\terr = errors.New(\"invalid address\")\n\tswitch len(address) {\n\tcase 64:\n\t\tif address[:4] != \"xrb_\" && address[:4] != \"ban_\" {\n\t\t\treturn\n\t\t}\n\t\taddress = address[4:]\n\tcase 65:\n\t\tif address[:5] != \"nano_\" {\n\t\t\treturn\n\t\t}\n\t\taddress = address[5:]\n\tdefault:\n\t\treturn\n\t}\n\tb32 := base32.NewEncoding(\"13456789abcdefghijkmnopqrstuwxyz\")\n\tif pubkey, err = b32.DecodeString(\"1111\" + address[:52]); err != nil {\n\t\treturn\n\t}\n\tpubkey = pubkey[3:]\n\tchecksum, err := checksum(pubkey)\n\tif err != nil {\n\t\treturn\n\t}\n\tif b32.EncodeToString(checksum) != address[52:] {\n\t\terr = errors.New(\"checksum mismatch\")\n\t}\n\treturn\n}",
"func (p *btcParams) P2PK(pub []byte) (string, error) {\n\tr, err := btcutil.NewAddressPubKey(pub, p.params())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn r.String(), nil\n}",
"func GenerateFundKey(addr string) string {\n\treturn fmt.Sprintf(\"fund_addr_%s\", addr)\n}",
"func (server *Server) AddContractKey(ctx context.Context, key *wallet.Key) error {\r\n\tserver.walletLock.Lock()\r\n\r\n\trawAddress, err := key.Key.RawAddress()\r\n\tif err != nil {\r\n\t\tserver.walletLock.Unlock()\r\n\t\treturn err\r\n\t}\r\n\r\n\tnode.Log(ctx, \"Adding key : %s\",\r\n\t\tbitcoin.NewAddressFromRawAddress(rawAddress, server.Config.Net))\r\n\r\n\tserver.contractAddresses = append(server.contractAddresses, rawAddress)\r\n\r\n\tif server.SpyNode != nil {\r\n\t\thashes, err := rawAddress.Hashes()\r\n\t\tif err != nil {\r\n\t\t\tserver.walletLock.Unlock()\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\tfor _, hash := range hashes {\r\n\t\t\tserver.SpyNode.SubscribePushDatas(ctx, [][]byte{hash[:]})\r\n\t\t}\r\n\t}\r\n\r\n\tserver.walletLock.Unlock()\r\n\r\n\tif err := server.SaveWallet(ctx); err != nil {\r\n\t\treturn err\r\n\t}\r\n\treturn nil\r\n}",
"func (client *KeyVaultClient) backupKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientBackupKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/backup\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *KeyVaultClient) backupKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientBackupKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/backup\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func GenerateCoinKey(addr string) string {\n\treturn fmt.Sprintf(\"coin_addr_%s\", addr)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
FileStoreKey turns an address into the key used to get it from the account store
|
func FileStoreKey(sender []byte) []byte {
return append(FileStoreKeyPrefix, sender...)
}
|
[
"func AccountStoreKey(addr sdk.AccAddress) []byte {\n\treturn append([]byte(\"address:\"), addr.Bytes()...)\n}",
"func keyFileName(keyAddr Address) string {\n\treturn keyAddr.Hex()\n}",
"func AddressStoreKey(addr sdk.AccAddress) []byte {\n\treturn append(AddressStoreKeyPrefix, addr.Bytes()...)\n}",
"func AddressFromKey(key []byte) []byte {\n\treturn key[1:] // remove prefix bytes\n}",
"func accountNumberStoreKey(accountNumber uint64) []byte {\n\treturn append(types.AccountNumberStoreKeyPrefix, sdk.Uint64ToBigEndian(accountNumber)...)\n}",
"func ChainLinksStoreKey(user, chainName, address string) []byte {\n\treturn append(UserChainLinksPrefix(user), []byte(chainName+address)...)\n}",
"func ShowKey(filename, password string) (pubkey string, err error) {\n\trpubkey, _, err := ReadKey(filename, password)\n\tif err != nil {\n\t\treturn\n\t}\n\tpubkey = hex.EncodeToString(rpubkey)\n\treturn\n}",
"func (ks *KeyStore) extractKeyFromFile(accAddress, accPath, pass string) (*Key, error) {\n\t// gets the byte data from a file\n\tkeyData, err := common.LoadDataFromFile(accPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Unmarshals the json data to a struct\n\tkey, err := UnmarshalKey(keyData, pass)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Given passphrase could be wrong. Error: %s\", err)\n\t}\n\t// Make sure we're really operating on the requested key (no swap attacks)\n\tif key.Address != accAddress {\n\t\treturn nil, fmt.Errorf(\"key content mismatch: have account %x, want %x\", key.Address, accAddress)\n\t}\n\treturn key, nil\n}",
"func importKeyfile(kmgr *idmgr.Manager, file string) error {\n\tb, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeys := kmgr.ImportKeys(b)\n\tif keys == nil {\n\t\treturn errors.New(\"Could not read file.\")\n\t}\n\n\tfor addr, name := range keys {\n\t\tfmt.Printf(\"Imported address %s %s\\n\", addr, name)\n\t}\n\n\treturn nil\n}",
"func (k Keeper) getInviteStoreKey(user sdk.AccAddress) []byte {\n\treturn []byte(types.InviteStorePrefix + user.String())\n}",
"func (mb *MobileBackup) FileKey(rec Record) []byte {\n\tkey := mb.Keybag.GetClassKey(uint32(rec.ProtClass))\n\tif key != nil {\n\t\treturn aeswrap.Unwrap(key, rec.Key[4:])\n\t}\n\tlog.Println(\"No key for protection class\", rec.ProtClass)\n\n\treturn nil\n}",
"func RelationshipsStoreKey(user, subspace, recipient string) []byte {\n\treturn append(UserRelationshipsSubspacePrefix(user, subspace), []byte(recipient)...)\n}",
"func KeyFile(dir string) string {\n\treturn mustWriteToFile(dir, key)\n}",
"func keyIDFromAddr(addr string, group *key.Group) *key.Identity {\n\tids := group.Identities()\n\tfor _, id := range ids {\n\t\tif id.Address() == addr {\n\t\t\treturn id\n\t\t}\n\t}\n\tfatal(\"Could not retrive the node you are trying to contact in the group file.\")\n\treturn nil\n}",
"func AddrKey() (key []byte) {\n\tkey = append(key, []byte(\"mavl-\"+issuanceE.IssuanceX+\"-addr\")...)\n\treturn key\n}",
"func ContactIdentityKey(id string) ([]byte, error) {\n\ts := textSecureStore\n\tidkeyfile := filepath.Join(s.identityDir, \"remote_\"+id)\n\tif !exists(idkeyfile) {\n\t\treturn nil, UnknownContactError{id}\n\t}\n\tb, err := s.readFile(idkeyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append([]byte{5}, b...), nil\n}",
"func AccountNumberStoreKey(accountNumber uint64) []byte {\n\treturn append(AccountNumberStoreKeyPrefix, sdk.Uint64ToBigEndian(accountNumber)...)\n}",
"func writeKeyToFile(keyBytes []byte, saveFileTo string) error {\n err := ioutil.WriteFile(saveFileTo, keyBytes, 0600)\n if err != nil {\n return err\n }\n\n log.Printf(\"Key saved to: %s\", saveFileTo)\n return nil\n}",
"func ImportKeyStore(keyPath, name, passphrase string) (string, error) {\n\tkeyPath, err := filepath.Abs(keyPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tkeyJSON, readError := ioutil.ReadFile(keyPath)\n\tif readError != nil {\n\t\treturn \"\", readError\n\t}\n\tif name == \"\" {\n\t\tname = generateName() + \"-imported\"\n\t\tfor store.DoesNamedAccountExist(name) {\n\t\t\tname = generateName() + \"-imported\"\n\t\t}\n\t} else if store.DoesNamedAccountExist(name) {\n\t\treturn \"\", fmt.Errorf(\"account %s already exists\", name)\n\t}\n\tkey, err := keystore.DecryptKey(keyJSON, passphrase)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tb32 := address.ToBech32(key.Address)\n\thasAddress := store.FromAddress(b32) != nil\n\tif hasAddress {\n\t\treturn \"\", fmt.Errorf(\"address %s already exists in keystore\", b32)\n\t}\n\tuDir, _ := homedir.Dir()\n\tnewPath := filepath.Join(uDir, common.DefaultConfigDirName, common.DefaultConfigAccountAliasesDirName, name, filepath.Base(keyPath))\n\terr = writeToFile(newPath, string(keyJSON))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn name, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
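The FileStoreKey entry above illustrates the prefixed-key convention used by Cosmos-SDK-style stores. A minimal runnable sketch of the round trip follows; the prefix byte here is a hypothetical stand-in rather than a value taken from the source, and AddressFromKey mirrors the negative of the same name listed earlier:

package main

import "fmt"

// Hypothetical one-byte prefix; real modules define their own.
var FileStoreKeyPrefix = []byte{0x01}

// FileStoreKey turns an address into the key used to get it from
// the account store by prepending the module prefix.
func FileStoreKey(sender []byte) []byte {
	return append(FileStoreKeyPrefix, sender...)
}

// AddressFromKey recovers the address by stripping the prefix byte.
func AddressFromKey(key []byte) []byte {
	return key[1:]
}

func main() {
	addr := []byte{0xAA, 0xBB, 0xCC}
	key := FileStoreKey(addr)
	fmt.Printf("key=%x address=%x\n", key, AddressFromKey(key))
}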
SetStatistics sets the container statistics
|
func (cs *Stats) SetStatistics(s StatsEntry) {
cs.mutex.Lock()
defer cs.mutex.Unlock()
s.Container = cs.Container
cs.StatsEntry = s
}
|
[
"func (t *Wrapper) SetStatistics(stats *connstats.Statistics) {\n\tt.stats.Store(stats)\n}",
"func (s *Cluster) SetStatistics(v []*KeyValuePair) *Cluster {\n\ts.Statistics = v\n\treturn s\n}",
"func (s *GetMetricStatisticsInput) SetStatistics(v []*string) *GetMetricStatisticsInput {\n\ts.Statistics = v\n\treturn s\n}",
"func (o *WafGetEventStatisticsResponse) SetStatistics(v EventStatistics) {\n\to.Statistics = &v\n}",
"func (container *container) Statistics() (Statistics, error) {\r\n\tproperties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics)\r\n\tif err != nil {\r\n\t\treturn Statistics{}, convertSystemError(err, container)\r\n\t}\r\n\r\n\treturn properties.Statistics, nil\r\n}",
"func (r *Redis) SetStats(node string, stats types.Stats) error {\n\terr := r.c.ZAdd(string(keys.StatsList), redis.Z{\n\t\tMember: node,\n\t\tScore: stats.CPU.SystemLoad / float64(stats.CPU.Cores),\n\t}).Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := json.Marshal(stats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.c.Set(keys.PrefixNodeStats.Fmt(node), b, 0).Err()\n}",
"func (n *Nodes) setStats(nodeId string, stats types.FuzzerStats) {\n\tn.statsLock.Lock()\n\tdefer n.statsLock.Unlock()\n\tn.Stats[nodeId] = stats\n\n\tn.updatesLock.Lock()\n\tdefer n.updatesLock.Unlock()\n\tn.updates[nodeId] = time.Now()\n}",
"func (r *RollingStoreStats) Set(stats *pdpb.StoreStats) {\n\tstatInterval := stats.GetInterval()\n\tinterval := float64(statInterval.GetEndTimestamp() - statInterval.GetStartTimestamp())\n\tif interval == 0 {\n\t\treturn\n\t}\n\tr.Lock()\n\tdefer r.Unlock()\n\treadQueryNum, writeQueryNum := core.GetReadQueryNum(stats.QueryStats), core.GetWriteQueryNum(stats.QueryStats)\n\tr.timeMedians[utils.StoreWriteBytes].Set(float64(stats.BytesWritten) / interval)\n\tr.timeMedians[utils.StoreReadBytes].Set(float64(stats.BytesRead) / interval)\n\tr.timeMedians[utils.StoreWriteKeys].Set(float64(stats.KeysWritten) / interval)\n\tr.timeMedians[utils.StoreReadKeys].Set(float64(stats.KeysRead) / interval)\n\tr.timeMedians[utils.StoreReadQuery].Set(float64(readQueryNum) / interval)\n\tr.timeMedians[utils.StoreWriteQuery].Set(float64(writeQueryNum) / interval)\n\tr.movingAvgs[utils.StoreCPUUsage].Set(collect(stats.GetCpuUsages()))\n\tr.movingAvgs[utils.StoreDiskReadRate].Set(collect(stats.GetReadIoRates()))\n\tr.movingAvgs[utils.StoreDiskWriteRate].Set(collect(stats.GetWriteIoRates()))\n}",
"func (c *Conn) SetStats(stats *Stats) {\n\tc.Stats = stats\n}",
"func (c *ColumnChunkMetaData) StatsSet() (bool, error) {\n\tif !c.columnMeta.IsSetStatistics() || c.descr.SortOrder() == schema.SortUNKNOWN {\n\t\treturn false, nil\n\t}\n\n\tif c.possibleStats == nil {\n\t\tc.possibleStats = makeColumnStats(c.columnMeta, c.descr, c.mem)\n\t}\n\n\tencoded, err := c.possibleStats.Encode()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn c.writerVersion.HasCorrectStatistics(c.Type(), c.descr.LogicalType(), encoded, c.descr.SortOrder()), nil\n}",
"func (r *registry) SetNetworkStats(chain vaa.ChainID, data *gossipv1.Heartbeat_Network) {\n\tr.mu.Lock()\n\tdata.Id = uint32(chain)\n\tr.networkStats[chain] = data\n\tr.mu.Unlock()\n}",
"func (p* PingerAgent) setStatisticsHandler() {\n\tstatsHandler := p.OnProcessComplete\n\tif statsHandler != nil {\n\t\tstats := p.GetPingStatistics()\n\t\tstatsHandler(stats)\n\t}\n}",
"func (s *Service) SetStat(c context.Context, st *api.Stat) (err error) {\n\ts.arc.SetStat3(c, &api.Stat{\n\t\tAid: st.Aid,\n\t\tView: int32(st.View),\n\t\tDanmaku: int32(st.Danmaku),\n\t\tReply: int32(st.Reply),\n\t\tFav: int32(st.Fav),\n\t\tCoin: int32(st.Coin),\n\t\tShare: int32(st.Share),\n\t\tNowRank: int32(st.NowRank),\n\t\tHisRank: int32(st.HisRank),\n\t\tLike: int32(st.Like),\n\t\tDisLike: 0,\n\t})\n\treturn\n}",
"func (m *EdiscoverySearch) SetLastEstimateStatisticsOperation(value EdiscoveryEstimateOperationable)() {\n m.lastEstimateStatisticsOperation = value\n}",
"func (s *S3Settings) SetEnableStatistics(v bool) *S3Settings {\n\ts.EnableStatistics = &v\n\treturn s\n}",
"func (r *RollingStoreStats) SetRegionsStats(writeBytesRate, writeKeysRate float64) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.movingAvgs[utils.StoreRegionsWriteBytes].Set(writeBytesRate)\n\tr.movingAvgs[utils.StoreRegionsWriteKeys].Set(writeKeysRate)\n}",
"func Statistics(commands <-chan parser.Cmd) (*Stats, error) {\n\tstats := Stats{\n\t\tPropertiesPerType: make(map[string]int),\n\t\tPropertiesPerDepth: make(map[int]int),\n\t\tNodesPerDepth: make(map[int]int),\n\t\tValuesPerSize: make(map[int]int),\n\t}\n\n\tif err := stats.parse(commands); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &stats, nil\n}",
"func (mlw *Wrapper) ResetStatistics() {\n\tmlw.ml.ResetStatistics()\n}",
"func (sl *StagesLatency) ResetStatistics() {\n\tsl.first = duplicateSlice(sl.last)\n\tsl.FirstCollected = sl.LastCollected\n\n\tsl.calculate()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
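The SetStatistics entry above pairs a mutex-guarded setter with the NewStats constructor that appears further down in this file. A self-contained sketch of that pattern, with StatsEntry trimmed to two fields as an assumption (the real docker/cli type carries CPU, memory, network and block I/O fields):

package main

import (
	"fmt"
	"sync"
)

// StatsEntry is a trimmed-down stand-in for the real type.
type StatsEntry struct {
	Container string
	CPUPerc   float64
}

// Stats guards a StatsEntry with a mutex so a collector goroutine
// and a renderer can share it safely.
type Stats struct {
	mutex sync.Mutex
	StatsEntry
}

// NewStats returns a new Stats entity and sets the given container
// name in it, mirroring the NewStats entry in this file.
func NewStats(container string) *Stats {
	return &Stats{StatsEntry: StatsEntry{Container: container}}
}

// SetStatistics replaces the entry wholesale but preserves the
// container name the Stats was created with.
func (cs *Stats) SetStatistics(s StatsEntry) {
	cs.mutex.Lock()
	defer cs.mutex.Unlock()
	s.Container = cs.Container
	cs.StatsEntry = s
}

func main() {
	cs := NewStats("web")
	cs.SetStatistics(StatsEntry{Container: "ignored", CPUPerc: 12.5})
	fmt.Printf("%s: %.1f%%\n", cs.Container, cs.CPUPerc)
}

Overwriting the Container field inside the setter is the point of the pattern: callers can hand over a freshly collected entry without worrying about clobbering the identity the Stats object was keyed on.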
NewStatsFormat returns a format for rendering a CStatsContext
|
func NewStatsFormat(source, osType string) formatter.Format {
if source == formatter.TableFormatKey {
if osType == winOSType {
return formatter.Format(winDefaultStatsTableFormat)
}
return formatter.Format(defaultStatsTableFormat)
} else if source == formatter.AutoRangeFormatKey {
return formatter.Format(autoRangeStatsTableFormat)
}
return formatter.Format(source)
}
|
[
"func statsFormatWrite(ctx formatter.Context, Stats []StatsEntry, osType string, trunc bool) error {\n\trender := func(format func(subContext formatter.SubContext) error) error {\n\t\tfor _, cstats := range Stats {\n\t\t\tstatsCtx := &statsContext{\n\t\t\t\ts: cstats,\n\t\t\t\tos: osType,\n\t\t\t\ttrunc: trunc,\n\t\t\t}\n\t\t\tif err := format(statsCtx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tmemUsage := memUseHeader\n\tif osType == winOSType {\n\t\tmemUsage = winMemUseHeader\n\t}\n\tstatsCtx := statsContext{}\n\tstatsCtx.Header = formatter.SubHeaderContext{\n\t\t\"Container\": containerHeader,\n\t\t\"Name\": formatter.NameHeader,\n\t\t\"ID\": formatter.ContainerIDHeader,\n\t\t\"CPUPerc\": cpuPercHeader,\n\t\t\"MemUsage\": memUsage,\n\t\t\"MemPerc\": memPercHeader,\n\t\t\"NetIO\": netIOHeader,\n\t\t\"BlockIO\": blockIOHeader,\n\t\t\"PIDs\": pidsHeader,\n\t\t\"CurrentMemoryMin\": currentMemoryMinHeader,\n\t\t\"CurrentMemoryMax\": currentMemoryMaxHeader,\n\t\t\"OptiMemoryMin\": optiMemoryMinHeader,\n\t\t\"OptiMemoryMax\": optiMemoryMaxHeader,\n\t\t\"OptiCPUNumber\": optiCPUNumberHeader,\n\t\t\"UsedCPUPerc\": usedCPUPercHeader,\n\t\t\"OptiCPUTime\": optiCPUTimeHeader,\n\t}\n\tstatsCtx.os = osType\n\treturn ctx.Write(&statsCtx, render)\n}",
"func NewFormat(chans int, freq freq.T, sc sample.Codec) *Format {\n\treturn &Format{\n\t\tchannels: chans,\n\t\tfreq: freq,\n\t\tCodec: sc}\n}",
"func NewFormat(source string, quiet bool) formatter.Format {\n\tswitch source {\n\tcase formatter.PrettyFormatKey:\n\t\treturn secretInspectPrettyTemplate\n\tcase formatter.TableFormatKey:\n\t\tif quiet {\n\t\t\treturn formatter.DefaultQuietFormat\n\t\t}\n\t\treturn defaultSecretTableFormat\n\t}\n\treturn formatter.Format(source)\n}",
"func newFormat(format string) eval.Format {\n\treturn parseFormat(format, NO_STRING, NO_STRING, nil)\n}",
"func NewStatsCommand(conf *helper.Configuration) *cobra.Command {\n\toutput := &helper.OutputConf{}\n\tcmd := &cobra.Command{\n\t\tUse: \"stats\",\n\t\tShort: \"Display all stats\",\n\t\tLong: \"long\",\n\t\tExample: \"example\",\n\t\tDisableFlagsInUseLine: true,\n\t\tArgs: cobra.MaximumNArgs(1),\n\t\tValidArgs: []string{\"created\"},\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\thelper.CheckErrs(conf.Validate(cmd), output.ValidateFlags())\n\t\t\tdesc := statsTransformation\n\t\t\tif len(args) == 1 {\n\t\t\t\tswitch args[0] {\n\t\t\t\tcase \"created\":\n\t\t\t\t\tdesc = createdTransformation\n\t\t\t\t}\n\t\t\t}\n\t\t\thelper.CheckErr(output.SetTransformationDesc(desc))\n\t\t\thelper.CheckErr(output.PrintResponse(api.SRGetStats(context.Background(), conf.Authentication)))\n\t\t},\n\t}\n\toutput.AddOutputFlags(cmd)\n\treturn cmd\n}",
"func NewStats(container string) *Stats {\n\treturn &Stats{StatsEntry: StatsEntry{Container: container}}\n}",
"func NewStats() *Stats {\n\treturn &Stats{}\n}",
"func New() *Stats {\n\ts := &Stats{\n\t\tcounters: make(map[string]uint64),\n\t\tkeyFuncsByType: make(map[reflect.Type]EncoderFunc),\n\t\tkeyFuncsByString: make(map[string]EncoderFunc),\n\t}\n\n\treturn s\n}",
"func NewFormat(source string, quiet bool) formatter.Format {\n\tswitch source {\n\tcase formatter.TableFormatKey:\n\t\tif quiet {\n\t\t\treturn formatter.DefaultQuietFormat\n\t\t}\n\t\treturn defaultNetworkTableFormat\n\tcase formatter.RawFormatKey:\n\t\tif quiet {\n\t\t\treturn `network_id: {{.ID}}`\n\t\t}\n\t\treturn `network_id: {{.ID}}\\nname: {{.Name}}\\ndriver: {{.Driver}}\\nscope: {{.Scope}}\\n`\n\t}\n\treturn formatter.Format(source)\n}",
"func NewStats() *Stats {\n\tcs := new(Stats)\n\tcs.statMap = make(map[string]*FuncStat)\n\n\treturn cs\n}",
"func NewFormat(ctx context.Context, client *github.Client, debug bool) *Format {\n\treturn &Format{ctx: ctx, client: client, debug: debug}\n}",
"func NewConnectivityFormat(source string, quiet bool) Format {\n\tswitch source {\n\tcase TableFormatKey:\n\t\tif quiet {\n\t\t\treturn connectivityTableQuietFormat\n\t\t}\n\t\treturn connectivityTableFormat\n\tcase RawFormatKey:\n\t\tif quiet {\n\t\t\treturn connectivityRawQuietFormat\n\t\t}\n\t\treturn connectivityRawFormat\n\tcase SummaryFormatKey:\n\t\treturn connectivitySummaryFormat\n\t}\n\treturn Format(source)\n}",
"func New() *Stats {\n\treturn &Stats{\n\t\tStatusCode: map[string]uint64{},\n\t\tMethod: map[string]uint64{},\n\t\tPath: map[string]uint64{},\n\t\tInBytes: 0,\n\t\tOutBytes: 0,\n\t}\n}",
"func NewDiffFormat(source string) formatter.Format {\n\tswitch source {\n\tcase formatter.TableFormatKey:\n\t\treturn defaultDiffTableFormat\n\t}\n\treturn formatter.Format(source)\n}",
"func newCacheStats() *CacheStats {\n\treturn &CacheStats{}\n}",
"func New() *Format {\n\treturn &Format{\n\t\texclude: map[string]bool{},\n\t\tformatter: swag.NewFormatter(),\n\t}\n}",
"func (bs *blplStats) statsJSON() string {\n\tbuf := bytes.NewBuffer(make([]byte, 0, 128))\n\tfmt.Fprintf(buf, \"{\")\n\tfmt.Fprintf(buf, \"\\n \\\"TxnCount\\\": %v,\", bs.txnCount)\n\tfmt.Fprintf(buf, \"\\n \\\"QueryCount\\\": %v,\", bs.queryCount)\n\tfmt.Fprintf(buf, \"\\n \\\"QueriesPerSec\\\": %v,\", bs.queriesPerSec)\n\tfmt.Fprintf(buf, \"\\n \\\"TxnPerSec\\\": %v\", bs.txnsPerSec)\n\tfmt.Fprintf(buf, \"\\n \\\"TxnTime\\\": %v,\", bs.txnTime)\n\tfmt.Fprintf(buf, \"\\n \\\"QueryTime\\\": %v,\", bs.queryTime)\n\tfmt.Fprintf(buf, \"\\n}\")\n\treturn buf.String()\n}",
"func newHTTPStats() *httpStats {\n\treturn &httpStats{}\n}",
"func Benchmark_Ctx_Format(b *testing.B) {\n\tapp := New()\n\tc := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(c)\n\tc.Fasthttp.Request.Header.Set(\"Accept\", \"text/plain\")\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tc.Format(\"Hello, World!\")\n\t}\n\tutils.AssertEqual(b, `Hello, World!`, string(c.Fasthttp.Response.Body()))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
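NewStatsFormat above dispatches well-known source keys to canned table templates and passes anything else through as a user-supplied template. A sketch of the same dispatch with illustrative stand-in constants; the real template strings and keys live in docker/cli's formatter package and differ from these:

package main

import "fmt"

// Format is a Go template string, as in docker/cli's formatter package.
type Format string

// Illustrative stand-ins, not the real docker/cli values.
const (
	TableFormatKey     = "table"
	AutoRangeFormatKey = "auto-range"
	winOSType          = "windows"

	defaultStatsTableFormat    = "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}"
	winDefaultStatsTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}"
	autoRangeStatsTableFormat  = "table {{.Name}}\t{{.CurrentMemoryMin}}\t{{.CurrentMemoryMax}}"
)

// NewStatsFormat picks a canned table template for the well-known
// source keys and otherwise treats the source as a user template.
func NewStatsFormat(source, osType string) Format {
	if source == TableFormatKey {
		if osType == winOSType {
			return Format(winDefaultStatsTableFormat)
		}
		return Format(defaultStatsTableFormat)
	} else if source == AutoRangeFormatKey {
		return Format(autoRangeStatsTableFormat)
	}
	return Format(source)
}

func main() {
	fmt.Println(NewStatsFormat("table", "linux"))
	fmt.Println(NewStatsFormat("{{.Name}}: {{.CPUPerc}}", "linux"))
}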
NewStats returns a new Stats entity and sets the given container name in it
|
func NewStats(container string) *Stats {
return &Stats{StatsEntry: StatsEntry{Container: container}}
}
|
[
"func NewStats() *Stats {\n\treturn &Stats{}\n}",
"func NewStats() *Stats {\n\tcs := new(Stats)\n\tcs.statMap = make(map[string]*FuncStat)\n\n\treturn cs\n}",
"func NewEntityStats(name string, life int) *EntityStats {\n\tengosdl.Logger.Trace().Str(\"component\", \"entity-stats\").Str(\"entity-stats\", name).Msg(\"new entity-stats\")\n\tresult := &EntityStats{\n\t\tComponent: engosdl.NewComponent(name),\n\t\tLife: life,\n\t}\n\treturn result\n}",
"func (p *metricService) newStats(serviceName string) Metric {\n\tp.statsMutex.Lock()\n\tif _, ok := p.stats[serviceName]; !ok {\n\t\tp.stats[serviceName] = list.New()\n\t}\n\tstat := newMetricWithLock()\n\tdefer p.statsMutex.Unlock()\n\tp.stats[serviceName].PushBack(stat)\n\treturn stat\n}",
"func New() *Stats {\n\treturn &Stats{\n\t\tStatusCode: map[string]uint64{},\n\t\tMethod: map[string]uint64{},\n\t\tPath: map[string]uint64{},\n\t\tInBytes: 0,\n\t\tOutBytes: 0,\n\t}\n}",
"func NewStat() *Stat {\n\treturn &Stat{}\n}",
"func NewStats(numBuckets int) *Stats {\n\tif numBuckets <= 0 {\n\t\tnumBuckets = 16\n\t}\n\t// Use one more bucket for the last unbounded bucket.\n\ts := &Stats{numBuckets: numBuckets + 1}\n\ts.hw = &histWrapper{}\n\treturn s\n}",
"func newHTTPStats() *httpStats {\n\treturn &httpStats{}\n}",
"func New() *Stats {\n\ts := &Stats{\n\t\tcounters: make(map[string]uint64),\n\t\tkeyFuncsByType: make(map[reflect.Type]EncoderFunc),\n\t\tkeyFuncsByString: make(map[string]EncoderFunc),\n\t}\n\n\treturn s\n}",
"func NewActionStats() ActionStats {\n stats := ActionStats{}\n stats.stats = make(map[string]*actionData)\n return stats\n}",
"func CreateStats(cluster, namespace, volumeName, deploymentName, mountPath, pathRestic, podName string) string {\n\tvar stats map[string]interface{}\n\tvar nameStats string\n\tif cluster == \"ClusterFrom\" {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_from\")\n\t\tnameStats = \"statsFrom\"\n\t} else {\n\t\tstats = utils.ReadJson(\"templates/stats\", \"stats_template_to\")\n\t\tnameStats = \"statsTo\"\n\t}\n\n\tauxName := \"stats-\" + deploymentName\n\tsizeVolume := utils.GetSizeVolume(podName, volumeName, mountPath)\n\tstats[\"name\"] = auxName\n\tstats[\"size\"] = sizeVolume\n\terr := utils.WriteJson(pathRestic, nameStats, stats)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating \" + auxName)\n\t}\n\treturn sizeVolume\n}",
"func (r Resolver) CreateStat(ctx context.Context, args createStatArgs) (*StatResolver, error) {\n\tres, err := r.client.CreateStat(ctx, &pb.CreateStatReq{\n\t\tStat: &pb.Stat{\n\t\t\tAccountID: gqlIDToString(args.Input.AccountID),\n\t\t\tPlayerMove: args.Input.PlayerMove,\n\t\t},\n\t})\n\treturn statRes(res, err)\n}",
"func NewFuncStat(name string) *FuncStat {\n\tvar stat = new(FuncStat)\n\tstat.Name = name\n\tstat.Worker = NewCounter(0)\n\tstat.Job = NewCounter(0)\n\tstat.Processing = NewCounter(0)\n\treturn stat\n}",
"func (cs *Stats) CreateStats(fID string) error {\n\tif _, isPresent := cs.statMap[fID]; isPresent {\n\t\treturn errors.New(\"Stat exists\")\n\t}\n\n\tcs.statMap[fID] = new(FuncStat)\n\n\treturn nil\n}",
"func newCacheStats() *CacheStats {\n\treturn &CacheStats{}\n}",
"func newStatGroup(size uint64) *statGroup {\n\treturn &statGroup{\n\t\tvalues: make([]float64, size),\n\t\tcount: 0,\n\t}\n}",
"func NewMockStats(ctrl *gomock.Controller) *MockStats {\n\tmock := &MockStats{ctrl: ctrl}\n\tmock.recorder = &MockStatsMockRecorder{mock}\n\treturn mock\n}",
"func CreateEntityStats(params ...interface{}) engosdl.IComponent {\n\tif len(params) == 2 {\n\t\treturn NewEntityStats(params[0].(string), params[1].(int))\n\t}\n\treturn NewEntityStats(\"\", 0)\n}",
"func (p *stats) Name() string {\n\treturn \"Stats\"\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
statsFormatWrite renders the context for a list of container statistics
|
func statsFormatWrite(ctx formatter.Context, Stats []StatsEntry, osType string, trunc bool) error {
render := func(format func(subContext formatter.SubContext) error) error {
for _, cstats := range Stats {
statsCtx := &statsContext{
s: cstats,
os: osType,
trunc: trunc,
}
if err := format(statsCtx); err != nil {
return err
}
}
return nil
}
memUsage := memUseHeader
if osType == winOSType {
memUsage = winMemUseHeader
}
statsCtx := statsContext{}
statsCtx.Header = formatter.SubHeaderContext{
"Container": containerHeader,
"Name": formatter.NameHeader,
"ID": formatter.ContainerIDHeader,
"CPUPerc": cpuPercHeader,
"MemUsage": memUsage,
"MemPerc": memPercHeader,
"NetIO": netIOHeader,
"BlockIO": blockIOHeader,
"PIDs": pidsHeader,
"CurrentMemoryMin": currentMemoryMinHeader,
"CurrentMemoryMax": currentMemoryMaxHeader,
"OptiMemoryMin": optiMemoryMinHeader,
"OptiMemoryMax": optiMemoryMaxHeader,
"OptiCPUNumber": optiCPUNumberHeader,
"UsedCPUPerc": usedCPUPercHeader,
"OptiCPUTime": optiCPUTimeHeader,
}
statsCtx.os = osType
return ctx.Write(&statsCtx, render)
}
|
[
"func ContainerWrite(ctx formatter.Context, containers []api.ContainerSummary) error {\n\trender := func(format func(subContext formatter.SubContext) error) error {\n\t\tfor _, container := range containers {\n\t\t\terr := format(&ContainerContext{trunc: ctx.Trunc, c: container})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn ctx.Write(NewContainerContext(), render)\n}",
"func (ctx *Context) Stats(rw web.ResponseWriter, req *web.Request) {\n\tstats, err := getStats()\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(\"error getting stats : \" + err.Error()))\n\t\treturn\n\t}\n\n\trw.WriteHeader(http.StatusOK)\n\tb, _ := json.Marshal(stats)\n\trw.Write(b)\n}",
"func (t *Compose) Stats() {\n\tif !t.statsRunning {\n\t\tt.statsRunning = true\n\t\tt.statsQuit = make(chan struct{})\n\t\tgo func() {\n\t\t\trunning := false\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-t.statsQuit:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tif !running {\n\t\t\t\t\t\trunning = true\n\t\t\t\t\t\tcmd := exec.Command(\"docker\", \"stats\", \"--no-stream\", \"--format\", \"\\\"{{.Name}}\\\\t{{.CPUPerc}}\\\\t{{.MemUsage}}\\\\t{{.MemPerc}}\\\"\")\n\t\t\t\t\t\tout, err := cmd.Output()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tt.emitError(err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\treg := regexp.MustCompile(\"\\n\")\n\t\t\t\t\t\tlines := reg.Split(string(out), -1)\n\t\t\t\t\t\tlines = lines[:len(lines)-1]\n\t\t\t\t\t\tstatsa := []stats{}\n\t\t\t\t\t\tfor _, line := range lines {\n\t\t\t\t\t\t\treg = regexp.MustCompile(\"\\t\")\n\t\t\t\t\t\t\tcontArr := reg.Split(line, -1)\n\t\t\t\t\t\t\tname := strings.Replace(contArr[0], \"_1\", \"\", -1)\n\t\t\t\t\t\t\tname = strings.Replace(name, t.vuexState.Store.Settings.ContainerPrefix+\"_\", \"\", -1)\n\t\t\t\t\t\t\tname = strings.Replace(name, `\"`, \"\", -1)\n\t\t\t\t\t\t\tstat := stats{\n\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\tCPUPercString: contArr[1],\n\t\t\t\t\t\t\t\tCPUPerc: strings.Replace(contArr[1], `%`, \"\", -1),\n\t\t\t\t\t\t\t\tMemoryUseage: contArr[2],\n\t\t\t\t\t\t\t\tMemoryPercentString: strings.Replace(contArr[3], `\"`, \"\", -1),\n\t\t\t\t\t\t\t\tMemoryPercent: strings.Replace(contArr[3], `%\"`, \"\", -1),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tstatsa = append(statsa, stat)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tres, mErr := json.Marshal(statsa)\n\t\t\t\t\t\tif mErr != nil {\n\t\t\t\t\t\t\tt.emitError(err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\tuEnc := b64.URLEncoding.EncodeToString(res)\n\t\t\t\t\t\tt.runtime.Events.Emit(\"stats\", uEnc)\n\t\t\t\t\t\trunning = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t}\n\t\t}()\n\t}\n}",
"func RenderSummaryStats(w io.Writer, stats SummaryStats) error {\n\tvar err error\n\t_, err = fmt.Fprintf(w, \"Your total expenses: %.7f DASH\\n\\n\", stats.TotalCost)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttw := tabwriter.NewWriter(w, 8, 8, 1, '\\t', tabwriter.Debug|tabwriter.AlignRight)\n\tvar (\n\t\trequestStats []Stats\n\t\tnetworkStats []Stats\n\t)\n\tfor _, stats := range stats.GroupedStats.Slice() {\n\t\tswitch stats.Type {\n\t\tcase RequestStatsType:\n\t\t\trequestStats = append(requestStats, stats)\n\t\tcase NetworkStatsType:\n\t\t\tnetworkStats = append(networkStats, stats)\n\t\t}\n\t}\n\tif len(requestStats) > 0 {\n\t\t_, err := io.WriteString(w, \"Summary statistics for all performed requests\\n\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = writeTable(tw, []string{\"Request URL\", \"Size/bytes\", \"Elapsed/ms\", \"Cost/dash\"}, requestStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t_, err = fmt.Fprintf(w, \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(networkStats) > 0 {\n\t\t_, err := io.WriteString(w, \"Summary statistics for all used networks\\n\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = writeTable(tw, []string{\"Network\", \"Size/bytes\", \"Elapsed/ms\", \"Cost/dash\"}, networkStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (mgr *ContainerManager) StreamStats(ctx context.Context, name string, config *ContainerStatsConfig) error {\n\tc, err := mgr.container(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutStream := config.OutStream\n\tif (!c.IsRunning() || c.IsRestarting()) && !config.Stream {\n\t\treturn json.NewEncoder(outStream).Encode(&types.ContainerStats{\n\t\t\tName: c.Name,\n\t\t\tID: c.ID,\n\t\t})\n\t}\n\n\tif c.IsRunning() && !config.Stream {\n\t\tmetrics, stats, err := mgr.Stats(ctx, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcontainerStat := toContainerStats(metrics.Timestamp, stats)\n\t\treturn json.NewEncoder(outStream).Encode(containerStat)\n\t}\n\n\tif config.Stream {\n\t\twf := ioutils.NewWriteFlusher(outStream)\n\t\tdefer wf.Close()\n\t\twf.Flush()\n\t\toutStream = wf\n\t}\n\n\tenc := json.NewEncoder(outStream)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlogrus.Infof(\"context is cancelled when streaming stats of container %s\", c.ID)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tlogrus.Debugf(\"Start to stream stats of container %s\", c.ID)\n\t\t\tmetrics, stats, err := mgr.Stats(ctx, name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// metrics may be nil if the container is not running,\n\t\t\t// so just ignore it and try get the metrics next time.\n\t\t\tif metrics != nil {\n\t\t\t\tcontainerStat := toContainerStats(metrics.Timestamp, stats)\n\n\t\t\t\tif err := enc.Encode(containerStat); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttime.Sleep(DefaultStatsInterval)\n\t\t}\n\t}\n}",
"func writeStats(to *os.File, final bool, s, t stats.Stats) {\n\tp := fmt.Fprintf\n\tpn := prettyNumber\n\tpb := prettyNumBytes\n\tpl := prettyLatency\n\tpt := prettyTimeStamp\n\tif final {\n\t\twriteStatsHeader(to)\n\t\tp(to, statsPrintHeader, pt(), \"Put\",\n\t\t\tpn(t.TotalPuts()),\n\t\t\tpb(t.TotalPutBytes()),\n\t\t\tpl(t.MinPutLatency(), t.AvgPutLatency(), t.MaxPutLatency()),\n\t\t\tpb(t.PutThroughput(time.Now())),\n\t\t\tpn(t.TotalErrPuts()))\n\t\tp(to, statsPrintHeader, pt(), \"Get\",\n\t\t\tpn(t.TotalGets()),\n\t\t\tpb(t.TotalGetBytes()),\n\t\t\tpl(t.MinGetLatency(), t.AvgGetLatency(), t.MaxGetLatency()),\n\t\t\tpb(t.GetThroughput(time.Now())),\n\t\t\tpn(t.TotalErrGets()))\n\t} else {\n\t\t// show interval stats; some fields are shown of both interval and total, for example, gets, puts, etc\n\t\tif s.TotalPuts() != 0 {\n\t\t\tp(to, statsPrintHeader, pt(), \"Put\",\n\t\t\t\tpn(s.TotalPuts())+\"(\"+pn(t.TotalPuts())+\")\",\n\t\t\t\tpb(s.TotalPutBytes())+\"(\"+pb(t.TotalPutBytes())+\")\",\n\t\t\t\tpl(s.MinPutLatency(), s.AvgPutLatency(), s.MaxPutLatency()),\n\t\t\t\tpb(s.PutThroughput(time.Now()))+\"(\"+pb(t.PutThroughput(time.Now()))+\")\",\n\t\t\t\tpn(s.TotalErrPuts())+\"(\"+pn(t.TotalErrPuts())+\")\")\n\t\t}\n\t\tif s.TotalGets() != 0 {\n\t\t\tp(to, statsPrintHeader, pt(), \"Get\",\n\t\t\t\tpn(s.TotalGets())+\"(\"+pn(t.TotalGets())+\")\",\n\t\t\t\tpb(s.TotalGetBytes())+\"(\"+pb(t.TotalGetBytes())+\")\",\n\t\t\t\tpl(s.MinGetLatency(), s.AvgGetLatency(), s.MaxGetLatency()),\n\t\t\t\tpb(s.GetThroughput(time.Now()))+\"(\"+pb(t.GetThroughput(time.Now()))+\")\",\n\t\t\t\tpn(s.TotalErrGets())+\"(\"+pn(t.TotalErrGets())+\")\")\n\t\t}\n\t}\n}",
"func ListTopoStats(r *http.Request, cfg config.Config) (int, http.Header, []byte, error) {\n\n\t//STANDARD DECLARATIONS START\n\tcode := http.StatusOK\n\th := http.Header{}\n\toutput := []byte(\"\")\n\terr := error(nil)\n\tcharset := \"utf-8\"\n\t//STANDARD DECLARATIONS END\n\n\t// Set Content-Type response Header value\n\tcontentType := r.Header.Get(\"Accept\")\n\th.Set(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab Tenant DB configuration from context\n\ttenantDbConfig := context.Get(r, \"tenant_conf\").(config.MongoConfig)\n\tgroupType := context.Get(r, \"group_type\").(string)\n\tegroupType := context.Get(r, \"endpoint_group_type\").(string)\n\n\t// Parse the request into the input\n\turlValues := r.URL.Query()\n\tvars := mux.Vars(r)\n\n\treportName := vars[\"report_name\"]\n\t//Time Related\n\tdateStr := urlValues.Get(\"date\")\n\n\tdt, dateStr, err := utils.ParseZuluDate(dateStr)\n\tif err != nil {\n\t\tcode = http.StatusBadRequest\n\t\toutput, _ = respond.MarshalContent(respond.ErrBadRequestDetails(err.Error()), contentType, \"\", \" \")\n\t\treturn code, h, output, err\n\t}\n\n\tsession, err := mongo.OpenSession(tenantDbConfig)\n\tdefer mongo.CloseSession(session)\n\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\t// find the report id first\n\treportID, err := mongo.GetReportID(session, tenantDbConfig.Db, reportName)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tvar serviceResults []string\n\tvar egroupResults []string\n\tvar groupResults []string\n\n\tserviceCol := session.DB(tenantDbConfig.Db).C(\"service_ar\")\n\teGroupCol := session.DB(tenantDbConfig.Db).C(\"endpoint_group_ar\")\n\n\terr = serviceCol.Find(bson.M{\"report\": reportID, \"date\": dt}).Distinct(\"name\", &serviceResults)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\terr = eGroupCol.Find(bson.M{\"report\": reportID, \"date\": dt}).Distinct(\"name\", &egroupResults)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\terr = eGroupCol.Find(bson.M{\"report\": reportID, \"date\": dt}).Distinct(\"supergroup\", &groupResults)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\t// fill the topology JSON struct\n\tresult := Topology{}\n\tresult.GroupCount = len(groupResults)\n\tresult.GroupType = groupType\n\tresult.GroupList = groupResults\n\tresult.EndGroupCount = len(egroupResults)\n\tresult.EndGroupType = egroupType\n\tresult.EndGroupList = egroupResults\n\tresult.ServiceCount = len(serviceResults)\n\tresult.ServiceList = serviceResults\n\n\toutput, err = createTopoView(result, contentType, http.StatusOK)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\treturn code, h, output, err\n}",
"func fprintStats(w io.Writer, q *QueryBenchmarker) {\n\tmaxKeyLength := 0\n\tkeys := make([]string, 0, len(q.statMapping))\n\tfor k := range q.statMapping {\n\t\tif len(k) > maxKeyLength {\n\t\t\tmaxKeyLength = len(k)\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tv := q.statMapping[k]\n\t\tminRate := 1e3 / v.Min\n\t\tmeanRate := 1e3 / v.Mean\n\t\tmaxRate := 1e3 / v.Max\n\t\tpaddedKey := fmt.Sprintf(\"%s\", k)\n\t\tfor len(paddedKey) < maxKeyLength {\n\t\t\tpaddedKey += \" \"\n\t\t}\n\t\tkStats := make(map[string]interface{})\n\t\tkStats[\"min\"] = v.Min\n\t\tkStats[\"minRate\"] = minRate\n\t\tkStats[\"mean\"] = v.Mean\n\t\tkStats[\"meanRate\"] = meanRate\n\t\tkStats[\"max\"] = v.Max\n\t\tkStats[\"maxRate\"] = maxRate\n\t\tkStats[\"count\"] = v.Count\n\t\tkStats[\"sum\"] = v.Sum / 1e3\n\t\tq.json[k] = kStats\n\t\tif !q.doJson {\n\t\t\t_, err := fmt.Fprintf(w, \"%s : min: %8.2fms (%7.2f/sec), mean: %8.2fms (%7.2f/sec), max: %7.2fms (%6.2f/sec), count: %8d, sum: %5.1fsec \\n\", paddedKey, v.Min, minRate, v.Mean, meanRate, v.Max, maxRate, v.Count, v.Sum/1e3)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\tq.json[\"totalQueries\"] = q.totalQueries\n\tq.json[\"wallClockTime\"] = q.wallTook.Seconds()\n\tq.json[\"queryRate\"] = float64(q.totalQueries) / float64(q.wallTook.Seconds())\n\tq.json[\"workers\"] = q.workers\n\tq.json[\"batchSize\"] = q.batchSize\n\tif q.doJson {\n\t\tfor k, v := range q.json {\n\t\t\tif _, err := json.Marshal(v); err != nil {\n\t\t\t\tq.json[k] = \"\"\n\t\t\t}\n\t\t}\n\t\tb, err := json.Marshal(q.json)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error:\", err)\n\t\t}\n\t\tos.Stdout.Write(b)\n\t}\n}",
"func (s *Server) ListContainerStats(ctx context.Context, req *types.ListContainerStatsRequest) (*types.ListContainerStatsResponse, error) {\n\tctrList, err := s.ContainerServer.ListContainers(\n\t\tfunc(container *oci.Container) bool {\n\t\t\treturn container.StateNoLock().Status != oci.ContainerStateStopped\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilter := req.Filter\n\tif filter != nil {\n\t\tcFilter := &types.ContainerFilter{\n\t\t\tID: req.Filter.ID,\n\t\t\tPodSandboxID: req.Filter.PodSandboxID,\n\t\t\tLabelSelector: req.Filter.LabelSelector,\n\t\t}\n\t\tctrList = s.filterContainerList(ctx, cFilter, ctrList)\n\t}\n\n\tallStats := make([]*types.ContainerStats, 0, len(ctrList))\n\tfor _, container := range ctrList {\n\t\tsb := s.GetSandbox(container.Sandbox())\n\t\tif sb == nil {\n\t\t\t// Because we don't lock, we will get situations where the container was listed, and then\n\t\t\t// its sandbox was deleted before we got to checking its stats.\n\t\t\t// We should not log in this expected situation.\n\t\t\tcontinue\n\t\t}\n\t\tcgroup := sb.CgroupParent()\n\t\tstats, err := s.Runtime().ContainerStats(ctx, container, cgroup)\n\t\tif err != nil {\n\t\t\t// ErrCgroupDeleted is another situation that will happen if the container\n\t\t\t// is deleted from underneath the call to this function.\n\t\t\tif !errors.Is(err, cgroups.ErrCgroupDeleted) {\n\t\t\t\t// The other errors are much less likely, and possibly useful to hear about.\n\t\t\t\tlog.Warnf(ctx, \"Unable to get stats for container %s: %v\", container.ID(), err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tresponse := s.buildContainerStats(ctx, stats, container)\n\t\tallStats = append(allStats, response)\n\t}\n\n\treturn &types.ListContainerStatsResponse{\n\t\tStats: allStats,\n\t}, nil\n}",
"func tplServerFilterStatsGroups() []string {\n\tstats := []string{\"\", \"\", \"\", \"\"}\n\tbuf := bytes.Buffer{}\n\tstatsGroupsFilter := [][]map[string]string{binlogStatsGroups, cmdStatsGroups, currentStatsGroups, otherStatsGroups}\n\tfor k, statsGroups := range statsGroupsFilter {\n\t\tfor _, statsGroup := range statsGroups {\n\t\t\tfor property, description := range statsGroup {\n\t\t\t\tstatus := \"\"\n\t\t\t\tif checkInSlice(selfConf.Filter, property) {\n\t\t\t\t\tstatus = `checked`\n\t\t\t\t}\n\t\t\t\tbuf.Reset()\n\t\t\t\tbuf.WriteString(`<div class=\"control-group\"><div class=\"controls\"><div class=\"checkbox\"><label><input type=\"checkbox\" name=\"`)\n\t\t\t\tbuf.WriteString(property)\n\t\t\t\tbuf.WriteString(`\" `)\n\t\t\t\tbuf.WriteString(status)\n\t\t\t\tbuf.WriteString(`><b>`)\n\t\t\t\tbuf.WriteString(property)\n\t\t\t\tbuf.WriteString(`</b><br/>`)\n\t\t\t\tbuf.WriteString(description)\n\t\t\t\tbuf.WriteString(`</label></div></div></div>`)\n\t\t\t\tstats[k] += buf.String()\n\t\t\t}\n\t\t}\n\t}\n\treturn stats\n}",
"func (mqtt *mqttManager) ReportContainersStats() error {\n\n\tsett, err := mqtt.settingsService.Get()\n\tif err != nil {\n\t\tg.Log.Error(\"failed to get default settings\", err)\n\t\treturn err\n\t}\n\tif sett.GatewayID == \"\" || sett.RegistryID == \"\" {\n\t\tg.Log.Error(\"failed to report full system stats, gatewayID or registryID in settings missing\", sett.GatewayID, sett.RegistryID)\n\t\treturn errors.New(\"missing gateway or report id in settings\")\n\t}\n\tprocStats, err := mqtt.processService.StatsAllProcesses(sett)\n\tif err != nil {\n\t\tg.Log.Error(\"failed to retrieve all process stats\", err)\n\t\treturn err\n\t}\n\n\tstatsBytes, err := json.Marshal(procStats)\n\tif err != nil {\n\t\tg.Log.Error(\"failed to marshalall process streams to report to cloud\", err)\n\t\treturn err\n\t}\n\n\tmqttMsg := &models.MQTTMessage{\n\t\tCreated: time.Now().UTC().Unix() * 1000,\n\t\tProcessOperation: models.MQTTProcessOperation(models.DeviceOperationStats),\n\t\tProcessType: models.MQTTProcessType(models.ProcessTypeStats),\n\t\tMessage: statsBytes,\n\t}\n\tpErr := utils.PublishMonitoringTelemetry(sett.GatewayID, (*mqtt.client), mqttMsg)\n\tif pErr != nil {\n\t\tg.Log.Error(\"Failed to publish monitoring telemetry\", pErr)\n\t\treturn pErr\n\t}\n\treturn nil\n}",
"func (r *remoteRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] ListContainerStats\", \"filter\", filter)\n\t// Do not set timeout, because writable layer stats collection takes time.\n\t// TODO(random-liu): Should we assume runtime should cache the result, and set timeout here?\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\treturn r.listContainerStatsV1(ctx, filter)\n}",
"func (w *StatsDWriter) Write(results Summary) error {\n\tfor k, v := range results {\n\t\t_, err := fmt.Fprintf(w.writer, \"%s:%d|s\\n\", k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func getStats() string {\n\tvar mutex = &sync.Mutex{}\n\tvar stat []models.ActionOutput\n\t//using mutex to protect critical section and prevent race conditions.\n\tmutex.Lock()\n\tfor action, actionCounter := range actionMap {\n\t\tavgTime := actionCounter.TotalTime / (float64)(actionCounter.Counter)\n\t\tao := models.ActionOutput{\n\t\t\tAction: action,\n\t\t\tAvg: avgTime,\n\t\t}\n\t\tstat = append(stat, ao)\n\t}\n\tactionStats, _ := json.Marshal(stat)\n\tmutex.Unlock()\n\n\t//convert to serialized json string array\n\treturn string(actionStats)\n\n}",
"func (store Store) WriteStat(w io.Writer) error {\n\treturn store.db.View(func(tx *bolt.Tx) error {\n\t\tusers := tx.Bucket(bucket.RegisterDates).Stats().KeyN\n\t\tsubscriptions := tx.Bucket(bucket.Subscriptions).Stats().KeyN\n\t\tdbSize := float64(tx.Size()) / 1024.0 / 1024.0 // in mb\n\n\t\tphrasesTotal := tx.Bucket(bucket.Phrases).Stats().KeyN\n\t\tphrasesAvg := phrasesTotal / users\n\n\t\tscoretotal, err := sum(tx.Bucket(bucket.Scoretotals), simplesum)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tscoretotalAvg := scoretotal / users\n\n\t\tstudiesTotal := tx.Bucket(bucket.Studies).Stats().KeyN\n\t\tstudiesAvg := studiesTotal / users\n\n\t\tnow := itob(time.Now().Unix())\n\t\tdueStudiesTotal, err := sum(tx.Bucket(bucket.Studytimes), func(v []byte) int {\n\t\t\tif bytes.Compare(v, now) < 1 {\n\t\t\t\treturn 1\n\t\t\t}\n\t\t\treturn 0\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdueStudiesAvg := dueStudiesTotal / users\n\n\t\timportsTotal, err := sum(tx.Bucket(bucket.Imports), simplesum)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\timportsAvg := importsTotal / users\n\n\t\tnotifiesTotal, err := sum(tx.Bucket(bucket.Notifies), simplesum)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnotifiesAvg := notifiesTotal / users\n\n\t\tzeroscore, err := sum(tx.Bucket(bucket.Zeroscores), simplesum)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tzeroscoreAvg := zeroscore / users\n\n\t\tnewphrasesTotal, err := sum(tx.Bucket(bucket.NewPhrases), count64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnewphrasesAvg := newphrasesTotal / users\n\n\t\twarnings := \"\"\n\t\tnotNewPhrases := phrasesTotal - newphrasesTotal\n\t\tif n := tx.Bucket(bucket.Studytimes).Stats().KeyN; n != notNewPhrases {\n\t\t\twarnings += fmt.Sprintf(\"\\nWARNING: Number of studytimes (%d) does not match phrases - newphrases (%d).\\n\", n, notNewPhrases)\n\t\t}\n\t\tif n := tx.Bucket(bucket.PhraseAddTimes).Stats().KeyN; n != phrasesTotal {\n\t\t\twarnings += fmt.Sprintf(\"\\nWARNING: Number of phraseaddtimes (%d) does not match number of phrases (%d).\\n\", n, phrasesTotal)\n\t\t}\n\n\t\tfmt.Fprintf(\n\t\t\tw, statmsg, users, subscriptions, dbSize,\n\t\t\tphrasesTotal, phrasesAvg,\n\t\t\tscoretotal, scoretotalAvg,\n\t\t\tstudiesTotal, studiesAvg,\n\t\t\tdueStudiesTotal, dueStudiesAvg,\n\t\t\timportsTotal, importsAvg,\n\t\t\tnotifiesTotal, notifiesAvg,\n\t\t\tzeroscore, zeroscoreAvg,\n\t\t\tnewphrasesTotal, newphrasesAvg,\n\t\t\twarnings,\n\t\t)\n\t\treturn nil\n\t})\n}",
"func (bs *blplStats) statsJSON() string {\n\tbuf := bytes.NewBuffer(make([]byte, 0, 128))\n\tfmt.Fprintf(buf, \"{\")\n\tfmt.Fprintf(buf, \"\\n \\\"TxnCount\\\": %v,\", bs.txnCount)\n\tfmt.Fprintf(buf, \"\\n \\\"QueryCount\\\": %v,\", bs.queryCount)\n\tfmt.Fprintf(buf, \"\\n \\\"QueriesPerSec\\\": %v,\", bs.queriesPerSec)\n\tfmt.Fprintf(buf, \"\\n \\\"TxnPerSec\\\": %v\", bs.txnsPerSec)\n\tfmt.Fprintf(buf, \"\\n \\\"TxnTime\\\": %v,\", bs.txnTime)\n\tfmt.Fprintf(buf, \"\\n \\\"QueryTime\\\": %v,\", bs.queryTime)\n\tfmt.Fprintf(buf, \"\\n}\")\n\treturn buf.String()\n}",
"func (d *Influxstatsd) WriteTo(w io.Writer) (count int64, err error) {\n\tvar n int\n\n\td.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {\n\t\tn, err = fmt.Fprintf(w, \"%s%s%s:%f|c%s\\n\", d.prefix, name, d.tagValues(lvs), sum(values), sampling(d.rates.Get(name)))\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tcount += int64(n)\n\t\treturn true\n\t})\n\tif err != nil {\n\t\treturn count, err\n\t}\n\n\td.mtx.RLock()\n\tfor _, root := range d.gauges {\n\t\troot.walk(func(name string, lvs lv.LabelValues, value float64) bool {\n\t\t\tn, err = fmt.Fprintf(w, \"%s%s%s:%f|g\\n\", d.prefix, name, d.tagValues(lvs), value)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcount += int64(n)\n\t\t\treturn true\n\t\t})\n\t}\n\td.mtx.RUnlock()\n\n\td.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {\n\t\tsampleRate := d.rates.Get(name)\n\t\tfor _, value := range values {\n\t\t\tn, err = fmt.Fprintf(w, \"%s%s%s:%f|ms%s\\n\", d.prefix, name, d.tagValues(lvs), value, sampling(sampleRate))\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcount += int64(n)\n\t\t}\n\t\treturn true\n\t})\n\tif err != nil {\n\t\treturn count, err\n\t}\n\n\td.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {\n\t\tsampleRate := d.rates.Get(name)\n\t\tfor _, value := range values {\n\t\t\tn, err = fmt.Fprintf(w, \"%s%s%s:%f|h%s\\n\", d.prefix, name, d.tagValues(lvs), value, sampling(sampleRate))\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcount += int64(n)\n\t\t}\n\t\treturn true\n\t})\n\tif err != nil {\n\t\treturn count, err\n\t}\n\n\treturn count, err\n}",
"func GetIndexUsageStatList(client *mongo.Client) *IndexStatsList {\n\tindexUsageStatsList := &IndexStatsList{}\n\tdatabaseNames, err := client.ListDatabaseNames(context.TODO(), bson.M{})\n\tif err != nil {\n\t\t_, logSFound := logSuppressIS[\"\"]\n\t\tif !logSFound {\n\t\t\tlog.Errorf(\"%s. Index usage stats will not be collected. This log message will be suppressed from now.\", err)\n\t\t\tlogSuppressIS[\"\"] = true\n\t\t}\n\t\treturn nil\n\t}\n\tdelete(logSuppressIS, \"\")\n\tfor _, dbName := range databaseNames {\n\t\tc, err := client.Database(dbName).ListCollections(context.TODO(), bson.M{}, options.ListCollections().SetNameOnly(true))\n\t\tif err != nil {\n\t\t\t_, logSFound := logSuppressIS[dbName]\n\t\t\tif !logSFound {\n\t\t\t\tlog.Errorf(\"%s. Index usage stats will not be collected for this db. This log message will be suppressed from now.\", err)\n\t\t\t\tlogSuppressIS[dbName] = true\n\t\t\t}\n\t\t} else {\n\n\t\t\ttype collListItem struct {\n\t\t\t\tName string `bson:\"name,omitempty\"`\n\t\t\t\tType string `bson:\"type,omitempty\"`\n\t\t\t}\n\n\t\t\tdelete(logSuppressIS, dbName)\n\t\t\tfor c.Next(context.TODO()) {\n\t\t\t\tcoll := &collListItem{}\n\t\t\t\terr := c.Decode(&coll)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcollIndexUsageStats := IndexStatsList{}\n\t\t\t\tc, err := client.Database(dbName).Collection(coll.Name).Aggregate(context.TODO(), []bson.M{{\"$indexStats\": bson.M{}}})\n\t\t\t\tif err != nil {\n\t\t\t\t\t_, logSFound := logSuppressIS[dbName+\".\"+coll.Name]\n\t\t\t\t\tif !logSFound {\n\t\t\t\t\t\tlog.Errorf(\"%s. Index usage stats will not be collected for this collection. This log message will be suppressed from now.\", err)\n\t\t\t\t\t\tlogSuppressIS[dbName+\".\"+coll.Name] = true\n\t\t\t\t\t}\n\t\t\t\t} else {\n\n\t\t\t\t\tfor c.Next(context.TODO()) {\n\t\t\t\t\t\ts := &IndexUsageStats{}\n\t\t\t\t\t\tif err := c.Decode(s); err != nil {\n\t\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcollIndexUsageStats.Items = append(collIndexUsageStats.Items, *s)\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := c.Err(); err != nil {\n\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := c.Close(context.TODO()); err != nil {\n\t\t\t\t\t\tlog.Errorf(\"Could not close Aggregate() cursor, reason: %v\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tdelete(logSuppressIS, dbName+\".\"+coll.Name)\n\t\t\t\t\t// Label index stats with corresponding db.collection\n\t\t\t\t\tfor i := 0; i < len(collIndexUsageStats.Items); i++ {\n\t\t\t\t\t\tcollIndexUsageStats.Items[i].Database = dbName\n\t\t\t\t\t\tcollIndexUsageStats.Items[i].Collection = coll.Name\n\t\t\t\t\t}\n\t\t\t\t\tindexUsageStatsList.Items = append(indexUsageStatsList.Items, collIndexUsageStats.Items...)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := c.Close(context.TODO()); err != nil {\n\t\t\t\tlog.Errorf(\"Could not close ListCollections() cursor, reason: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn indexUsageStatsList\n}",
"func (cb *printcb) outputStat(stats map[string]string) error {\n\tidx := len(*cb)\n\t*cb = append(*cb, FileDetails{})\n\tdetails := &(*cb)[idx]\n\tfor key, value := range stats {\n\t\tif err := setTaggedField(details, key, value, false); err != nil {\n\t\t\tglog.Warningf(\"Couldn't set field %v: %v\", key, err)\n\t\t}\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
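statsFormatWrite above hands the formatter a render closure that walks every StatsEntry and formats each sub-context in turn. A simplified, self-contained sketch of that callback shape using text/template; the types and the writeStats signature here are assumptions for illustration, not docker/cli's actual API:

package main

import (
	"fmt"
	"os"
	"text/template"
)

// statsEntry is a trimmed stand-in for the formatter sub-context.
type statsEntry struct {
	Container string
	CPUPerc   string
}

// writeStats mirrors the shape of statsFormatWrite: the template is
// parsed once, and a render closure applies it to every entry.
func writeStats(tmpl string, entries []statsEntry) error {
	t, err := template.New("stats").Parse(tmpl)
	if err != nil {
		return err
	}
	render := func(format func(e statsEntry) error) error {
		for _, e := range entries {
			if err := format(e); err != nil {
				return err
			}
		}
		return nil
	}
	return render(func(e statsEntry) error {
		if err := t.Execute(os.Stdout, e); err != nil {
			return err
		}
		_, err := fmt.Println()
		return err
	})
}

func main() {
	entries := []statsEntry{
		{Container: "web", CPUPerc: "12.5%"},
		{Container: "db", CPUPerc: "3.1%"},
	}
	if err := writeStats("{{.Container}}\t{{.CPUPerc}}", entries); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}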
readStdin reads a Grid in from stdin
|
func readStdin() (grid []lineRep, err error) {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
line := strings.Trim(scanner.Text(), "\r\n")
if len(line) != lineLen {
err = ErrBadLine
return
}
var lrep lineRep
for i := 0; i < lineLen; i++ {
lrep[i] = (line[i] == '#')
}
grid = append(grid, lrep)
}
return
}
|
[
"func (system *IsingSystem) ReadGrid(position *Position) int {\n\treturn system.grid[position.i][position.j]\n}",
"func ReadGrid(s io.ByteScanner) (Grid, error) {\n\tg := NewGrid()\n\tfor r := 0; r < GridSize; r++ {\n\t\tfor c := 0; c < GridSize; c++ {\n\t\t\t// read number\n\t\t\tb, err := s.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\treturn g, fmt.Errorf(\"failed to read sudoku grid row %d: %s\", r+1, err)\n\t\t\t}\n\n\t\t\tif b == '_' {\n\t\t\t\tg[r][c] = Cell{}\n\t\t\t} else if b >= '1' && b <= '9' {\n\t\t\t\tg[r][c].resolve(b - '0')\n\t\t\t} else {\n\t\t\t\treturn g, fmt.Errorf(\"fot a number %c at row %d\", b, r+1)\n\t\t\t}\n\n\t\t\tif c != GridSize-1 {\n\t\t\t\t// read space\n\t\t\t\tb, err = s.ReadByte()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn g, fmt.Errorf(\"failed to read sudoku grid row %d: %s\", r+1, err)\n\t\t\t\t}\n\t\t\t\tif b != ' ' {\n\t\t\t\t\treturn g, fmt.Errorf(\"unexpected character '%c' at row %d\", b, r+1)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// read newline\n\t\t\t\tb, err = s.ReadByte()\n\t\t\t\tif r == GridSize-1 && err == io.EOF {\n\t\t\t\t\tbreak // TODO: return EOF here?\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn g, fmt.Errorf(\"failed to read sudoku grid row %d: %s\", r+1, err)\n\t\t\t\t}\n\t\t\t\tif b != '\\n' {\n\t\t\t\t\t// TODO: support Windows and MAC new lines\n\t\t\t\t\treturn g, fmt.Errorf(\"unexpected character '%c' at row %d\", b, r+1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn g, nil\n}",
"func (dla *DLASystem) ReadGrid(position [2]int) bool {\n\thalfGrid := dla.gridSize / 2\n\treturn dla.grid[position[0]+halfGrid][position[1]+halfGrid]\n}",
"func ReadFrom(rdr io.Reader) (grid *Grid, err error) {\n\treader := bufio.NewReader(rdr)\n\theader := make([]byte, 8)\n\tif _, err = io.ReadFull(reader, header); err != nil {\n\t\treturn\n\t}\n\n\t// Read the size\n\tvar view Rect\n\tview.Min.X = int16(binary.BigEndian.Uint16(header[0:2]))\n\tview.Min.Y = int16(binary.BigEndian.Uint16(header[2:4]))\n\tview.Max.X = int16(binary.BigEndian.Uint16(header[4:6]))\n\tview.Max.Y = int16(binary.BigEndian.Uint16(header[6:8]))\n\n\t// Allocate a new grid\n\tgrid = NewGrid(view.Max.X+1, view.Max.Y+1)\n\tbuf := make([]byte, tileDataSize)\n\tgrid.pagesWithin(view.Min, view.Max, func(page *page) {\n\t\tif _, err = io.ReadFull(reader, buf); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tcopy(page.Data(), buf)\n\t})\n\treturn\n}",
"func parseInput(filename string) (stringGrid, error) {\n\tbody, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn stringGrid{}, err\n\t}\n\tinput := strings.Split(string(body), \"\\n\")\n\tsg := make(stringGrid, len(input))\n\n\tfor i, val := range input {\n\t\tsg[i] = strings.Split(val, \"\")\n\t}\n\treturn sg, nil\n}",
"func ReadGrid(f io.Reader, templateNumber uint16) (Grid, error) {\n\tvar err error\n\tvar g Grid\n\tswitch templateNumber {\n\tcase 0:\n\t\tvar grid Grid0\n\t\terr = binary.Read(f, binary.BigEndian, &grid)\n\t\tgrid.La1 = fixNegLatLon(grid.La1)\n\t\tgrid.Lo1 = fixNegLatLon(grid.Lo1)\n\t\tgrid.La2 = fixNegLatLon(grid.La2)\n\t\tgrid.Lo2 = fixNegLatLon(grid.Lo2)\n\t\tg = &grid\n\tcase 10:\n\t\tvar grid Grid10\n\t\terr = binary.Read(f, binary.BigEndian, &grid)\n\t\tgrid.La1 = fixNegLatLon(grid.La1)\n\t\tgrid.Lo1 = fixNegLatLon(grid.Lo1)\n\t\tgrid.La2 = fixNegLatLon(grid.La2)\n\t\tgrid.Lo2 = fixNegLatLon(grid.Lo2)\n\t\tg = &grid\n\tcase 20:\n\t\tvar grid Grid20\n\t\terr = binary.Read(f, binary.BigEndian, &grid)\n\t\tgrid.La1 = fixNegLatLon(grid.La1)\n\t\tgrid.Lo1 = fixNegLatLon(grid.Lo1)\n\t\tg = &grid\n\tcase 30:\n\t\tvar grid Grid30\n\t\terr = binary.Read(f, binary.BigEndian, &grid)\n\t\tgrid.La1 = fixNegLatLon(grid.La1)\n\t\tgrid.Lo1 = fixNegLatLon(grid.Lo1)\n\t\tg = &grid\n\tcase 40:\n\t\tvar grid Grid40\n\t\terr = binary.Read(f, binary.BigEndian, &grid)\n\t\tgrid.La1 = fixNegLatLon(grid.La1)\n\t\tgrid.Lo1 = fixNegLatLon(grid.Lo1)\n\t\tgrid.La2 = fixNegLatLon(grid.La2)\n\t\tgrid.Lo2 = fixNegLatLon(grid.Lo2)\n\t\tg = &grid\n\tcase 90:\n\t\tvar grid Grid90\n\t\treturn &grid, binary.Read(f, binary.BigEndian, &grid)\n\tdefault:\n\t\tvar grid Grid90\n\t\treturn &grid, errors.New(fmt.Sprint(\"Unsupported grid definition \", templateNumber))\n\t}\n\treturn g, err\n}",
"func (tileset *DB) ReadGrid(z uint8, x uint64, y uint64, data *[]byte) error {\n\tif !tileset.hasUTFGrid {\n\t\treturn errors.New(\"Tileset does not contain UTFgrids\")\n\t}\n\n\terr := tileset.db.QueryRow(\"select grid from grids where zoom_level = ? and tile_column = ? and tile_row = ?\", z, x, y).Scan(data)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\t*data = nil // If this tile does not exist in the database, return empty bytes\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tkeydata := make(map[string]interface{})\n\tvar (\n\t\tkey string\n\t\tvalue []byte\n\t)\n\n\trows, err := tileset.db.Query(\"select key_name, key_json FROM grid_data where zoom_level = ? and tile_column = ? and tile_row = ?\", z, x, y)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot fetch grid data: %v\", err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr := rows.Scan(&key, &value)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not fetch grid data: %v\", err)\n\t\t}\n\t\tvaluejson := make(map[string]interface{})\n\t\tjson.Unmarshal(value, &valuejson)\n\t\tkeydata[key] = valuejson\n\t}\n\n\tif len(keydata) == 0 {\n\t\treturn nil // there is no key data for this tile, return\n\t}\n\n\tvar (\n\t\tzreader io.ReadCloser // instance of zlib or gzip reader\n\t\tzwriter io.WriteCloser // instance of zlip or gzip writer\n\t\tbuf bytes.Buffer\n\t)\n\treader := bytes.NewReader(*data)\n\n\tif tileset.utfgridCompression == ZLIB {\n\t\tzreader, err = zlib.NewReader(reader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tzwriter = zlib.NewWriter(&buf)\n\t} else {\n\t\tzreader, err = gzip.NewReader(reader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tzwriter = gzip.NewWriter(&buf)\n\t}\n\n\tvar utfjson map[string]interface{}\n\tjsonDecoder := json.NewDecoder(zreader)\n\tjsonDecoder.Decode(&utfjson)\n\tzreader.Close()\n\n\t// splice the key data into the UTF json\n\tutfjson[\"data\"] = keydata\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now re-encode to original zip encoding\n\tjsonEncoder := json.NewEncoder(zwriter)\n\terr = jsonEncoder.Encode(utfjson)\n\tif err != nil {\n\t\treturn err\n\t}\n\tzwriter.Close()\n\t*data = buf.Bytes()\n\n\treturn nil\n}",
"func readInput(s beam.Scope, dsn string) beam.PCollection {\n\ts = s.Scope(\"readInput\")\n\n\t// read from the database into a PCollection of pageView structs\n\treturn databaseio.Read(s, \"mysql\", dsn, \"data\", reflect.TypeOf(pageView{}))\n}",
"func readCmd() {\n\tbzIn, err := ioutil.ReadAll(os.Stdin)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"cannot read stdin: %v\", err)\n\t\treturn\n\t}\n\tvestingData, err := unmarshalVestingData(bzIn)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"cannot decode vesting data: %v\", err)\n\t\treturn\n\t}\n\tevents, err := vestingDataToEvents(vestingData)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"cannot convert vesting data: %v\", err)\n\t}\n\tbzOut, err := marshalEvents(events)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"cannot encode events: %v\", err)\n\t\treturn\n\t}\n\tfmt.Println(string(bzOut))\n}",
"func readInput(r io.Reader) Node {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdata = bytes.Trim(data, \"^$ \\n\") // remove extraneous symbols\n\tnode, i := parseSequence(data, 0)\n\tif i < len(data) {\n\t\tpanic(fmt.Sprintf(\"parse error at offset %d\", i))\n\t}\n\treturn node\n}",
"func (f *ioFile) read(args []interface{}, nl bool) {\n\tf.eol = false\n\tfor len(args) != 0 {\n\t\targ := args[0]\n\t\targs = args[1:]\n\t\tif _, ok := getWidth(&args); ok {\n\t\t\tpanic(\"internal error: read field width specifier not supported\")\n\t\t}\n\n\t\tswitch x := arg.(type) {\n\t\tcase *byte:\n\t\t\t*x = f.component[0]\n\t\tdefault:\n\t\t\tpanic(fmt.Errorf(\"unsupported read variable type: %T (%q)\", x, f.name))\n\t\t}\n\t\tf.get()\n\t}\n\tif !nl {\n\t\treturn\n\t}\n\n\t// [0] page 92\n\t//\n\t// ReadLn(F) skips to the beginning of the next line of the textfile F (F^\n\t// becomes the first character of the next line).\n\tfor !f.eof && f.component[0] != '\\n' {\n\t\tf.get()\n\t}\n\tif !f.eof {\n\t\tf.get()\n\t}\n}",
"func ReadHostsStdin(config *Config) *Hosts {\n\n\t// create a standard host configuration\n\tmyhost := MyHosts{\n\t\tPort: config.Port,\n\t\tPriority: 1,\n\t\tThreads: 1,\n\t\tProtocol: config.Protocol,\n\t}\n\n\t// create the result\n\thosts := make(Hosts)\n\n\t// get data from stdin\n\tdata, _ := ioutil.ReadAll(os.Stdin)\n\n\t// Create a list of hosts\n\thostnames := []string(strings.Split(strings.TrimSpace(string(data)), \"|\"))\n\n\t// loop over hosts and create the structure\n\tfor _, name := range hostnames {\n\t\thosts[name] = myhost\n\t}\n\n\t// return the hosts\n\treturn &hosts\n\n}",
"func (b *Board) Load(inp string) {\n\t\n\ts := strings.Split(inp, \"\\n\")\n\n\tsizes := strings.Split(s[0], \":\")\n\theight, _ := strconv.ParseInt(sizes[0], 10, 32)\n\twidth, _ := strconv.ParseInt(sizes[1], 10, 32)\n\n\tb2 := NewBoard(int(height), int(width)) \n\tb = b2\n\n\trows := s[1..]\n\tfor line := range s[1..] {\n\t}\n}",
"func readInputLoop(dt *discordterm.Client) {\n\trd := bufio.NewReader(os.Stdin)\n\n\tl, err := readline.NewEx(&readline.Config{\n\t\tPrompt: prompt,\n\t\tAutoComplete: completer,\n\t\tHistoryFile: filepath.Join(os.TempDir(), historyFile),\n\t\tHistorySearchFold: true,\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error creating readline:\", err)\n\t\treturn\n\t}\n\n\tvar standardReader bool\n\n\tfor {\n\t\tvar line string\n\t\tvar err error\n\n\t\tl.SetPrompt(createPrompt(dt))\n\n\t\tif !standardReader {\n\t\t\tline, err = l.Readline()\n\t\t\tif err != nil {\n\t\t\t\tstandardReader = true\n\t\t\t\tlog.Println(\"Error using readline package: switching to bufio reader: \", err)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Print(prompt)\n\t\t\tline, err = rd.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t// Read a line of input\n\n\t\t// Remove whitespace characters from line\n\t\tline = strings.TrimSpace(line)\n\n\t\terr = executeCommand(dt, line)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}",
"func (client *Client) InputReader(c net.Conn) {\n\tscanner := bufio.NewScanner(os.Stdin)\n\tfmt.Print(\"> \")\n\tfor scanner.Scan() {\n\t\targs := strings.Split(scanner.Text(), \" \")\n\t\tif args[0] == \"exit\" {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tresponse, err := client.processCommand(c, args[0], args[1:]...)\n\t\tparseResponse(response, err, defaultTag)\n\t\tif args[0] == \"subscribe\" {\n\t\t\tsubscribePattern(c, scanner)\n\t\t\tfmt.Printf(\"\\r%s\", defaultTag)\n\t\t}\n\t}\n\n\tif scanner.Err() != nil {\n\t\tfmt.Printf(\"%v\", scanner.Err())\n\t\tos.Exit(2)\n\t}\n}",
"func (c *Config) Stdin() io.ReadCloser {\n\treturn c.stdin\n}",
"func main() {\n\n\treader := bufio.NewReader(os.Stdin)\n\tfmt.Println(\"Enter your name:\")\n\n\tname, _ := reader.ReadString('\\n')\n\tfmt.Print(\"Your name is \", name)\n\n}",
"func readInput(conn net.Conn, qst string) (string, error) {\n\tconn.Write([]byte(qst))\n\ts, err := bufio.NewReader(conn).ReadString('\\n')\n\tif err != nil {\n\t\tlog.Printf(\"readinput: could not read input from stdin: %v from client %v\", err, conn.RemoteAddr().String())\n\t\treturn \"\", err\n\t}\n\ts = strings.Trim(s, \"\\r\\n\")\n\treturn s, nil\n}",
"func SetupSimulation() {\n iRobot = Robot{}\n iSurface = Surface{}\n fmt.Print(\"\\nPlease enter the length of the adjacency matrix to start simulation: \\n\")\n scanner := bufio.NewScanner(os.Stdin)\n for scanner.Scan() {\n length, err := strconv.Atoi(strings.TrimSpace(scanner.Text()))\n if err == nil && length > 1 {\n //initialize adjacency matrix\n iSurface = NewSurface(length)\n break\n }\n fmt.Println(\"Please enter the length of the adjacency matrix: >1 \\n\")\n }\n\n iSurface.Print(Node{})\n BeginSimulation()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Ping gets the latest token and endpoint from knapsack and updates the sender
|
func (ls *LogShipper) Ping() {
// set up new auth token
token, _ := ls.knapsack.TokenStore().Get(storage.ObservabilityIngestAuthTokenKey)
ls.sender.authtoken = string(token)
parsedUrl, err := url.Parse(ls.knapsack.LogIngestServerURL())
if err != nil {
        // If we have a bad endpoint, just disable shipping for now.
        // It will get re-enabled when the control server sends a
        // valid endpoint.
ls.sender.endpoint = ""
level.Debug(ls.baseLogger).Log(
"msg", "error parsing log ingest server url, shipping disabled",
"err", err,
"log_ingest_url", ls.knapsack.LogIngestServerURL(),
)
} else if parsedUrl != nil {
ls.sender.endpoint = parsedUrl.String()
}
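    // Shipping stays enabled only while we have a usable endpoint.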
ls.isShippingEnabled = ls.sender.endpoint != ""
ls.addDeviceIdentifyingAttributesToLogger()
if !ls.isShippingEnabled {
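        // Shipping is disabled, so drop anything already buffered.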
ls.sendBuffer.DeleteAllData()
}
}
|
[
"func (p *protocol) Ping(ctx context.Context, peer p2pcrypto.PublicKey) error {\n\tplogger := p.logger.WithFields(log.String(\"type\", \"ping\"), log.String(\"to\", peer.String()))\n\tplogger.Debug(\"send ping request\")\n\n\tdata, err := types.InterfaceToBytes(p.local)\n\tif err != nil {\n\t\treturn err\n\t}\n\tch := make(chan []byte, 1)\n\tfoo := func(msg []byte) {\n\t\tplogger.Debug(\"handle ping response\")\n\t\tsender := &node.Info{}\n\t\terr := types.BytesToInterface(msg, sender)\n\n\t\tif err != nil {\n\t\t\tplogger.With().Warning(\"got unreadable pong\", log.Err(err))\n\t\t\treturn\n\t\t}\n\t\t// TODO: if we pinged it we already have id so no need to update,\n\t\t// but what if id or listen address has changed?\n\t\tch <- sender.ID.Bytes()\n\t}\n\n\terr = p.msgServer.SendRequest(ctx, server.PingPong, data, peer, foo, func(err error) {})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttimeout := time.NewTimer(MessageTimeout) // todo: check whether this is useless because of `requestLifetime`\n\tselect {\n\tcase id := <-ch:\n\t\tif id == nil {\n\t\t\treturn errors.New(\"failed sending message\")\n\t\t}\n\t\tif !bytes.Equal(id, peer.Bytes()) {\n\t\t\treturn errors.New(\"got pong with different public key\")\n\t\t}\n\tcase <-timeout.C:\n\t\treturn errors.New(\"ping timeout\")\n\t}\n\n\treturn nil\n}",
"func Ping(node *shared.Node) {\n\tfor {\n\t\tblockchain.SwimBatchPuzzleGenerator(node)\n\n\t\ttime.Sleep(pingInterval)\n\t\ttarget := node.MembersSet.GetRandom()\n\t\tif target == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ttargetPeer := strings.Split(target, \" \")\n\t\tip := targetPeer[0]\n\t\tport := targetPeer[1]\n\t\tconn, err := net.Dial(\"tcp\", ip+\":\"+port)\n\t\tif err != nil {\n\t\t\t// failure detected!\n\t\t\tif strings.HasSuffix(err.Error(), \"connect: connection refused\") {\n\t\t\t\tnode.MembersSet.SetDelete(target)\n\t\t\t\tnode.FailMsgBuffer.Add(target)\n\t\t\t\tfmt.Println(\"FAILURE DETECTED \" + target)\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Dial Error: \", err)\n\t\t\t}\n\t\t} else {\n\t\t\t// SWIM Implementation would send membership update message here\n\t\t\tswimMsg := \"DEAD \" + strings.Join(node.FailMsgBuffer.GetN(10), \",\") + \"\\n\"\n\t\t\tlogBandwithInfo(\"Send\", len(swimMsg))\n\t\t\tfmt.Fprintf(conn, swimMsg)\n\t\t\tfmt.Print(\"SWIM SENT \" + swimMsg)\n\t\t\ttransactionsMsg := strings.Join(node.TransactionBuffer.GetN(10000), \"\\n\") + \"\\n\"\n\t\t\tlogBandwithInfo(\"Send\", len(transactionsMsg))\n\t\t\tfmt.Fprintf(conn, transactionsMsg)\n\t\t\tfor _, block := range node.BlockBuffer.GetAll() {\n\t\t\t\tblockchain.SendBlock(node, conn, block)\n\t\t\t}\n\n\t\t\tconn.Close()\n\t\t}\n\t}\n}",
"func (s *SWIM) ping(target *Member) error {\n\tstats, err := s.mbrStatsMsgStore.Get()\n\tif err != nil {\n\t\tiLogger.Error(nil, err.Error())\n\t}\n\n\t// send ping message\n\taddr := target.Address()\n\tpingId := xid.New().String()\n\tping := createPingMessage(pingId, s.member.Address(), &stats)\n\n\tres, err := s.messageEndpoint.SyncSend(addr, ping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// update piggyback data to store\n\ts.handlePbk(res.PiggyBack)\n\n\treturn nil\n}",
"func (app *App) Ping(msg osc.Message) error {\n\treturn errors.Wrap(app.SendTo(msg.Sender, osc.Message{Address: \"/pong\"}), \"sending pong\")\n}",
"func (s *GenericAPIServer) ping(ctx context.Context) error {\n\turl := fmt.Sprintf(\"http://%s/healthz\", s.InsecureServingInfo.BindAddress)\n\tif strings.Contains(s.InsecureServingInfo.BindAddress, \"0.0.0.0\") {\n\t\turl = fmt.Sprintf(\"http://127.0.0.1:%s/healthz\", strings.Split(s.InsecureServingInfo.BindAddress, \":\")[1])\n\t}\n\n\tfor {\n\t\t// Change NewRequest to NewRequestWithContext and pass context it\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Ping the server by sending a GET request to `/healthz`.\n\t\t// nolint: gosec\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err == nil && resp.StatusCode == http.StatusOK {\n\t\t\tlog.Info(\"The router has been deployed successfully.\")\n\n\t\t\tresp.Body.Close()\n\n\t\t\treturn nil\n\t\t}\n\n\t\t// Sleep for a second to continue the next ping.\n\t\tlog.Info(\"Waiting for the router, retry in 1 second.\")\n\t\ttime.Sleep(1 * time.Second)\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Fatal(\"can not ping http server within the specified time interval.\")\n\t\tdefault:\n\t\t}\n\t}\n\t// return fmt.Errorf(\"the router has no response, or it might took too long to start up\")\n}",
"func (s *Server) processPing(msg message.Message, db *database.DataStore) {\n\tlog.Debugf(\"[%s] Processing ping command\", msg.ULID)\n\n\terr := db.C(models.Endpoints).Update(bson.M{\"ulid\": msg.ULID}, bson.M{\"$set\": bson.M{\"last_ping\": time.Now()}})\n\tif err == mgo.ErrNotFound {\n\t\tlog.Errorf(\"[%s] Sending PONG with error: %s\", msg.ULID, errors.Errors[errors.NeedsRegister])\n\t\ts.sendErrorMsg(msg, errors.NeedsRegister, \"Pong\")\n\t} else {\n\t\tif jobs, pending := s.pendingJobs(msg, db); pending {\n\t\t\ts.taskPicker(msg, jobs, db)\n\t\t} else {\n\t\t\tmsg.Result = \"Pong\"\n\t\t\ts.w.Encode(msg)\n\t\t\tlog.Infof(\"[%s] Sending PONG due to no task assigned\", msg.ULID)\n\t\t}\n\t}\n}",
"func (s *RufsServer) Ping(request map[RequestArgument]interface{},\n\tresponse *interface{}) error {\n\n\t// Verify the given token and retrieve the target member\n\n\tif _, err := s.checkToken(request); err != nil {\n\t\treturn err\n\t}\n\n\t// Send a simple response\n\n\tres := []string{\"Pong\"}\n\n\t*response = res\n\n\treturn nil\n}",
"func (reb *rebManager) pingTarget(tsi *cluster.Snode, config *cmn.Config, ver int64, _ *xactGlobalReb) (ok bool) {\n\tvar (\n\t\ttname = reb.t.si.Name()\n\t\tmaxwt = config.Rebalance.DestRetryTime\n\t\tsleep = config.Timeout.CplaneOperation\n\t\tsleepRetry = keepaliveRetryDuration(config)\n\t\tcurwt time.Duration\n\t\targs = callArgs{\n\t\t\tsi: tsi,\n\t\t\treq: cmn.ReqArgs{\n\t\t\t\tMethod: http.MethodGet,\n\t\t\t\tBase: tsi.IntraControlNet.DirectURL,\n\t\t\t\tPath: cmn.URLPath(cmn.Version, cmn.Health),\n\t\t\t},\n\t\t\ttimeout: config.Timeout.CplaneOperation,\n\t\t}\n\t)\n\tfor curwt < maxwt {\n\t\tres := reb.t.call(args)\n\t\tif res.err == nil {\n\t\t\tif curwt > 0 {\n\t\t\t\tglog.Infof(\"%s: %s is online\", tname, tsi.Name())\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t\targs.timeout = sleepRetry\n\t\tglog.Warningf(\"%s: waiting for %s, err %v\", tname, tsi.Name(), res.err)\n\t\ttime.Sleep(sleep)\n\t\tcurwt += sleep\n\t\tnver := reb.t.smapowner.get().version()\n\t\tif nver > ver {\n\t\t\treturn\n\t\t}\n\t}\n\tglog.Errorf(\"%s: timed-out waiting for %s\", tname, tsi.Name())\n\treturn\n}",
"func periodicPing() {\n\tfor {\n\t\t// Shuffle membership list and get a member\n\t\t// Only executed when the membership list is not empty\n\t\tif CurrentList.Size() > 0 {\n\t\t\tmember := CurrentList.Shuffle()\n\t\t\t// Do not pick itself as the ping target\n\t\t\tif (member.TimeStamp == CurrentMember.TimeStamp) && (member.IP == CurrentMember.IP) {\n\t\t\t\ttime.Sleep(PingSendingPeriod)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tLogger.Info(\"Member (%d, %d) is selected by shuffling\\n\", member.TimeStamp, member.IP)\n\t\t\t// Get update entry from TTL Cache\n\t\t\tupdate, flag, err := getUpdate()\n\t\t\t// if no update there, do pure ping\n\t\t\tif err != nil {\n\t\t\t\tping(member)\n\t\t\t} else {\n\t\t\t\t// Send update as payload of ping\n\t\t\t\tpingWithPayload(member, update, flag)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(PingSendingPeriod)\n\t}\n}",
"func (c *Impl) ping() {\n\tgo func(c *Impl) {\n\t\tt := time.NewTicker(3 * time.Second)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-t.C:\n\t\t\t\terr := c.conn.SendSignal(frame.NewPingFrame())\n\t\t\t\tlogger.Info(\"Send Ping to zipper.\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err.Error() == quic.ErrConnectionClosed {\n\t\t\t\t\t\tlogger.Print(\"[client] ❌ the zipper was offline.\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// other errors.\n\t\t\t\t\t\tlogger.Error(\"[client] ❌ sent Ping to zipper failed.\", \"err\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tt.Stop()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}(c)\n}",
"func (client *MqttClient) Ping(ctx context.Context) error {\n\n\tp := packet.NewPingreqPacket()\n\t_, err := client.queuePacket(ctx, p)\n\tif err != nil {\n\t\tlogError(\"[MQTT] failed to send packet: %s\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (agent *Agent) PingKvEx(opts PingKvOptions, cb PingKvExCallback) (PendingOp, error) {\n\tconfig := agent.routingInfo.Get()\n\tif config == nil {\n\t\treturn nil, ErrShutdown\n\t}\n\n\top := &pingOp{\n\t\tcallback: cb,\n\t\tremaining: 1,\n\t}\n\n\tpingStartTime := time.Now()\n\n\tkvHandler := func(resp *memdQResponse, req *memdQRequest, err error) {\n\t\tserverAddress := resp.sourceAddr\n\n\t\tpingLatency := time.Now().Sub(pingStartTime)\n\n\t\top.lock.Lock()\n\t\top.results = append(op.results, PingResult{\n\t\t\tEndpoint: serverAddress,\n\t\t\tError: err,\n\t\t\tLatency: pingLatency,\n\t\t})\n\t\top.handledOneLocked()\n\t\top.lock.Unlock()\n\t}\n\n\tfor serverIdx := 0; serverIdx < config.clientMux.NumPipelines(); serverIdx++ {\n\t\tpipeline := config.clientMux.GetPipeline(serverIdx)\n\t\tserverAddress := pipeline.Address()\n\n\t\treq := &memdQRequest{\n\t\t\tmemdPacket: memdPacket{\n\t\t\t\tMagic: reqMagic,\n\t\t\t\tOpcode: cmdNoop,\n\t\t\t\tDatatype: 0,\n\t\t\t\tCas: 0,\n\t\t\t\tKey: nil,\n\t\t\t\tValue: nil,\n\t\t\t},\n\t\t\tCallback: kvHandler,\n\t\t}\n\n\t\tcurOp, err := agent.dispatchOpToAddress(req, serverAddress)\n\t\tif err != nil {\n\t\t\top.lock.Lock()\n\t\t\top.results = append(op.results, PingResult{\n\t\t\t\tEndpoint: serverAddress,\n\t\t\t\tError: err,\n\t\t\t\tLatency: 0,\n\t\t\t})\n\t\t\top.lock.Unlock()\n\t\t\tcontinue\n\t\t}\n\n\t\top.lock.Lock()\n\t\top.subops = append(op.subops, pingSubOp{\n\t\t\tendpoint: serverAddress,\n\t\t\top: curOp,\n\t\t})\n\t\tatomic.AddInt32(&op.remaining, 1)\n\t\top.lock.Unlock()\n\t}\n\n\t// We initialized remaining to one to ensure that the callback is not\n\t// invoked until all of the operations have been dispatched first. This\n\t// final handling is to indicate that all operations were dispatched.\n\top.lock.Lock()\n\top.handledOneLocked()\n\top.lock.Unlock()\n\n\treturn op, nil\n}",
"func (h *Hub) Ping(ctx context.Context, _ *pb.PingRequest) (*pb.PingReply, error) {\n\tlog.G(h.ctx).Info(\"handling Ping request\")\n\treturn &pb.PingReply{}, nil\n}",
"func (c *Client) Ping(name string, message string) []error {\n\turl := c.buildURL(\"/v1/ping\")\n\n\t_, _, errs := gorequest.New().\n\t\tPost(url).\n\t\tSet(\"Authorization\", c.buildAuthHeader()).\n\t\tSend(requests.PingRequest{\n\t\tName: name,\n\t\tMessage: message,\n\t}).\n\t\tEnd()\n\n\tif errs != nil {\n\t\treturn errs\n\t}\n\n\treturn nil\n}",
"func (c *HttpController) Ping(writer http.ResponseWriter, request *http.Request) {\n\tresponse := common.NewPingResponse()\n\tc.sendResponse(writer, request, contracts.ApiPingRoute, response, http.StatusOK)\n}",
"func (connection *Connection) ping() {\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\t\tif len(connection.consumers) > 0 {\n\t\t\t//do some ping, if no response then kill it\n\t\t\tfor _, consumer := range connection.consumers {\n\t\t\t\t_, pingError := consumer.connection.Write([]byte(\"hunga\"))\n\t\t\t\tif pingError != nil {\n\t\t\t\t\t// fmt.Print(\"PING ERROR\")\n\t\t\t\t\tconnection.killConsumer(consumer.id)\n\t\t\t\t} else {\n\t\t\t\t\tconnection.getConsumerMessage(consumer.id)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func SendPings(pms []*dm.PingMeasurement, batchSize int) {\n\n\tcontrollerClient, err := CreateControllerClient(\"controller/root.crt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// Shuffle the pings \n\trand.Shuffle(len(pms), func(i, j int) { pms[i], pms[j] = pms[j], pms[i] })\n\n\t// Create batches of 100K TS probes\n\tbatches := [] []*dm.PingMeasurement{}\n\tfor i := 0; i < len(pms); i+=batchSize {\n\t\tif i + batchSize > len(pms) {\n\t\t\tbatches = append(batches, pms[i:])\n\t\t} else {\n\t\t\tbatches = append(batches, pms[i:i+batchSize])\n\t\t}\n\t\t\n\t}\n\n\tfor i, batch := range(batches) {\n\t\tpingsByVP := map[uint32]uint32 {}\n\t\tfor _, p := range(batch) {\n\t\t\tvar vpI uint32\n\t\t\tif p.Spoof {\n\t\t\t\tvp := p.SAddr\n\t\t\t\tvpI, _ = util.IPStringToInt32(vp)\n\t\t\t} else {\n\t\t\t\tvpI = p.Src\n\t\t\t}\n\t\t\tif _, ok := pingsByVP[vpI]; !ok {\n\t\t\t\tpingsByVP[vpI] = 0\n\t\t\t}\n\t\t\tpingsByVP[vpI] += 1 \n\t\t}\n\t\t\n\t\ttheoreticalSeconds := util.TheoreticalSpoofTimeout(100, pingsByVP)\n\n\t\tif len(batch) > 100 {\n\t\t\t// Small hack here to determine a back of the enveloppe estimation of the measurement time.\n\t\t\t// Basically, check whether we are in an huge batch of pings of just a small RR pings from the revtr\n\t\t\t// system \n\t\t\tbatch[0].IsAbsoluteSpoofTimeout = true\n\t\t\tbatch[0].SpoofTimeout = theoreticalSeconds + 10 // time to gather last responses + processing time of the system \t\n\t\t}\n\n\t\tlog.Infof(\"Sending %d\", i)\n\t\tst, err := controllerClient.Ping(context.Background(), &dm.PingArg{\n\t\t\tPings: batch,\n\t\t})\n\t\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\n\t\t// Do nothing on replies, we'll analyze it later. \n\t\tfor {\n\t\t\t_, err := st.Recv()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\t// log.Error(err)\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\n\t\tlog.Infof(\"Finished %d\", i)\n\t} \n\t\n}",
"func (conn *Conn) ping() {\n\ttick := time.NewTicker(conn.PingFreq)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\tconn.Raw(fmt.Sprintf(\"PING :%d\", time.Now().UnixNano()))\n\t\tcase <-conn.cPing:\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (m *manager) onPing(addr string, rtt time.Duration) error {\n\tv := int32(rtt.Nanoseconds() / 1000)\n\tif v == 0 { // Don't let it be zero, otherwise the update would fail\n\t\tv = 1\n\t}\n\n\tm.monitor.Measure(addr, v)\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
filterResults filters out the osquery results, which just make a lot of noise in our debug logs. It's a bit fragile, since it parses keyvals, but hopefully that's good enough
|
func filterResults(keyvals ...interface{}) {
// Consider switching on `method` as well?
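    // keyvals is a flat list of alternating keys and values.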
for i := 0; i < len(keyvals); i += 2 {
if keyvals[i] == "results" && len(keyvals) > i+1 {
str, ok := keyvals[i+1].(string)
if ok && len(str) > 100 {
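                // Keep only the first 99 characters so results don't flood the logs.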
keyvals[i+1] = fmt.Sprintf(truncatedFormatString, str[0:99])
}
}
}
}
|
[
"func (client *Client) FilterResults(bucketKey, testID string, count int64, since, before *time.Time) ([]Result, error) {\n\tvar results = []Result{}\n\n\tfilterQs, err := client.buildFilterQS(count, since, before)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath := fmt.Sprintf(\"buckets/%s/tests/%s/results%s\", bucketKey, testID, filterQs)\n\tcontent, err := client.Get(path)\n\tif err != nil {\n\t\treturn results, err\n\t}\n\n\terr = unmarshal(content, &results)\n\treturn results, err\n}",
"func processFilter(keys []string, filter []string) ([]string, bool) {\n\tvar vpps []string\n\tif len(filter) > 0 {\n\t\t// Ignore all parameters but first\n\t\tvpps = strings.Split(filter[0], \",\")\n\t} else {\n\t\t// Show all if there is no filter\n\t\tvpps = keys\n\t}\n\tvar isData bool\n\t// Find at leas one match\n\tfor _, key := range keys {\n\t\tfor _, vpp := range vpps {\n\t\t\tif key == vpp {\n\t\t\t\tisData = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn vpps, isData\n}",
"func getFilteredResults(args []interface{}, dtData dtPaginationData) ([]map[string]string, error) { //nolint:lll\n\tvar (\n\t\tresult []map[string]string\n\t\tp = \"%\"\n\t\taux = []interface{}{p}\n\t)\n\n\tresult, err := search(dtData, args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = getNumberOfResults(dtData, aux)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result, nil\n}",
"func (s *Server) ListResultsResult(ctx context.Context, req *pb.ListResultsRequest) (*pb.ListResultsResponse, error) {\n\t// Set up environment for cel and check if filter is empty string\n\tast, issues := s.env.Compile(req.GetFilter())\n\tif issues != nil && issues.Err() != nil && req.GetFilter() != \"\" {\n\t\tlog.Printf(\"type-check error: %s\", issues.Err())\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"Error occurred during filter parse step, no Results found for the query string due to invalid field, invalid function to evaluate filter or missing double quotes around field value, please try to enter a query with correct type again: %v\", issues.Err())\n\t}\n\t// get all results from database\n\trows, err := s.db.Query(\"SELECT data FROM results\")\n\tif err != nil {\n\t\tlog.Printf(\"failed to query on database: %v\", err)\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to query results: %v\", err)\n\t}\n\tvar results []*pb.Result\n\tfor rows.Next() {\n\t\tvar b []byte\n\t\tif err := rows.Scan(&b); err != nil {\n\t\t\tlog.Printf(\"failed to scan a row in query results: %v\", err)\n\t\t\treturn nil, status.Errorf(codes.Internal, \"failed to read result data: %v\", err)\n\t\t}\n\t\tr := &pb.Result{}\n\t\tif err := proto.Unmarshal(b, r); err != nil {\n\t\t\tlog.Printf(\"unmarshaling error: %v\", err)\n\t\t\treturn nil, status.Errorf(codes.Internal, \"failed to parse result data: %v\", err)\n\t\t}\n\t\tresults = append(results, r)\n\t}\n\n\t// return all results back to users if empty query is given\n\tif req.GetFilter() == \"\" {\n\t\treturn &pb.ListResultsResponse{Results: results}, nil\n\t}\n\n\t// filter from all results\n\tprg, err := s.env.Program(ast)\n\tif err != nil {\n\t\tlog.Printf(\"program construction error: %s\", err)\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"Error occurred during filter checking step, no Results found for the query string due to invalid field, invalid function to evaluate filter or missing double quotes around field value, please try to enter a query with correct type again: %v\", err)\n\t}\n\tvar resp []*pb.Result\n\tfor _, r := range results {\n\t\tif ok, err := matchCelFilter(r, prg); err != nil {\n\t\t\treturn nil, err\n\t\t} else if ok {\n\t\t\tresp = append(resp, r)\n\t\t}\n\t}\n\treturn &pb.ListResultsResponse{Results: resp}, nil\n}",
"func unfiltered(providers []string, q campwiz.Query, cs cache.Store) ([]campwiz.Result, []error) {\n\tklog.V(1).Infof(\"search campwiz.Query: %+v\", q)\n\n\tresults := []campwiz.Result{}\n\terrs := []error{}\n\n\t// There is an opportunity to parallelize this with channels if anyone is keen to do so\n\tfor _, pname := range providers {\n\t\tp, err := backend.New(backend.Config{Type: pname, Store: cs})\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"%s init: %v\", pname, err))\n\t\t\tcontinue\n\t\t}\n\n\t\tprs, err := p.List(q)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"%s list: %v\", pname, err))\n\t\t\tcontinue\n\t\t}\n\n\t\tresults = append(results, prs...)\n\t}\n\n\treturn results, errs\n}",
"func (list *APTAuditList) clearResults() {\n\tlist.mutex.Lock()\n\tlist.results = make([]string, 0)\n\tlist.mutex.Unlock()\n}",
"func (s *ShowCLI) filterEntries() {\n\tnewSlice := []*common.RegistrationEntry{}\n\t// Map used to skip duplicated entries.\n\tmatchingEntries := map[string]*common.RegistrationEntry{}\n\n\tvar federatedIDs map[string]bool\n\tif len(s.Config.FederatesWith) > 0 {\n\t\tfederatedIDs = make(map[string]bool)\n\t\tfor _, federatesWith := range s.Config.FederatesWith {\n\t\t\tfederatedIDs[federatesWith] = true\n\t\t}\n\t}\n\n\tfor _, e := range s.Entries {\n\t\tmatch, _ := hasSelectors(e, s.Config.Selectors)\n\t\tif !match {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If SpiffeID was specified, discard entries that don't match.\n\t\tif s.Config.SpiffeID != \"\" && e.SpiffeId != s.Config.SpiffeID {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If ParentID was specified, discard entries that don't match.\n\t\tif s.Config.ParentID != \"\" && e.ParentId != s.Config.ParentID {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If FederatesWith was specified, discard entries that don't match\n\t\tif federatedIDs != nil {\n\t\t\tfound := false\n\t\t\tfor _, federatesWith := range e.FederatesWith {\n\t\t\t\tif federatedIDs[federatesWith] {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// If this entry wasn't matched before, save it.\n\t\tif _, ok := matchingEntries[e.EntryId]; !ok {\n\t\t\tmatchingEntries[e.EntryId] = e\n\t\t\tnewSlice = append(newSlice, e)\n\t\t}\n\t}\n\n\ts.Entries = newSlice\n}",
"func filterResultSet(result couchDbResponse, params dragonfruit.QueryParams,\n\tlimit int, offset int) (int, couchDbResponse, error) {\n\n\tif len(params.QueryParams) < 1 {\n\t\treturn len(result.Rows), result, nil\n\t}\n\toutResult := result\n\n\toutResult.Rows = make([]couchdbRow, 0)\n\tfor _, v := range result.Rows {\n\t\tfor queryParam := range params.QueryParams {\n\n\t\t\tval, ok := v.Value[queryParam]\n\t\t\tif ok && (params.QueryParams.Get(queryParam) == val) {\n\t\t\t\t/*switch val.(type) {}*/\n\n\t\t\t\toutResult.Rows = append(outResult.Rows, v)\n\t\t\t}\n\t\t}\n\t}\n\ttotalNum := len(outResult.Rows)\n\tif int(offset) > totalNum {\n\t\toutResult.Rows = make([]couchdbRow, 0)\n\t} else if int(limit+offset) > len(outResult.Rows) {\n\t\toutResult.Rows = outResult.Rows[offset:len(outResult.Rows)]\n\t} else {\n\t\toutResult.Rows = outResult.Rows[offset:(offset + limit)]\n\t}\n\n\treturn totalNum, outResult, nil\n}",
"func FilterPubSimulationResults(_ map[string]*common.CollectionConfigPackage, pubSimulationResults *rwset.TxReadWriteSet) (*rwset.TxReadWriteSet, error) {\n\treturn pubSimulationResults, nil\n}",
"func ProcessListResults(result map[string]interface{}) []MovieList {\n\n\tvar movielist []MovieList\n\tif result[\"Response\"].(string) == \"False\" {\n\t\treturn movielist\n\t}\n\tlength, err := strconv.Atoi(result[\"totalResults\"].(string))\n\tif length > 10 && err != nil {\n\t\tlength = 10\n\t}\n\tmovielist = make([]MovieList, length)\n\n\t//fmt.Println(result)\n\tmovieArray := result[\"Search\"].([]interface{})\n\tfor key, value := range movieArray {\n\t\tmovielist[key].Title = value.(map[string]interface{})[\"Title\"].(string)\n\t\tmovielist[key].Type = value.(map[string]interface{})[\"Type\"].(string)\n\t\tmovielist[key].Year = value.(map[string]interface{})[\"Year\"].(string)\n\t}\n\treturn movielist\n}",
"func ParseResults(data string) [][]string {\n\tvar out [][]string\n\trows := strings.Split(data, \"\\n\")\n\tfor i := 0; i < len(rows); i++ {\n\t\trow := strings.Split(rows[i], \",\")\n\t\tif len(row) == 5 && row[0] != \"SUM\" {\n\t\t\tout = append(out, row)\n\t\t}\n\t}\n\treturn out\n}",
"func resultToQueryItems(queryType string, results Results) ([]QueryItem, error) {\n\tresultSize := int64(results.Results.Total)\n\tif resultSize < 1 {\n\t\treturn nil, nil\n\t}\n\tvar items = make([]QueryItem, resultSize)\n\tswitch queryType {\n\tcase types.QtAdminCatalogItem:\n\t\tfor i, item := range results.Results.AdminCatalogItemRecord {\n\t\t\titems[i] = QueryCatalogItem(*item)\n\t\t}\n\tcase types.QtCatalogItem:\n\t\tfor i, item := range results.Results.CatalogItemRecord {\n\t\t\titems[i] = QueryCatalogItem(*item)\n\t\t}\n\tcase types.QtMedia:\n\t\tfor i, item := range results.Results.MediaRecord {\n\t\t\titems[i] = QueryMedia(*item)\n\t\t}\n\tcase types.QtAdminMedia:\n\t\tfor i, item := range results.Results.AdminMediaRecord {\n\t\t\titems[i] = QueryMedia(*item)\n\t\t}\n\tcase types.QtVappTemplate:\n\t\tfor i, item := range results.Results.VappTemplateRecord {\n\t\t\titems[i] = QueryVAppTemplate(*item)\n\t\t}\n\tcase types.QtAdminVappTemplate:\n\t\tfor i, item := range results.Results.AdminVappTemplateRecord {\n\t\t\titems[i] = QueryVAppTemplate(*item)\n\t\t}\n\tcase types.QtEdgeGateway:\n\t\tfor i, item := range results.Results.EdgeGatewayRecord {\n\t\t\titems[i] = QueryEdgeGateway(*item)\n\t\t}\n\tcase types.QtOrgVdcNetwork:\n\t\tfor i, item := range results.Results.OrgVdcNetworkRecord {\n\t\t\titems[i] = QueryOrgVdcNetwork(*item)\n\t\t}\n\tcase types.QtCatalog:\n\t\tfor i, item := range results.Results.CatalogRecord {\n\t\t\titems[i] = QueryCatalog(*item)\n\t\t}\n\tcase types.QtAdminCatalog:\n\t\tfor i, item := range results.Results.AdminCatalogRecord {\n\t\t\titems[i] = QueryAdminCatalog(*item)\n\t\t}\n\tcase types.QtVm:\n\t\tfor i, item := range results.Results.VMRecord {\n\t\t\titems[i] = QueryVm(*item)\n\t\t}\n\tcase types.QtAdminVm:\n\t\tfor i, item := range results.Results.AdminVMRecord {\n\t\t\titems[i] = QueryVm(*item)\n\t\t}\n\tcase types.QtVapp:\n\t\tfor i, item := range results.Results.VAppRecord {\n\t\t\titems[i] = QueryVapp(*item)\n\t\t}\n\tcase types.QtAdminVapp:\n\t\tfor i, item := range results.Results.AdminVAppRecord {\n\t\t\titems[i] = QueryVapp(*item)\n\t\t}\n\tcase types.QtOrgVdc:\n\t\tfor i, item := range results.Results.OrgVdcRecord {\n\t\t\titems[i] = QueryOrgVdc(*item)\n\t\t}\n\tcase types.QtAdminOrgVdc:\n\t\tfor i, item := range results.Results.OrgVdcAdminRecord {\n\t\t\titems[i] = QueryOrgVdc(*item)\n\t\t}\n\tcase types.QtTask:\n\t\tfor i, item := range results.Results.TaskRecord {\n\t\t\titems[i] = QueryTask(*item)\n\t\t}\n\tcase types.QtAdminTask:\n\t\tfor i, item := range results.Results.TaskRecord {\n\t\t\titems[i] = QueryAdminTask(*item)\n\t\t}\n\n\t}\n\tif len(items) > 0 {\n\t\treturn items, nil\n\t}\n\treturn nil, fmt.Errorf(\"unsupported query type %s\", queryType)\n}",
"func LogResults(ctx context.Context, searchResult []string) error {\n\tfmt.Println(\"Start to log results\")\n\tfor i, result := range searchResult {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn context.Canceled\n\t\tdefault:\n\t\t}\n\n\t\tvar product Product\n\t\tif err := json.NewDecoder(strings.NewReader(result)).Decode(&product); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"Total #%d : \\n%v\\n%v\\n%v\\n%v\\n\\n\", i+1, product.Name, product.URL, product.Image, product.Price)\n\n\t}\n\treturn nil\n}",
"func handleResults() {\n\tlist, err := database.ReadList()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"\\n//---------------//5 CLOSEST LOCATIONS//--------------//\")\n\tfmt.Println(\"\\n//ID: DISTANCE (in meters)\")\n\t// Slicing the ordered list with the top 5 results\n\tfor _, c := range list[:5] {\n\t\tfmt.Printf(\"%v: %.0fm\\n\", c.Id, c.Distance)\n\t}\n\n\tfmt.Println(\"\\n//---------------//5 FURTHEST LOCATIONS//--------------//\")\n\tfmt.Println(\"\\n//ID: DISTANCE (in meters)\")\n\t// Slicing the list with the bottom 5 results\n\tfor _, c := range list[len(list)-5:] {\n\t\tfmt.Printf(\"%v: %.0fm\\n\", c.Id, c.Distance)\n\t}\n}",
"func filter(matches map[string]Matches) error {\n\tfor k, v := range matches {\n\t\tif v.Len() == 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif v.Len() == 0 {\n\t\t\treturn Errorf(\"%s matches no fields\", k)\n\t\t}\n\n\t\texact, eptr := v.Exists(k, Cmp)\n\t\tif !exact {\n\t\t\terr := Errorf(\"%s matches %d fields\", k, v.Len())\n\t\t\terr.Needle = k\n\t\t\terr.Matches = v\n\t\t\treturn err\n\t\t}\n\t\tv.Swap(0, eptr.Index)\n\t}\n\n\treturn nil\n}",
"func getNonFilteredResults(searchValue string, start string, end string, dtData dtPaginationData) ([]map[string]string, error) {\n\tvar (\n\t\tp = searchValue + \"%\"\n\t\targs = []interface{}{p, start, end}\n\t\taux = []interface{}{p}\n\t)\n\n\tresult, err := search(dtData, args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = getNumberOfResults(dtData, aux)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result, err\n}",
"func doFilterHistory() error {\n\n\tshowUpdateStatus()\n\n\thistory.MaxSearchResults = maxResults * 10 // allow for lots of duplicates\n\twf.Configure(aw.MaxResults(maxResults))\n\n\tentries, err := history.Search(query)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Remove duplicates\n\tvar (\n\t\tseen = map[string]bool{}\n\t\tunique = []*history.Entry{}\n\t)\n\tfor _, e := range entries {\n\t\tif seen[e.URL] {\n\t\t\tcontinue\n\t\t}\n\t\tseen[e.URL] = true\n\t\tunique = append(unique, e)\n\t}\n\tentries = unique\n\n\tlog.Printf(\"%d results for \\\"%s\\\"\", len(entries), query)\n\n\tfor _, e := range entries {\n\t\tURLerItem(&hURLer{e})\n\t}\n\n\twf.WarnEmpty(\"No matching entries found\", \"Try a different query?\")\n\twf.SendFeedback()\n\n\treturn nil\n}",
"func (p *M3uParser) FilterBy(key string, filters []string, retrieve bool, nestedKey bool) {\n\tif p.isEmpty() {\n\t\tlog.Infof(\"No streams info to filter.\")\n\t\treturn\n\t}\n\tvar key0, key1 string\n\tvar filteredStreams []Channel\n\tif nestedKey {\n\t\tsplittedKey := strings.Split(key, \"-\")\n\t\tkey0, key1 = splittedKey[0], splittedKey[1]\n\t}\n\tif len(filters) == 0 {\n\t\tlog.Warnln(\"Filter word/s missing!!!\")\n\t\treturn\n\t}\n\n\tswitch nestedKey {\n\tcase false:\n\t\tfor _, stream := range p.streamsInfo {\n\t\t\tif val, ok := stream[key]; ok {\n\t\t\t\tfor _, filter := range filters {\n\t\t\t\t\tif retrieve {\n\t\t\t\t\t\tif strings.Contains(strings.ToLower(fmt.Sprintf(\"%v\", val)), strings.ToLower(filter)) {\n\t\t\t\t\t\t\tfilteredStreams = append(filteredStreams, stream)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif !strings.Contains(strings.ToLower(fmt.Sprintf(\"%v\", val)), strings.ToLower(filter)) {\n\t\t\t\t\t\t\tfilteredStreams = append(filteredStreams, stream)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase true:\n\t\tfor _, stream := range p.streamsInfo {\n\t\t\tif val, ok := stream[key0]; ok {\n\t\t\t\tswitch v := val.(type) {\n\t\t\t\tcase map[string]string:\n\t\t\t\t\tif val, ok := v[key1]; ok {\n\t\t\t\t\t\tfor _, filter := range filters {\n\t\t\t\t\t\t\tif retrieve {\n\t\t\t\t\t\t\t\tif strings.Contains(strings.ToLower(val), strings.ToLower(filter)) {\n\t\t\t\t\t\t\t\t\tfilteredStreams = append(filteredStreams, stream)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tif !strings.Contains(strings.ToLower(val), strings.ToLower(filter)) {\n\t\t\t\t\t\t\t\t\tfilteredStreams = append(filteredStreams, stream)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tp.streamsInfo = filteredStreams\n}",
"func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) {\n\t// Management tokens can see everything with no filtering.\n\tif f.authorizer.ACLWrite() {\n\t\treturn\n\t}\n\n\t// Otherwise, we need to see what the token has access to.\n\tret := make(structs.PreparedQueries, 0, len(*queries))\n\tfor _, query := range *queries {\n\t\t// If no prefix ACL applies to this query then filter it, since\n\t\t// we know at this point the user doesn't have a management\n\t\t// token, otherwise see what the policy says.\n\t\tprefix, ok := query.GetACLPrefix()\n\t\tif !ok || !f.authorizer.PreparedQueryRead(prefix) {\n\t\t\tf.logger.Printf(\"[DEBUG] consul: dropping prepared query %q from result due to ACLs\", query.ID)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Redact any tokens if necessary. We make a copy of just the\n\t\t// pointer so we don't mess with the caller's slice.\n\t\tfinal := query\n\t\tf.redactPreparedQueryTokens(&final)\n\t\tret = append(ret, final)\n\t}\n\t*queries = ret\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
addDeviceIdentifyingAttributesToLogger gets device identifiers from the server-provided data and adds them as attributes on the logger.
|
func (ls *LogShipper) addDeviceIdentifyingAttributesToLogger() {
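    // Each lookup is best-effort: a missing identifier is logged at debug level and skipped.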
if deviceId, err := ls.knapsack.ServerProvidedDataStore().Get([]byte("device_id")); err != nil {
level.Debug(ls.baseLogger).Log("msg", "could not get device id", "err", err)
} else {
ls.shippingLogger = log.With(ls.shippingLogger, "k2_device_id", string(deviceId))
}
if munemo, err := ls.knapsack.ServerProvidedDataStore().Get([]byte("munemo")); err != nil {
level.Debug(ls.baseLogger).Log("msg", "could not get munemo", "err", err)
} else {
ls.shippingLogger = log.With(ls.shippingLogger, "k2_munemo", string(munemo))
}
if orgId, err := ls.knapsack.ServerProvidedDataStore().Get([]byte("organization_id")); err != nil {
level.Debug(ls.baseLogger).Log("msg", "could not get organization id", "err", err)
} else {
ls.shippingLogger = log.With(ls.shippingLogger, "k2_organization_id", string(orgId))
}
if serialNumber, err := ls.knapsack.ServerProvidedDataStore().Get([]byte("serial_number")); err != nil {
level.Debug(ls.baseLogger).Log("msg", "could not get serial number", "err", err)
} else {
ls.shippingLogger = log.With(ls.shippingLogger, "serial_number", string(serialNumber))
}
}
|
[
"func saveDeviceAttributes(crosAttrs *crossdevicecommon.CrosAttributes, androidAttrs *AndroidAttributes, filepath string) error {\n\tattributes := struct {\n\t\tCrOS *crossdevicecommon.CrosAttributes\n\t\tAndroid *AndroidAttributes\n\t}{CrOS: crosAttrs, Android: androidAttrs}\n\tcrosLog, err := json.MarshalIndent(attributes, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to format device metadata for logging\")\n\t}\n\tif err := ioutil.WriteFile(filepath, crosLog, 0644); err != nil {\n\t\treturn errors.Wrap(err, \"failed to write CrOS attributes to output file\")\n\t}\n\treturn nil\n}",
"func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) {\n\tif daemon.EventsService != nil {\n\t\tif name := hostName(); name != \"\" {\n\t\t\tattributes[\"name\"] = name\n\t\t}\n\t\tdaemon.EventsService.Log(action, events.DaemonEventType, events.Actor{\n\t\t\tID: daemon.id,\n\t\t\tAttributes: attributes,\n\t\t})\n\t}\n}",
"func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) {\n\tpanic(\"not implemented\")\n}",
"func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) {\n\tif daemon.EventsService != nil {\n\t\tif info := daemon.SystemInfo(); info.Name != \"\" {\n\t\t\tattributes[\"name\"] = info.Name\n\t\t}\n\t\tactor := events.Actor{\n\t\t\tID: daemon.ID,\n\t\t\tAttributes: attributes,\n\t\t}\n\t\tdaemon.EventsService.Log(action, events.DaemonEventType, actor)\n\t}\n}",
"func DebugIdentityDeviceName(value string) DebugIdentityAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"device_name\"] = value\n\t}\n}",
"func (a Add) DeviceIDs(devices ...deviceID) error {\n\tfor i := range devices {\n\t\treqBody, err := json.Marshal(devices[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif a.debug == false {\n\t\t\terr = send(reqBody, a.APIKey, \"add\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}",
"func AddDevice(w *rest.ResponseWriter, r *rest.Request) {\n\tdevice := Device{}\n\terr := r.DecodeJsonPayload(&device)\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trand.Seed(time.Now().Unix())\n\tdevice.DeviceID = strconv.Itoa(rand.Intn(100000))\n\tif device.IPAddr == \"\" {\n\t\trest.Error(w, \"device IPAddr required\", 400)\n\t\treturn\n\t}\n\tif device.ListenPort == \"\" {\n\t\trest.Error(w, \"device ListenPort required\", 400)\n\t\treturn\n\t}\n\tif device.Location == \"\" {\n\t\trest.Error(w, \"device Location required\", 400)\n\t\treturn\n\t}\n\tif device.ConnectionLimit == \"\" {\n\t\trest.Error(w, \"device ConnectionLimit required\", 400)\n\t\treturn\n\t}\n\tif device.Accelerometer == \"\" {\n\t\trest.Error(w, \"device Accelerometer t/f required\", 400)\n\t\treturn\n\t}\n\tif device.GPS == \"\" {\n\t\trest.Error(w, \"device GPS t/f required\", 400)\n\t\treturn\n\t}\n\tif device.Light == \"\" {\n\t\trest.Error(w, \"device Light t/f required\", 400)\n\t\treturn\n\t}\n\tif device.Temperature == \"\" {\n\t\trest.Error(w, \"device Temperature t/f required\", 400)\n\t\treturn\n\t}\n\tif device.Orientation == \"\" {\n\t\trest.Error(w, \"device Orientation t/f required\", 400)\n\t\treturn\n\t}\n\t_, err = addDeviceStmt.Exec(0, device.DeviceID, device.IPAddr, device.ListenPort, device.Location, device.ConnectionLimit, device.Accelerometer, device.GPS, device.Light, device.Temperature, device.Orientation, time.Now().Unix(), 0)\n\tif err != nil {\n\t\tlog.Printf(\"Error running addDeviceStmt %s\", err.Error())\n\t}\n\n\tw.WriteJson(&device)\n\n}",
"func (m *Middleware) DeviceUID(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tspan := trace.SpanFromContext(r.Context())\n\t\tduidCookie, err := r.Cookie(m.duidCookieSettings.Name)\n\t\tvar uid ksuid.KSUID\n\t\tswitch {\n\t\tcase errors.Is(err, http.ErrNoCookie):\n\t\t\tspan.AddEvent(\"set new duid cookie\")\n\t\t\tuid = m.MustNewUID()\n\t\t\tm.SetDeviceUID(w, uid)\n\t\tcase err != nil:\n\t\t\tspan.RecordError(err)\n\t\t\tuid = m.MustNewUID()\n\t\t\tm.SetDeviceUID(w, uid)\n\t\tdefault:\n\t\t\tuid, err = m.parseUID(duidCookie.Value)\n\t\t\tif err != nil {\n\t\t\t\tspan.AddEvent(\"replace invalid duid cookie\")\n\t\t\t\tuid = m.MustNewUID()\n\t\t\t\tm.SetDeviceUID(w, uid)\n\t\t\t}\n\t\t}\n\t\tspan.SetAttributes(duidAttributeKey.String(uid.String()))\n\t\tctx := context.WithValue(r.Context(), duidContextKey, uid)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}",
"func parseDeviceID() Handler {\n\treturn func(rc *RouterContext, w http.ResponseWriter, r *http.Request) *HTTPError {\n\n\t\tdeviceid := r.Header.Get(\"deviceid\")\n\t\tif deviceid == \"\" {\n\t\t\treturn handleMissingDataError(\"deviceid\")\n\t\t}\n\n\t\trc.deviceid = deviceid\n\t\treturn nil\n\t}\n}",
"func (v *VolumePublishManager) populateDevicePath(ctx context.Context, volumeId string,\n\tvolumeTrackingInfo *utils.VolumeTrackingInfo,\n) {\n\tlogFields := LogFields{\n\t\t\"volumeID\": volumeId,\n\t\t\"iscsiTargetPortal\": volumeTrackingInfo.IscsiTargetPortal,\n\t\t\"lun\": volumeTrackingInfo.IscsiLunNumber,\n\t}\n\n\tif volumeTrackingInfo.DevicePath == \"\" {\n\t\tif volumeTrackingInfo.RawDevicePath != \"\" {\n\t\t\tvolumeTrackingInfo.DevicePath = volumeTrackingInfo.RawDevicePath\n\t\t\tvolumeTrackingInfo.RawDevicePath = \"\"\n\n\t\t\tlogFields[\"devicePath\"] = volumeTrackingInfo.DevicePath\n\t\t\tLogc(ctx).Debug(\"Updating publish info records.\")\n\t\t} else {\n\t\t\tLogc(ctx).Errorf(\"Publish info is missing device path.\")\n\t\t}\n\t} else {\n\t\tif volumeTrackingInfo.RawDevicePath != \"\" {\n\t\t\tlogFields[\"devicePath\"] = volumeTrackingInfo.DevicePath\n\t\t\tlogFields[\"rawDevicePath\"] = volumeTrackingInfo.RawDevicePath\n\t\t\tLogc(ctx).Warn(\"Found both devices.\")\n\n\t\t\t// No need to have two sources of device path information\n\t\t\tvolumeTrackingInfo.RawDevicePath = \"\"\n\t\t}\n\t}\n}",
"func AddDeviceToInventory(uuid string, deviceData DeviceData) {\n\tDeviceInventory.mutex.Lock()\n\tdefer DeviceInventory.mutex.Unlock()\n\tDeviceInventory.Device[uuid] = deviceData\n\treturn\n}",
"func getCDIDeviceInfo(resp *pluginapi.ContainerAllocateResponse, knownCDIDevices sets.Set[string]) []kubecontainer.CDIDevice {\n\tvar cdiDevices []kubecontainer.CDIDevice\n\tfor _, cdiDevice := range resp.CDIDevices {\n\t\tif knownCDIDevices.Has(cdiDevice.Name) {\n\t\t\tklog.V(4).InfoS(\"Skip existing CDI Device\", \"name\", cdiDevice.Name)\n\t\t\tcontinue\n\t\t}\n\t\tklog.V(4).InfoS(\"Add CDI device\", \"name\", cdiDevice.Name)\n\t\tknownCDIDevices.Insert(cdiDevice.Name)\n\n\t\tdevice := kubecontainer.CDIDevice{\n\t\t\tName: cdiDevice.Name,\n\t\t}\n\t\tcdiDevices = append(cdiDevices, device)\n\t}\n\n\treturn cdiDevices\n}",
"func logDataAdd(r *http.Request, key string, value interface{}) {\n\tvar data map[string]interface{}\n\n\tctx := r.Context()\n\td := ctx.Value(\"log\")\n\tswitch v := d.(type) {\n\tcase map[string]interface{}:\n\t\tdata = v\n\tdefault:\n\t\tdata = make(map[string]interface{})\n\t}\n\n\tdata[key] = value\n\n\tr = r.WithContext(context.WithValue(ctx, \"log\", data))\n}",
"func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) {\n\timg, err := daemon.GetImage(imageID)\n\tif err == nil && img.Config != nil {\n\t\t// image has not been removed yet.\n\t\t// it could be missing if the event is `delete`.\n\t\tcopyAttributes(attributes, img.Config.Labels)\n\t}\n\tif refName != \"\" {\n\t\tattributes[\"name\"] = refName\n\t}\n\tactor := events.Actor{\n\t\tID: imageID,\n\t\tAttributes: attributes,\n\t}\n\n\tdaemon.EventsService.Log(action, events.ImageEventType, actor)\n}",
"func (e *HueEmulator) getDeviceInfo(w http.ResponseWriter, _ *http.Request, params httprouter.Params) {\n\tlightID := params.ByName(\"lightID\")\n\tfor _, v := range e.devices {\n\t\tif lightID == v.internalHash {\n\t\t\te.logger.Debug(\"Requested device state info\", common.LogIDToken, v.DeviceID)\n\t\t\te.sendJSON(w, getDevice(v))\n\t\t\treturn\n\t\t}\n\t}\n\n\te.logger.Warn(\"Requested unknown device state info\", common.LogIDToken, lightID)\n}",
"func (o OpenSignal) AddADevice(device Device) (Response, error) {\n\tstrResponse := Response{}\n\n\tres, body, errs := o.Client.Post(addADevice).\n\t\tSet(\"Authorization\", \"Basic \"+o.APIKey).\n\t\tSend(device).\n\t\tEndStruct(&strResponse)\n\terr := catch(res, body)\n\tif err == nil {\n\t\tfor _, e := range errs {\n\t\t\tif e != nil {\n\t\t\t\terr = e\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn strResponse, err\n}",
"func addAttribute(buf *bytes.Buffer, attrType uint16, data interface{}, dataSize int) {\n\tattr := syscall.RtAttr{\n\t\tLen: syscall.SizeofRtAttr,\n\t\tType: attrType,\n\t}\n\tattr.Len += uint16(dataSize)\n\tbinary.Write(buf, Endian, attr)\n\tswitch data := data.(type) {\n\tcase string:\n\t\tbinary.Write(buf, Endian, []byte(data))\n\t\tbuf.WriteByte(0) // terminate\n\tdefault:\n\t\tbinary.Write(buf, Endian, data)\n\t}\n\tfor i := 0; i < padding(int(attr.Len), syscall.NLMSG_ALIGNTO); i++ {\n\t\tbuf.WriteByte(0)\n\t}\n}",
"func DebugIdentityV3DeviceName(value string) DebugIdentityV3Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"device_name\"] = value\n\t}\n}",
"func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, m pmetric.Metric) {\n\n\t// This is a lot of repeated code, but since there is no single parent superclass\n\t// between metric data types, we can't use polymorphism.\n\tswitch m.Type() {\n\tcase pmetric.MetricTypeGauge:\n\t\tdps := m.Gauge().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\tcase pmetric.MetricTypeSum:\n\t\tdps := m.Sum().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\tcase pmetric.MetricTypeHistogram:\n\t\tdps := m.Histogram().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\tcase pmetric.MetricTypeExponentialHistogram:\n\t\tdps := m.ExponentialHistogram().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\tcase pmetric.MetricTypeSummary:\n\t\tdps := m.Summary().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddDuckEntry adds a duck entry to the database
|
func (h *Helper) AddDuckEntry(entry *types.Entry) error {
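    // Parameterized INSERT; the entry's fields are bound as arguments in Exec below.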
query := `
INSERT INTO duck_entries (
id,
fed_time,
food,
kind_of_food,
amount_of_food,
location,
number_of_ducks
) VALUES (
$1, $2, $3, $4, $5, $6, $7
)
`
_, err := h.db.Exec(
query,
entry.ID,
entry.TimeFed,
entry.Food.Name,
entry.Food.Kind,
entry.AmountOfFood,
entry.Location,
entry.NumberOfDucks,
)
return err
}
|
[
"func addEntry(t *testing.T, key string, keyspace uint) {\n\t// Insert at least one event to make sure db exists\n\tc, err := rd.Dial(\"tcp\", host)\n\tif err != nil {\n\t\tt.Fatal(\"connect\", err)\n\t}\n\t_, err = c.Do(\"SELECT\", keyspace)\n\tif err != nil {\n\t\tt.Fatal(\"select\", err)\n\t}\n\tdefer c.Close()\n\t_, err = c.Do(\"SET\", key, \"bar\", \"EX\", \"360\")\n\tif err != nil {\n\t\tt.Fatal(\"SET\", err)\n\t}\n}",
"func (s *dnsTestServer) AddEntryToDNSDatabase(q DNSQuery, a DNSAnswers) {\n\ts.DNSDatabase[q] = append(s.DNSDatabase[q], a...)\n}",
"func (d dealRepo) Add(deal broker.Deal) error {\n\tentity := Deal{\n\t\tID: deal.ID,\n\t\tClientID: deal.ClientID,\n\t\tTicker: deal.Ticker,\n\t\tVol: deal.Amount,\n\t\tPrice: deal.Price,\n\t\tType: deal.Type,\n\t\tStatus: deal.Status,\n\t}\n\n\treturn d.db.Create(&entity).Error\n}",
"func (db *Database) AddEntry(entry string, data *storage.Data) {\n\tdb.entries[entry] = data\n}",
"func (w *Wallet) AddEntry(entry Entry) error {\n\terr := entry.Verify()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif w.SearchEntryByID(entry.ID) != (Entry{}) {\n\t\treturn fmt.Errorf(\"the id already exists in wallet, can't add the entry\")\n\t}\n\n\tentry.Create = time.Now().Unix()\n\tentry.LastUpdate = entry.Create\n\tw.Entries = append(w.Entries, entry)\n\n\treturn nil\n}",
"func (c *ComponentChest) AddDeck(name string, deck *Deck) error {\n\t//Only add the deck if we haven't finished initalizing\n\tif c.initialized {\n\t\treturn errors.New(\"The chest was already finished, so no new decks may be added.\")\n\t}\n\tif c.decks == nil {\n\t\tc.decks = make(map[string]*Deck)\n\t}\n\n\tif name == \"\" {\n\t\tname = \"NONAMEPROVIDED\"\n\t}\n\n\tif _, ok := c.decks[name]; ok {\n\t\treturn errors.New(\"A deck with name \" + name + \" was already in the deck.\")\n\t}\n\n\t//Tell the deck that no more items will be added to it.\n\tif err := deck.finish(c, name); err != nil {\n\t\treturn errors.New(\"Couldn't finish deck: \" + err.Error())\n\t}\n\n\tc.decks[name] = deck\n\n\treturn nil\n\n}",
"func (ps *dsPieceStore) AddDealForPiece(pieceCID cid.Cid, dealInfo piecestore.DealInfo) error {\n\t/*\treturn ps.mutatePieceInfo(pieceCID, func(pi *PieceInfo) error {\n\t\tfor _, di := range pi.Deals {\n\t\t\tif di.DealID == dealInfo.DealID {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\t//new deal\n\t\tpi.Deals = append(pi.Deals, DealInfo{\n\t\t\tDealInfo: dealInfo,\n\t\t\tIsPacking: false,\n\t\t\tExpiration: 0,\n\t\t})\n\t\treturn nil\n\t})*/\n\treturn nil\n}",
"func AddDeck(m *Deck) (id int64, err error) {\n\to := orm.NewOrm()\n\tif m.Title == \"\"{\n\t\treturn 0, errors.New(\"名称不能为空\")\n\t}\n\tid, err = o.Insert(m)\n\treturn\n}",
"func (c *APCtl) AddIDEntry(entry *fibcapi.DbIdEntry) error {\n\te := NewDBIDEntryFromAPI(entry)\n\treturn c.db.IDMap().Register(e)\n}",
"func AddDish(dishId int, date int, db *sqlx.DB) error {\n\t_, err := squirrel.Insert(Table).Columns(DishId, Date).Values(dishId, date).RunWith(db.DB).Exec()\n\treturn err\n}",
"func AddDish(w http.ResponseWriter, r *http.Request) {\n\t// Source model\n\tvar dish dishes.Dish\n\n\t// Extract request JSON data\n\tdecoder := json.NewDecoder(r.Body)\n\terr := decoder.Decode(&dish)\n\tdefer r.Body.Close()\n\n\tif err != nil {\n\t\trest.BadRequest(&w, err.Error())\n\t\treturn\n\t}\n\n\t// Validate data\n\tvalidErrors := dishValidator.Struct(&dish)\n\n\tif validErrors != nil {\n\t\trest.BadRequest(&w, validErrors.Error())\n\t\treturn\n\t}\n\n\tdb := database.GetInstance()\n\tdefer db.Close()\n\n\t// Try to create\n\tcreateErr := dishes.Add(&dish, db)\n\n\t// Handle errors\n\tif createErr != nil {\n\t\trest.Error(createErr).Write(&w)\n\t} else {\n\t\trest.Echo(\"Success\").Write(&w)\n\t}\n}",
"func (m *MysqlDriver) AddDifficulty(tag *model.Difficulty) error {\n\ti, err := m.conn.Insert(tag)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif i == 0 {\n\t\treturn ErrNoRowsAffected\n\t}\n\treturn nil\n}",
"func (db RecipeDB) addRecipe(name, version string, success bool) error {\n\tversionNum, err := convertVersion(version)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdb[name] = Recipe{Name: name, Version: versionNum, InstallTime: time.Now().Unix(), Success: success}\n\n\tvar recipelist []Recipe\n\tfor _, recipe := range db {\n\t\trecipelist = append(recipelist, recipe)\n\t}\n\tdbBytes, err := json.Marshal(recipelist)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdbDir := getDbDir()\n\tif err := os.MkdirAll(dbDir, 0755); err != nil {\n\t\treturn err\n\t}\n\n\tf, err := ioutil.TempFile(dbDir, dbFileName+\"_*\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := f.Write(dbBytes); err != nil {\n\t\tf.Close()\n\t\treturn err\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn os.Rename(f.Name(), filepath.Join(dbDir, dbFileName))\n}",
"func (r *WorkplaceRepository) Add(ctx context.Context, entity *model.WorkplaceInfo) error {\n\tdata, err := json.Marshal(entity)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error to marshal workplace info\")\n\t}\n\t_, err = r.db.Conn.Exec(ctx, \"INSERT INTO workplace(info) VALUES($1) ON CONFLICT (info) DO UPDATE SET updated_at=now()\", data)\n\treturn err\n}",
"func ForceAdd(db *sqlx.DB, label string) (err error) {\n\tif db == nil {\n\t\treturn ErrNilDB\n\t}\n\tdbpool[label] = db\n\treturn nil\n}",
"func (room *Room) AddEntry(entry, issuer string) error {\n\tif room.game == nil {\n\t\treturn errors.New(\"there isn't a started game\")\n\t}\n\n\tif err := room.game.AddEntry(entry, issuer); err != nil {\n\t\treturn err\n\t}\n\n\tif room.game.Finished {\n\t\troom.previousGame = room.game\n\t\troom.game = nil\n\t}\n\treturn nil\n}",
"func (s *Sqlite) AddCheck(check gogios.Check, output string) error {\n\tdb, err := s.openConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\tdata := gogios.CheckHistory{CheckID: &check.ID, Asof: &check.Asof, Output: output, Status: &check.Status}\n\n\tif db.NewRecord(check) {\n\t\tdb.Create(&check)\n\t\tid, err := s.GetCheck(check.Title, \"title\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.CheckID = &id.ID\n\t\tdb.Create(&data)\n\t} else {\n\t\tdb.Model(check).Updates(&check)\n\t\tdb.Create(&data)\n\t}\n\n\treturn nil\n}",
"func (df *Dirfile) AddEntry(e *Entry) error {\n\tswitch e.fieldType {\n\tcase RAWENTRY:\n\t\treturn df.AddRaw(e.name, e.dataType, e.spf, e.fragment)\n\tcase BITENTRY:\n\t\treturn df.AddBit(e.name, e.inFields[0], e.bitnum, e.numbits, e.fragment)\n\t}\n\treturn fmt.Errorf(\"Unknown or not implemented entry type 0x%x\", e.fieldType)\n}",
"func (s *dnsTestServer) AddEntryToDNSDatabaseRetry(q DNSQuery, a DNSAnswers) {\n\ts.DNSDatabaseRetry[q] = append(s.DNSDatabaseRetry[q], a...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
EncodeAddResponse returns an encoder for responses returned by the station add endpoint.
|
func EncodeAddResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {
	return func(ctx context.Context, w http.ResponseWriter, v interface{}) error {
		res := v.(*stationviews.StationFull)
		enc := encoder(ctx, w)
		body := NewAddResponseBody(res.Projected)
		w.WriteHeader(http.StatusOK)
		return enc.Encode(body)
	}
}
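
All of the station encoders here take the same `encoder` constructor argument. As a hedged illustration (not part of the generated file): in goa v3 servers this argument is typically goahttp.ResponseEncoder, but any constructor whose result has an Encode(v interface{}) error method fits, and encoding/json's *json.Encoder already satisfies the goahttp.Encoder interface. The helper name below is ours; assumed imports are context, encoding/json, net/http, and goahttp "goa.design/goa/v3/http".

// jsonEncoder is a minimal stand-in for the encoder constructor expected by
// EncodeAddResponse and its siblings. *json.Encoder satisfies goahttp.Encoder
// because both expose Encode(v interface{}) error.
func jsonEncoder(ctx context.Context, w http.ResponseWriter) goahttp.Encoder {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	return json.NewEncoder(w)
}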
|
[
"func EncodeAddResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) kithttp.EncodeResponseFunc {\n\treturn server.EncodeAddResponse(encoder)\n}",
"func EncodeAddResponse(ctx context.Context, v interface{}, hdr, trlr *metadata.MD) (interface{}, error) {\n\tresult, ok := v.(string)\n\tif !ok {\n\t\treturn nil, goagrpc.ErrInvalidType(\"todo\", \"add\", \"string\", v)\n\t}\n\tresp := NewAddResponse(result)\n\treturn resp, nil\n}",
"func EncodeAddResponse(ctx context.Context, v interface{}, hdr, trlr *metadata.MD) (interface{}, error) {\n\tresult, ok := v.(string)\n\tif !ok {\n\t\treturn nil, goagrpc.ErrInvalidType(\"tasks\", \"add\", \"string\", v)\n\t}\n\tresp := NewAddResponse(result)\n\treturn resp, nil\n}",
"func EncodeAddResponse(_ context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\terr = e.Encode(response)\n\treturn err\n}",
"func encodeAddResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func EncodeUpdateResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewUpdateResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeAddCategoryResponse(_ context.Context, r interface{}) (interface{}, error) {\n\treturn nil, errors.New(\"'Todo' Encoder is not impelemented\")\n}",
"func EncodeRegisterResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(int)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusCreated)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeNewResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(string)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeStationMetaResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*sensor.StationMetaResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res.Object\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func DecodeAddResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (any, error) {\n\treturn func(resp *http.Response) (any, error) {\n\t\tif restoreBody {\n\t\t\tb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = io.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = io.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusCreated:\n\t\t\tvar (\n\t\t\t\tbody string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"storage\", \"add\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := io.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"storage\", \"add\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}",
"func (t AddOffsetsToTxnResponse) Encode(e *Encoder, version int16) {\n\te.PutInt32(t.ThrottleTimeMs) // ThrottleTimeMs\n\te.PutInt16(t.ErrorCode) // ErrorCode\n}",
"func EncodeNextResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*spinbroker.NextResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewNextResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func (t RenewDelegationTokenResponse) Encode(e *Encoder, version int16) {\n\te.PutInt16(t.ErrorCode) // ErrorCode\n\te.PutInt64(t.ExpiryTimestampMs) // ExpiryTimestampMs\n\te.PutInt32(t.ThrottleTimeMs) // ThrottleTimeMs\n}",
"func (t UpdateMetadataResponse) Encode(e *Encoder, version int16) {\n\te.PutInt16(t.ErrorCode) // ErrorCode\n}",
"func EncodeConcatstringsResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.(string)\n\t\tctx = context.WithValue(ctx, goahttp.ContentTypeKey, \"text/html\")\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeUpdateResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*inventoryviews.Inventory)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewUpdateResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeListMineResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationsFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListMineResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeConcatbytesResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.([]byte)\n\t\tctx = context.WithValue(ctx, goahttp.ContentTypeKey, \"text/html\")\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
EncodeGetResponse returns an encoder for responses returned by the station get endpoint.
|
func EncodeGetResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {
	return func(ctx context.Context, w http.ResponseWriter, v interface{}) error {
		res := v.(*stationviews.StationFull)
		enc := encoder(ctx, w)
		body := NewGetResponseBody(res.Projected)
		w.WriteHeader(http.StatusOK)
		return enc.Encode(body)
	}
}
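
The ordering inside these closures matters: WriteHeader must be called before Encode, since the first body write would otherwise lock in an implicit 200 and make any later status change a no-op. A runnable sketch of that behavior, with a plain map standing in for *stationviews.StationFull:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Same shape as the closure returned by EncodeGetResponse, with the
	// generated body constructor replaced by a plain map.
	encode := func(ctx context.Context, w http.ResponseWriter, v interface{}) error {
		enc := json.NewEncoder(w) // stand-in for encoder(ctx, w)
		w.WriteHeader(http.StatusOK)
		return enc.Encode(v)
	}
	rec := httptest.NewRecorder()
	_ = encode(context.Background(), rec, map[string]string{"name": "station-1"})
	fmt.Println(rec.Code, rec.Body.String()) // 200 {"name":"station-1"}
}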
|
[
"func encodeGetResponse( w *rf.Context, response interface{}) error {\n\tresp := response.(getResponse)\n\treturn encodeResponse(w, resp.Sock)\n}",
"func EncodeGetResponse(_ context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\terr = e.Encode(response)\n\treturn err\n}",
"func EncodeGetResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*clientviews.ClientManagement)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewGetResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeGetResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*inventoryviews.Inventory)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewGetResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeGetResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func EncodeGetResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*termlimitviews.TermLimitResponse)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewGetResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeGetEventsResponse(_ context.Context, r interface{}) (interface{}, error) {\r\n\treturn nil, errors.New(\"'Events' Encoder is not impelemented\")\r\n}",
"func encodeGetDealByStateResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func EncodeGetResponse(ctx context.Context, v interface{}, hdr, trlr *metadata.MD) (interface{}, error) {\n\tvres, ok := v.(*todoviews.Todo)\n\tif !ok {\n\t\treturn nil, goagrpc.ErrInvalidType(\"todo\", \"get\", \"*todoviews.Todo\", v)\n\t}\n\tresult := vres.Projected\n\t(*hdr).Append(\"goa-view\", vres.View)\n\tresp := NewGetResponse(result)\n\treturn resp, nil\n}",
"func EncodeGetResponse(ctx context.Context, v interface{}, hdr, trlr *metadata.MD) (interface{}, error) {\n\tvres, ok := v.(*termlimitviews.TermLimitResponse)\n\tif !ok {\n\t\treturn nil, goagrpc.ErrInvalidType(\"term_limit\", \"get\", \"*termlimitviews.TermLimitResponse\", v)\n\t}\n\tresult := vres.Projected\n\t(*hdr).Append(\"goa-view\", vres.View)\n\tresp := NewGetResponse(result)\n\treturn resp, nil\n}",
"func EncodeGetLicenseResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(int)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeGetPostResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}",
"func encodeGetByCreteriaResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func encodeGetDealByDIDResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func EncodeStationMetaResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*sensor.StationMetaResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res.Object\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeAddResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewAddResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeGetUserDealByStateResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func EncodeUpdateResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewUpdateResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeGetByMultiCriteriaResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
EncodeUpdateResponse returns an encoder for responses returned by the station update endpoint.
|
func EncodeUpdateResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {
	return func(ctx context.Context, w http.ResponseWriter, v interface{}) error {
		res := v.(*stationviews.StationFull)
		enc := encoder(ctx, w)
		body := NewUpdateResponseBody(res.Projected)
		w.WriteHeader(http.StatusOK)
		return enc.Encode(body)
	}
}
|
[
"func EncodeUpdateResponse(ctx context.Context, v interface{}, hdr, trlr *metadata.MD) (interface{}, error) {\n\tresp := NewUpdateResponse()\n\treturn resp, nil\n}",
"func EncodeUpdateResponse(ctx context.Context, v interface{}, hdr, trlr *metadata.MD) (interface{}, error) {\n\tresult, ok := v.(string)\n\tif !ok {\n\t\treturn nil, goagrpc.ErrInvalidType(\"tasks\", \"update\", \"string\", v)\n\t}\n\tresp := NewUpdateResponse(result)\n\treturn resp, nil\n}",
"func EncodeUpdateResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*inventoryviews.Inventory)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewUpdateResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeUpdateMessageResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*discussion.UpdateMessageResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewUpdateMessageResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeUpdatePostResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}",
"func encodeUpdateTagResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}",
"func encodeUpdatePostResponse(_ context.Context, r interface{}) (interface{}, error) {\n\trs := r.(endpoint.UpdatePostResponse)\n\n\tif rs.Err != nil {\n\t\treturn &pb.UpdatePostReply{}, rs.Err\n\t}\n\n\treturn &pb.UpdatePostReply{\n\t\tMessage: rs.Message,\n\t\tStatus: rs.Status,\n\t}, nil\n}",
"func encodeUpdateTagResponse(_ context.Context, r interface{}) (interface{}, error) {\n\trs := r.(endpoint.UpdateTagResponse)\n\n\tif rs.Err != nil {\n\t\treturn &pb.UpdateTagReply{}, rs.Err\n\t}\n\n\treturn &pb.UpdateTagReply{Message: rs.Message, Status: rs.Status}, nil\n}",
"func EncodeUpdateDeviceLicenseResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(int)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeUpdateAgentResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres, _ := v.(*admin.UpdateAgentResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewUpdateAgentResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func (t UpdateMetadataResponse) Encode(e *Encoder, version int16) {\n\te.PutInt16(t.ErrorCode) // ErrorCode\n}",
"func EncodeAddResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewAddResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeUpdateCompanyResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.UpdateCompanyResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\treturn res.Company.ToProto(), nil\n\t}\n\treturn nil, err\n}",
"func (t FindCoordinatorResponse) Encode(e *Encoder, version int16) {\n\tif version >= 1 {\n\t\te.PutInt32(t.ThrottleTimeMs) // ThrottleTimeMs\n\t}\n\te.PutInt16(t.ErrorCode) // ErrorCode\n\tif version >= 1 {\n\t\te.PutString(t.ErrorMessage) // ErrorMessage\n\t}\n\te.PutInt32(t.NodeId) // NodeId\n\te.PutString(t.Host) // Host\n\te.PutInt32(t.Port) // Port\n}",
"func EncodePatchResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.(*tus.PatchResult)\n\t\tw.Header().Set(\"Tus-Resumable\", res.TusResumable)\n\t\t{\n\t\t\tval := res.UploadOffset\n\t\t\tuploadOffsets := strconv.FormatInt(val, 10)\n\t\t\tw.Header().Set(\"Upload-Offset\", uploadOffsets)\n\t\t}\n\t\tif res.UploadExpires != nil {\n\t\t\tw.Header().Set(\"Upload-Expires\", *res.UploadExpires)\n\t\t}\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\treturn nil\n\t}\n}",
"func EncodeUpdateRequest(encoder func(*http.Request) goahttp.Encoder) func(*http.Request, interface{}) error {\n\treturn func(req *http.Request, v interface{}) error {\n\t\tp, ok := v.(*warehouse.UpdatePayload)\n\t\tif !ok {\n\t\t\treturn goahttp.ErrInvalidType(\"Warehouse\", \"Update\", \"*warehouse.UpdatePayload\", v)\n\t\t}\n\t\t{\n\t\t\thead := p.Token\n\t\t\tif !strings.Contains(head, \" \") {\n\t\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+head)\n\t\t\t} else {\n\t\t\t\treq.Header.Set(\"Authorization\", head)\n\t\t\t}\n\t\t}\n\t\tbody := NewUpdateRequestBody(p)\n\t\tif err := encoder(req).Encode(&body); err != nil {\n\t\t\treturn goahttp.ErrEncodingError(\"Warehouse\", \"Update\", err)\n\t\t}\n\t\treturn nil\n\t}\n}",
"func EncodeRefreshConfigResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres, _ := v.(*admin.RefreshConfigResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewRefreshConfigResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeUpdateDeviceLicenseWithValueResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(int)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeChangeResponse(_ context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\terr = e.Encode(response)\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
EncodeListMineResponse returns an encoder for responses returned by the station list mine endpoint.
|
func EncodeListMineResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {
	return func(ctx context.Context, w http.ResponseWriter, v interface{}) error {
		res := v.(*stationviews.StationsFull)
		enc := encoder(ctx, w)
		body := NewListMineResponseBody(res.Projected)
		w.WriteHeader(http.StatusOK)
		return enc.Encode(body)
	}
}
|
[
"func NewListMineHandler(\n\tendpoint goa.Endpoint,\n\tmux goahttp.Muxer,\n\tdecoder func(*http.Request) goahttp.Decoder,\n\tencoder func(context.Context, http.ResponseWriter) goahttp.Encoder,\n\terrhandler func(context.Context, http.ResponseWriter, error),\n\tformatter func(err error) goahttp.Statuser,\n) http.Handler {\n\tvar (\n\t\tdecodeRequest = DecodeListMineRequest(mux, decoder)\n\t\tencodeResponse = EncodeListMineResponse(encoder)\n\t\tencodeError = EncodeListMineError(encoder, formatter)\n\t)\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := context.WithValue(r.Context(), goahttp.AcceptTypeKey, r.Header.Get(\"Accept\"))\n\t\tctx = context.WithValue(ctx, goa.MethodKey, \"list mine\")\n\t\tctx = context.WithValue(ctx, goa.ServiceKey, \"station\")\n\t\tpayload, err := decodeRequest(r)\n\t\tif err != nil {\n\t\t\tif err := encodeError(ctx, w, err); err != nil {\n\t\t\t\terrhandler(ctx, w, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tres, err := endpoint(ctx, payload)\n\t\tif err != nil {\n\t\t\tif err := encodeError(ctx, w, err); err != nil {\n\t\t\t\terrhandler(ctx, w, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif err := encodeResponse(ctx, w, res); err != nil {\n\t\t\terrhandler(ctx, w, err)\n\t\t}\n\t})\n}",
"func EncodeListResponse(_ context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\terr = e.Encode(response)\n\treturn err\n}",
"func encodeListResponse(w *rf.Context, response interface{}) error {\n\tresp := response.(listResponse)\n\treturn encodeResponse(w, resp.Socks)\n}",
"func EncodeListAllResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.PageOfStations)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListAllResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func (c *Client) ListMine(ctx context.Context, p *ListMinePayload) (res *StationsFull, err error) {\n\tvar ires interface{}\n\tires, err = c.ListMineEndpoint(ctx, p)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(*StationsFull), nil\n}",
"func EncodeListProjectResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationsFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListProjectResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeListResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*inventory.ListResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeListResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.([]*pipeline.EnduroStoredPipeline)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeShowResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(clientviews.ClientManagementCollection)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewClientManagementResponseCollection(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeGrpcRespAppList(ctx context.Context, response interface{}) (interface{}, error) {\n\treturn response, nil\n}",
"func (t WriteTxnMarkersResponse) Encode(e *Encoder, version int16) {\n\t// Markers\n\tlen0 := len(t.Markers)\n\te.PutArrayLength(len0)\n\tfor i := 0; i < len0; i++ {\n\t\tt.Markers[i].Encode(e, version)\n\t}\n}",
"func (v NetworkListResponse) EncodeJSON(b []byte) []byte {\n\tb = append(b, '{', '\"', 'n', 'e', 't', 'w', 'o', 'r', 'k', '_', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', 's', '\"', ':', '[')\n\tfor i, elem := range v.NetworkIdentifiers {\n\t\tif i != 0 {\n\t\t\tb = append(b, \",\"...)\n\t\t}\n\t\tb = elem.EncodeJSON(b)\n\t}\n\treturn append(b, \"]}\"...)\n}",
"func (t ListOffsetResponse) Encode(e *Encoder, version int16) {\n\tif version >= 2 {\n\t\te.PutInt32(t.ThrottleTimeMs) // ThrottleTimeMs\n\t}\n\t// Topics\n\tlen1 := len(t.Topics)\n\te.PutArrayLength(len1)\n\tfor i := 0; i < len1; i++ {\n\t\tt.Topics[i].Encode(e, version)\n\t}\n}",
"func EncodeAddResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewAddResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeFetchResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*fetchersvc.FetchMedia)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewFetchResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func BuildListMinePayload(stationListMineAuth string) (*station.ListMinePayload, error) {\n\tvar auth string\n\t{\n\t\tauth = stationListMineAuth\n\t}\n\tv := &station.ListMinePayload{}\n\tv.Auth = auth\n\n\treturn v, nil\n}",
"func EncodeNewNeatThingResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*neatthingviews.NeatThing)\n\t\tw.Header().Set(\"goa-view\", res.View)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewNewNeatThingResponseBodyFull(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func listResp() string {\n\tvar list struct {\n\t\tVersion struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tProtocol int `json:\"protocol\"`\n\t\t} `json:\"version\"`\n\t\tPlayers struct {\n\t\t\tMax int `json:\"max\"`\n\t\t\tOnline int `json:\"online\"`\n\t\t\tSample []player `json:\"sample\"`\n\t\t} `json:\"players\"`\n\t\tDescription chat.Message `json:\"description\"`\n\t\tFavIcon string `json:\"favicon,omitempty\"`\n\t}\n\n\tlist.Version.Name = \"Chat Server\"\n\tlist.Version.Protocol = ProtocolVersion\n\tlist.Players.Max = MaxPlayer\n\tlist.Players.Online = 123\n\tlist.Players.Sample = []player{} // must init. can't be nil\n\tlist.Description = chat.Message{Text: \"Powered by go-mc\", Color: \"blue\"}\n\n\tdata, err := json.Marshal(list)\n\tif err != nil {\n\t\tlog.Panic(\"Marshal JSON for status checking fail\")\n\t}\n\treturn string(data)\n}",
"func DecodeListMineRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) func(*http.Request) (interface{}, error) {\n\treturn func(r *http.Request) (interface{}, error) {\n\t\tvar (\n\t\t\tauth string\n\t\t\terr error\n\t\t)\n\t\tauth = r.Header.Get(\"Authorization\")\n\t\tif auth == \"\" {\n\t\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"Authorization\", \"header\"))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpayload := NewListMinePayload(auth)\n\t\tif strings.Contains(payload.Auth, \" \") {\n\t\t\t// Remove authorization scheme prefix (e.g. \"Bearer\")\n\t\t\tcred := strings.SplitN(payload.Auth, \" \", 2)[1]\n\t\t\tpayload.Auth = cred\n\t\t}\n\n\t\treturn payload, nil\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DecodeListMineRequest returns a decoder for requests sent to the station list mine endpoint.
|
func DecodeListMineRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) func(*http.Request) (interface{}, error) {
	return func(r *http.Request) (interface{}, error) {
		var (
			auth string
			err  error
		)
		auth = r.Header.Get("Authorization")
		if auth == "" {
			err = goa.MergeErrors(err, goa.MissingFieldError("Authorization", "header"))
		}
		if err != nil {
			return nil, err
		}
		payload := NewListMinePayload(auth)
		if strings.Contains(payload.Auth, " ") {
			// Remove authorization scheme prefix (e.g. "Bearer")
			cred := strings.SplitN(payload.Auth, " ", 2)[1]
			payload.Auth = cred
		}
		return payload, nil
	}
}
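
The credential extraction above is the one non-mechanical step in this decoder. Isolated as a helper (the name is ours, not generated), strings.SplitN with a limit of 2 means everything after the first space is kept as the credential, while a value with no space passes through unchanged:

package main

import (
	"fmt"
	"strings"
)

// stripAuthScheme mirrors the scheme-stripping step of DecodeListMineRequest.
func stripAuthScheme(header string) string {
	if strings.Contains(header, " ") {
		return strings.SplitN(header, " ", 2)[1]
	}
	return header
}

func main() {
	fmt.Println(stripAuthScheme("Bearer abc123"))  // abc123
	fmt.Println(stripAuthScheme("Basic dXNlcg==")) // dXNlcg==
	fmt.Println(stripAuthScheme("abc123"))         // abc123
}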
|
[
"func DecodeListRequest(ctx context.Context, v interface{}, md metadata.MD) (interface{}, error) {\n\tvar (\n\t\tview *string\n\t\terr error\n\t)\n\t{\n\t\tif vals := md.Get(\"view\"); len(vals) > 0 {\n\t\t\tview = &vals[0]\n\t\t}\n\t\tif view != nil {\n\t\t\tif !(*view == \"default\" || *view == \"tiny\") {\n\t\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"view\", *view, []interface{}{\"default\", \"tiny\"}))\n\t\t\t}\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar payload *tasks.ListPayload\n\t{\n\t\tpayload = NewListPayload(view)\n\t}\n\treturn payload, nil\n}",
"func DecodeListRequest(_ context.Context, r *http.Request) (req interface{}, err error) {\n\t//req = endpoints.ListRequest{}\n\t//err = json.NewDecoder(r.Body).Decode(&r)\n\treturn nil, nil\n}",
"func NewListMineHandler(\n\tendpoint goa.Endpoint,\n\tmux goahttp.Muxer,\n\tdecoder func(*http.Request) goahttp.Decoder,\n\tencoder func(context.Context, http.ResponseWriter) goahttp.Encoder,\n\terrhandler func(context.Context, http.ResponseWriter, error),\n\tformatter func(err error) goahttp.Statuser,\n) http.Handler {\n\tvar (\n\t\tdecodeRequest = DecodeListMineRequest(mux, decoder)\n\t\tencodeResponse = EncodeListMineResponse(encoder)\n\t\tencodeError = EncodeListMineError(encoder, formatter)\n\t)\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := context.WithValue(r.Context(), goahttp.AcceptTypeKey, r.Header.Get(\"Accept\"))\n\t\tctx = context.WithValue(ctx, goa.MethodKey, \"list mine\")\n\t\tctx = context.WithValue(ctx, goa.ServiceKey, \"station\")\n\t\tpayload, err := decodeRequest(r)\n\t\tif err != nil {\n\t\t\tif err := encodeError(ctx, w, err); err != nil {\n\t\t\t\terrhandler(ctx, w, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tres, err := endpoint(ctx, payload)\n\t\tif err != nil {\n\t\t\tif err := encodeError(ctx, w, err); err != nil {\n\t\t\t\terrhandler(ctx, w, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif err := encodeResponse(ctx, w, res); err != nil {\n\t\t\terrhandler(ctx, w, err)\n\t\t}\n\t})\n}",
"func DecodeListReq(ctx context.Context, r *http.Request) (interface{}, error) {\n\tvar request api.ListRequest\n\tif err := json.NewDecoder(r.Body).Decode(&request); err != nil {\n\t\treturn nil, err\n\t}\n\treturn request, nil\n}",
"func DecodeListRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) func(*http.Request) (any, error) {\n\treturn func(r *http.Request) (any, error) {\n\t\tvar (\n\t\t\tname *string\n\t\t\tstatus bool\n\t\t\terr error\n\t\t)\n\t\tnameRaw := r.URL.Query().Get(\"name\")\n\t\tif nameRaw != \"\" {\n\t\t\tname = &nameRaw\n\t\t}\n\t\t{\n\t\t\tstatusRaw := r.URL.Query().Get(\"status\")\n\t\t\tif statusRaw != \"\" {\n\t\t\t\tv, err2 := strconv.ParseBool(statusRaw)\n\t\t\t\tif err2 != nil {\n\t\t\t\t\terr = goa.MergeErrors(err, goa.InvalidFieldTypeError(\"status\", statusRaw, \"boolean\"))\n\t\t\t\t}\n\t\t\t\tstatus = v\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpayload := NewListPayload(name, status)\n\n\t\treturn payload, nil\n\t}\n}",
"func (c *Client) ListMine(ctx context.Context, p *ListMinePayload) (res *StationsFull, err error) {\n\tvar ires interface{}\n\tires, err = c.ListMineEndpoint(ctx, p)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(*StationsFull), nil\n}",
"func listRequestDecoder(r *http.Request) *ListUsersRequest {\n\tpagination, ok := mid.PaginateFromContext(r.Context())\n\tif !ok {\n\t\tpagination = model.NewPagination()\n\t}\n\tsearch := r.URL.Query().Get(\"search\")\n\tlistRequest := &ListUsersRequest{Pagination: *pagination, Search: search}\n\treturn listRequest\n}",
"func (t *ListOffsetRequest) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ReplicaId, err = d.Int32()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif version >= 2 {\n\t\tt.IsolationLevel, err = d.Int8()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Topics\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Topics = make([]ListOffsetTopic2, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item ListOffsetTopic2\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Topics[i] = item\n\t\t}\n\t}\n\treturn err\n}",
"func BuildListMinePayload(stationListMineAuth string) (*station.ListMinePayload, error) {\n\tvar auth string\n\t{\n\t\tauth = stationListMineAuth\n\t}\n\tv := &station.ListMinePayload{}\n\tv.Auth = auth\n\n\treturn v, nil\n}",
"func DecodeScoreListRequest(ctx context.Context, v interface{}, md metadata.MD) (interface{}, error) {\n\tvar (\n\t\tmessage *scorepb.ScoreListRequest\n\t\tok bool\n\t)\n\t{\n\t\tif message, ok = v.(*scorepb.ScoreListRequest); !ok {\n\t\t\treturn nil, goagrpc.ErrInvalidType(\"Score\", \"ScoreList\", \"*scorepb.ScoreListRequest\", v)\n\t\t}\n\t\tif err := ValidateScoreListRequest(message); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar payload *score.ScoreListPayload\n\t{\n\t\tpayload = NewScoreListPayload(message)\n\t}\n\treturn payload, nil\n}",
"func (t *DescribeDelegationTokenRequest) Decode(d *Decoder, version int16) error {\n\tvar err error\n\t// Owners\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Owners = make([]DescribeDelegationTokenOwner41, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item DescribeDelegationTokenOwner41\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Owners[i] = item\n\t\t}\n\t}\n\treturn err\n}",
"func decodeHTTPListRolesRequest(_ context.Context, r *http.Request) (interface{}, error) {\n\tvar req endpoints.ListRolesRequest\n\treturn req, nil\n}",
"func DecodeGatewayListDHTOfferRequest(fcrMsg *FCRMessage) (\n\t*nodeid.NodeID, // gatewayID\n\t*cid.ContentID, // cid min\n\t*cid.ContentID, // cid max\n\tstring, // block hash\n\tstring, // transaction receipt\n\tstring, // merkle root\n\t*fcrmerkletree.FCRMerkleProof, // merkle proof\n\terror, // error\n) {\n\tif fcrMsg.GetMessageType() != GatewayListDHTOfferRequestType {\n\t\treturn nil, nil, nil, \"\", \"\", \"\", nil, errors.New(\"message type mismatch\")\n\t}\n\tmsg := gatewayListDHTOfferRequest{}\n\terr := json.Unmarshal(fcrMsg.GetMessageBody(), &msg)\n\tif err != nil {\n\t\treturn nil, nil, nil, \"\", \"\", \"\", nil, err\n\t}\n\tnodeID, _ := nodeid.NewNodeIDFromHexString(msg.GatewayID)\n\tcontentIDMin, _ := cid.NewContentIDFromHexString(msg.CIDMin)\n\tcontentIDMax, _ := cid.NewContentIDFromHexString(msg.CIDMax)\n\treturn nodeID, contentIDMin, contentIDMax, msg.BlockHash, msg.TransactionReceipt, msg.MerkleRoot, &msg.MerkleProof, nil\n}",
"func (t *WriteTxnMarkersRequest) Decode(d *Decoder, version int16) error {\n\tvar err error\n\t// Markers\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Markers = make([]WritableTxnMarker27, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item WritableTxnMarker27\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Markers[i] = item\n\t\t}\n\t}\n\treturn err\n}",
"func BuildListMinePayload(exportListMineAuth string) (*export.ListMinePayload, error) {\n\tvar auth string\n\t{\n\t\tauth = exportListMineAuth\n\t}\n\tv := &export.ListMinePayload{}\n\tv.Auth = auth\n\n\treturn v, nil\n}",
"func DecodeGrpcReqAppList(ctx context.Context, request interface{}) (interface{}, error) {\n\treq := request.(*AppList)\n\treturn req, nil\n}",
"func decodeListOrderRequest(_ context.Context, r *stdhttp.Request) (interface{}, error) {\n\tqp := processBasicQP(r)\n\treturn qp, nil\n}",
"func EncodeListMineResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationsFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListMineResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func DecodeGrpcReqNetworkSecurityPolicyList(ctx context.Context, request interface{}) (interface{}, error) {\n\treq := request.(*NetworkSecurityPolicyList)\n\treturn req, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
EncodeListProjectResponse returns an encoder for responses returned by the station list project endpoint.
|
func EncodeListProjectResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {
	return func(ctx context.Context, w http.ResponseWriter, v interface{}) error {
		res := v.(*stationviews.StationsFull)
		enc := encoder(ctx, w)
		body := NewListProjectResponseBody(res.Projected)
		w.WriteHeader(http.StatusOK)
		return enc.Encode(body)
	}
}
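
One sharp edge shared by all of these encoders: the unchecked assertion v.(*stationviews.StationsFull) panics if the endpoint ever returns a different type. Several negatives above use the two-value form instead (e.g. EncodeConcatstringsResponse), and goahttp.ErrInvalidType appears with the signature used below in the EncodeUpdateRequest negative. A defensive sketch; the function name is ours, not generated:

func encodeListProjectDefensive(ctx context.Context, w http.ResponseWriter, v interface{}, encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) error {
	// Report the mismatch instead of panicking on a bad assertion.
	res, ok := v.(*stationviews.StationsFull)
	if !ok {
		return goahttp.ErrInvalidType("station", "list project", "*stationviews.StationsFull", v)
	}
	enc := encoder(ctx, w)
	body := NewListProjectResponseBody(res.Projected)
	w.WriteHeader(http.StatusOK)
	return enc.Encode(body)
}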
|
[
"func EncodeListAllResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.PageOfStations)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListAllResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func EncodeProjectResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*discussionviews.Discussion)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewProjectResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func NewListProjectHandler(\n\tendpoint goa.Endpoint,\n\tmux goahttp.Muxer,\n\tdecoder func(*http.Request) goahttp.Decoder,\n\tencoder func(context.Context, http.ResponseWriter) goahttp.Encoder,\n\terrhandler func(context.Context, http.ResponseWriter, error),\n\tformatter func(err error) goahttp.Statuser,\n) http.Handler {\n\tvar (\n\t\tdecodeRequest = DecodeListProjectRequest(mux, decoder)\n\t\tencodeResponse = EncodeListProjectResponse(encoder)\n\t\tencodeError = EncodeListProjectError(encoder, formatter)\n\t)\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := context.WithValue(r.Context(), goahttp.AcceptTypeKey, r.Header.Get(\"Accept\"))\n\t\tctx = context.WithValue(ctx, goa.MethodKey, \"list project\")\n\t\tctx = context.WithValue(ctx, goa.ServiceKey, \"station\")\n\t\tpayload, err := decodeRequest(r)\n\t\tif err != nil {\n\t\t\tif err := encodeError(ctx, w, err); err != nil {\n\t\t\t\terrhandler(ctx, w, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tres, err := endpoint(ctx, payload)\n\t\tif err != nil {\n\t\t\tif err := encodeError(ctx, w, err); err != nil {\n\t\t\t\terrhandler(ctx, w, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif err := encodeResponse(ctx, w, res); err != nil {\n\t\t\terrhandler(ctx, w, err)\n\t\t}\n\t})\n}",
"func EncodeListMineResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*stationviews.StationsFull)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListMineResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func CreateListProjectAPIsResponse() (response *ListProjectAPIsResponse) {\n\tresponse = &ListProjectAPIsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func EncodeListResponse(ctx context.Context, v interface{}, hdr, trlr *metadata.MD) (interface{}, error) {\n\tvres, ok := v.(tasksviews.StoredTaskCollection)\n\tif !ok {\n\t\treturn nil, goagrpc.ErrInvalidType(\"tasks\", \"list\", \"tasksviews.StoredTaskCollection\", v)\n\t}\n\tresult := vres.Projected\n\t(*hdr).Append(\"goa-view\", vres.View)\n\tresp := NewStoredTaskCollection(result)\n\treturn resp, nil\n}",
"func EncodeListResponse(_ context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\terr = e.Encode(response)\n\treturn err\n}",
"func EncodeListResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.([]*pipeline.EnduroStoredPipeline)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func ListProject(projectID string) error {\n\tclient, err := NewPacketClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp, _, err := client.Projects.Get(projectID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te := MarshallAndPrint(p)\n\treturn e\n}",
"func EncodeListResponse(ctx context.Context, v interface{}, hdr, trlr *metadata.MD) (interface{}, error) {\n\tvres, ok := v.(todoviews.TodoCollection)\n\tif !ok {\n\t\treturn nil, goagrpc.ErrInvalidType(\"todo\", \"list\", \"todoviews.TodoCollection\", v)\n\t}\n\tresult := vres.Projected\n\t(*hdr).Append(\"goa-view\", vres.View)\n\tresp := NewTodoCollection(result)\n\treturn resp, nil\n}",
"func (c *Client) ListProject(ctx context.Context, p *ListProjectPayload) (res *StationsFull, err error) {\n\tvar ires interface{}\n\tires, err = c.ListProjectEndpoint(ctx, p)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(*StationsFull), nil\n}",
"func ProjectListAll(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\n\t// Get Results Object\n\n\tres, err := projects.Find(\"\", \"\", refStr)\n\n\tif err != nil && err.Error() != \"not found\" {\n\t\terr := APIErrQueryDatastore()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := res.ExportJSON()\n\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}",
"func EncodeListResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*inventory.ListResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewListResponseBody(res)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func ProjectListOne(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\turlProject := urlVars[\"project\"]\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\n\t// Get Results Object\n\tresults, err := projects.Find(\"\", urlProject, refStr)\n\n\tif err != nil {\n\n\t\tif err.Error() == \"not found\" {\n\t\t\terr := APIErrorNotFound(\"ProjectUUID\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t\terr := APIErrQueryDatastore()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tres := results.One()\n\tresJSON, err := res.ExportJSON()\n\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}",
"func (p ProjectList) MarshalJSON() ([]byte, error) {\n\ttype Alias ProjectList\n\treturn json.Marshal(\n\t\tstruct {\n\t\t\tmeta.TypeMeta `json:\",inline\"`\n\t\t\tAlias `json:\",inline\"`\n\t\t}{\n\t\t\tTypeMeta: meta.TypeMeta{\n\t\t\t\tAPIVersion: meta.APIVersion,\n\t\t\t\tKind: \"ProjectList\",\n\t\t\t},\n\t\t\tAlias: (Alias)(p),\n\t\t},\n\t)\n}",
"func ProjectList(c *gin.Context) error {\n\tuserID, err := GetIDParam(c, userIDParam)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toption := &model.ProjectQueryOption{\n\t\tUserID: userID,\n\t}\n\n\tif limit := c.Query(\"limit\"); limit != \"\" {\n\t\tif i, err := strconv.Atoi(limit); err == nil {\n\t\t\toption.Limit = i\n\t\t}\n\t}\n\n\tif offset := c.Query(\"offset\"); offset != \"\" {\n\t\tif i, err := strconv.Atoi(offset); err == nil {\n\t\t\toption.Offset = i\n\t\t}\n\t}\n\n\tif order := c.Query(\"order\"); order != \"\" {\n\t\toption.Order = order\n\t} else {\n\t\toption.Order = \"-created_at\"\n\t}\n\n\tif err := CheckUserPermission(c, *userID); err == nil {\n\t\toption.Private = true\n\t}\n\n\tlist, err := model.GetProjectList(option)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn common.APIResponse(c, http.StatusOK, list)\n}",
"func EncodeGrpcRespAppList(ctx context.Context, response interface{}) (interface{}, error) {\n\treturn response, nil\n}",
"func encodeListResponse(w *rf.Context, response interface{}) error {\n\tresp := response.(listResponse)\n\treturn encodeResponse(w, resp.Socks)\n}",
"func BuildListProjectPayload(stationListProjectID string, stationListProjectAuth string) (*station.ListProjectPayload, error) {\n\tvar err error\n\tvar id int32\n\t{\n\t\tvar v int64\n\t\tv, err = strconv.ParseInt(stationListProjectID, 10, 32)\n\t\tid = int32(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid value for id, must be INT32\")\n\t\t}\n\t}\n\tvar auth string\n\t{\n\t\tauth = stationListProjectAuth\n\t}\n\tv := &station.ListProjectPayload{}\n\tv.ID = id\n\tv.Auth = auth\n\n\treturn v, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|