query
stringlengths
8
6.75k
document
stringlengths
9
1.89M
negatives
listlengths
19
19
metadata
dict
listDHCPServers lists all DHCP server settings in a map keyed by DHCP.NetworkName.
func listDHCPServers(vbox VBoxManager) (map[string]*dhcpServer, error) { out, err := vbox.vbmOut("list", "dhcpservers") if err != nil { return nil, err } m := map[string]*dhcpServer{} dhcp := &dhcpServer{} err = parseKeyValues(out, reColonLine, func(key, val string) error { switch key { case "NetworkName": dhcp = &dhcpServer{} m[val] = dhcp dhcp.NetworkName = val case "IP": dhcp.IPv4.IP = net.ParseIP(val) case "upperIPAddress": dhcp.UpperIP = net.ParseIP(val) case "lowerIPAddress": dhcp.LowerIP = net.ParseIP(val) case "NetworkMask": dhcp.IPv4.Mask = parseIPv4Mask(val) case "Enabled": dhcp.Enabled = (val == "Yes") } return nil }) if err != nil { return nil, err } return m, nil }
[ "func (o *VirtualizationVmwareVirtualMachineAllOf) GetDnsServerList() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.DnsServerList\n}", "func ListDHCPVirtualNetworks(path string) ([]int, error) {\n\t// example networking config\n\t//\n\t// $ cat /etc/vmware/networking\n\t//\n\t// VERSION=1,0\n\t// answer VNET_1_DHCP yes\n\t// ...\n\t// answer VNET_8_DHCP yes\n\t// ...\n\tvar networks []int\n\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn networks, err\n\t}\n\n\tmatches := reNetworkingConfig.FindAllStringSubmatch(string(data), -1)\n\n\tfor _, match := range matches {\n\t\tnetID, err := strconv.ParseInt(match[1], 10, 0)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"unexpected number format %q\", match[1])\n\t\t\tcontinue\n\t\t}\n\t\tnetworks = append(networks, int(netID))\n\t}\n\n\treturn networks, nil\n}", "func WithDHCPNameServers(dns []string) Option {\n\treturn func(d *dnsmasq) {\n\t\td.dns = dns\n\t}\n}", "func (client WorkloadNetworksClient) ListDhcpResponder(resp *http.Response) (result WorkloadNetworkDhcpList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func removeOrphanDHCPServers(vbox VBoxManager) error {\n\tdhcps, err := listDHCPServers(vbox)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(dhcps) == 0 {\n\t\treturn nil\n\t}\n\n\tnets, err := listHostOnlyAdapters(vbox)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor name := range dhcps {\n\t\tif strings.HasPrefix(name, dhcpPrefix) {\n\t\t\tif _, present := nets[name]; !present {\n\t\t\t\tif err := vbox.vbm(\"dhcpserver\", \"remove\", \"--netname\", name); err != nil {\n\t\t\t\t\tlog.Warnf(\"Unable to remove orphan dhcp server %q: %s\", name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func runListServers(_ *cobra.Command, _ []string) 
{\n\tcfg, err := config.LoadFromFile()\n\tif err != nil {\n\t\texitWithError(err)\n\t}\n\n\tregions, err := checkRegions(*region)\n\tif err != nil {\n\t\texitWithError(err)\n\t}\n\n\tnameFilter := core.NewFilter(core.TagName, *name, core.Contains, *ignoreCase)\n\tenvFilter := core.NewFilter(core.TagEnv, *env, core.Equals, *ignoreCase)\n\tservers, err := core.GetAllServers(cfg.AWSCredentials, regions, nameFilter, envFilter)\n\tif err != nil {\n\t\texitWithError(err)\n\t}\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\tfmt.Fprintln(w, \"NAME\\tENVIRONMENT\\tPRIVATE IP\\tPUBLIC IP\")\n\tfor _, server := range servers {\n\t\tfmt.Fprintf(w, \"%s\\t%s\\t%s\\t%s\\n\", server.Name, server.Env, server.PrivateIP, server.PublicIP)\n\t}\n\tw.Flush()\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) SetDnsServerList(v []string) {\n\to.DnsServerList = v\n}", "func (r *ManagementDHCPResource) ListAll() (*ManagementDHCPConfigList, error) {\n\tvar list ManagementDHCPConfigList\n\tif err := r.c.ReadQuery(BasePath+ManagementDHCPEndpoint, &list); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &list, nil\n}", "func (m *Map) ListSRV() (SRVRecords, error) {\n\ttmp := SRVRecords{}\n\n\tm.srvMutex.RLock()\n\tdefer m.srvMutex.RUnlock()\n\n\tfor name, rec := range m.srvRecords {\n\t\tt := *rec\n\t\ttmp[name] = &t\n\t}\n\n\treturn tmp, nil\n}", "func (h *InterfaceVppHandler) DumpDhcpClients() (map[uint32]*vppcalls.Dhcp, error) {\n\tdhcpData := make(map[uint32]*vppcalls.Dhcp)\n\treqCtx := h.callsChannel.SendMultiRequest(&dhcp.DHCPClientDump{})\n\n\tfor {\n\t\tdhcpDetails := &dhcp.DHCPClientDetails{}\n\t\tlast, err := reqCtx.ReceiveReply(dhcpDetails)\n\t\tif last {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclient := dhcpDetails.Client\n\t\tlease := dhcpDetails.Lease\n\n\t\tvar hostMac net.HardwareAddr = lease.HostMac\n\t\tvar hostAddr, routerAddr string\n\t\tif uintToBool(lease.IsIPv6) {\n\t\t\thostAddr = fmt.Sprintf(\"%s/%d\", 
net.IP(lease.HostAddress).To16().String(), uint32(lease.MaskWidth))\n\t\t\trouterAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.RouterAddress).To16().String(), uint32(lease.MaskWidth))\n\t\t} else {\n\t\t\thostAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.HostAddress[:4]).To4().String(), uint32(lease.MaskWidth))\n\t\t\trouterAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.RouterAddress[:4]).To4().String(), uint32(lease.MaskWidth))\n\t\t}\n\n\t\t// DHCP client data\n\t\tdhcpClient := &vppcalls.Client{\n\t\t\tSwIfIndex: client.SwIfIndex,\n\t\t\tHostname: string(bytes.SplitN(client.Hostname, []byte{0x00}, 2)[0]),\n\t\t\tID: string(bytes.SplitN(client.ID, []byte{0x00}, 2)[0]),\n\t\t\tWantDhcpEvent: uintToBool(client.WantDHCPEvent),\n\t\t\tSetBroadcastFlag: uintToBool(client.SetBroadcastFlag),\n\t\t\tPID: client.PID,\n\t\t}\n\n\t\t// DHCP lease data\n\t\tdhcpLease := &vppcalls.Lease{\n\t\t\tSwIfIndex: lease.SwIfIndex,\n\t\t\tState: lease.State,\n\t\t\tHostname: string(bytes.SplitN(lease.Hostname, []byte{0x00}, 2)[0]),\n\t\t\tIsIPv6: uintToBool(lease.IsIPv6),\n\t\t\tHostAddress: hostAddr,\n\t\t\tRouterAddress: routerAddr,\n\t\t\tHostMac: hostMac.String(),\n\t\t}\n\n\t\t// DHCP metadata\n\t\tdhcpData[client.SwIfIndex] = &vppcalls.Dhcp{\n\t\t\tClient: dhcpClient,\n\t\t\tLease: dhcpLease,\n\t\t}\n\t}\n\n\treturn dhcpData, nil\n}", "func (client WorkloadNetworksClient) ListDhcpSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (d *DhcpServerServers) Get(client sophos.ClientInterface, options ...sophos.Option) (err error) {\n\treturn get(client, \"/api/nodes/dhcp.server.servers\", &d.Value, options...)\n}", "func (c *OperatorDNS) List() (srvRecords map[string][]SrvRecord, err error) {\n\treturn nil, ErrNotImplemented\n}", "func (api Solarwinds) ListServers(siteid int) ([]Server, error) {\n\tbody := struct {\n\t\tItems []Server `xml:\"items>server\"`\n\t}{}\n\n\terr := 
api.get(url.Values{\n\t\t\"service\": []string{\"list_servers\"},\n\t\t\"siteid\": []string{strconv.Itoa(siteid)},\n\t}, &body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body.Items, nil\n}", "func (d *DHCPv4) NTPServers() []net.IP {\n\treturn GetIPs(OptionNTPServers, d.Options)\n}", "func (s *API) ListDNSZoneNameservers(req *ListDNSZoneNameserversRequest, opts ...scw.RequestOption) (*ListDNSZoneNameserversResponse, error) {\n\tvar err error\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2alpha2/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/nameservers\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListDNSZoneNameserversResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (s *API) ListDNSZoneNameservers(req *ListDNSZoneNameserversRequest, opts ...scw.RequestOption) (*ListDNSZoneNameserversResponse, error) {\n\tvar err error\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2beta1/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/nameservers\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListDNSZoneNameserversResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (n *Network) DnsNameServers() []string {\n\treturn n.DnsNameServers_\n}", "func (ts *TemplateService) ListTemplateServers(templateID string) (templateServer []*types.TemplateServer, err error) 
{\n\tlog.Debug(\"ListTemplateServers\")\n\n\tdata, status, err := ts.concertoService.Get(fmt.Sprintf(APIPathBlueprintTemplateServers, templateID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = json.Unmarshal(data, &templateServer); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn templateServer, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0). This function should really belong to the net package.
// parseIPv4Mask parses an IPv4 netmask written in IP form
// (e.g. 255.255.255.0). It returns nil when s is not a valid
// IPv4 address. This function should really belong to the net
// package.
func parseIPv4Mask(s string) net.IPMask {
	ip := net.ParseIP(s)
	if ip == nil {
		return nil
	}
	// Reject IPv6 input: indexing the 16-byte form directly would
	// silently produce a meaningless mask for addresses like "::1".
	ip4 := ip.To4()
	if ip4 == nil {
		return nil
	}
	return net.IPv4Mask(ip4[0], ip4[1], ip4[2], ip4[3])
}
[ "func ParseIPv4Mask(s string) net.IPMask {\n\tmask := net.ParseIP(s)\n\tif mask == nil {\n\t\treturn nil\n\t}\n\treturn net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])\n}", "func parseIPv4Mask(s string) net.IPMask {\n\tmask := net.ParseIP(s)\n\tif mask != nil {\n\t\treturn net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])\n\t}\n\treturn nil\n}", "func ParseIPv4(s string) net.IP {\n\tip := net.ParseIP(s)\n\n\tif ip != nil {\n\t\tip = ip.To4()\n\t}\n\n\treturn ip\n}", "func parseIPv4(s string) (net.IP, error) {\n\tv, err := strconv.ParseUint(s, 16, 32)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tip := make(net.IP, net.IPv4len)\n\tbinary.LittleEndian.PutUint32(ip, uint32(v))\n\treturn ip, nil\n}", "func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) {\n\t// Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In\n\t// particular, clients with the Barracuda VPN client will see something like:\n\t// `192.168.3.51/00ffffff` as their IP address.\n\ttrailingHexNetmaskRe := trailingHexNetmaskRE.Copy()\n\tif match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil {\n\t\tipv4Str = ipv4Str[:match[0]]\n\t}\n\n\t// Parse as an IPv4 CIDR\n\tipAddr, network, err := net.ParseCIDR(ipv4Str)\n\tif err == nil {\n\t\tipv4 := ipAddr.To4()\n\t\tif ipv4 == nil {\n\t\t\treturn IPv4Addr{}, fmt.Errorf(\"Unable to convert %s to an IPv4 address\", ipv4Str)\n\t\t}\n\n\t\t// If we see an IPv6 netmask, convert it to an IPv4 mask.\n\t\tnetmaskSepPos := strings.LastIndexByte(ipv4Str, '/')\n\t\tif netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) {\n\t\t\tnetMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8)\n\t\t\tif err != nil {\n\t\t\t\treturn IPv4Addr{}, fmt.Errorf(\"Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v\", ipv4Str, err)\n\t\t\t} else if netMask > 128 {\n\t\t\t\treturn IPv4Addr{}, fmt.Errorf(\"Unable to convert %s to an IPv4 address: invalid CIDR netmask\", ipv4Str)\n\t\t\t}\n\n\t\t\tif netMask >= 
96 {\n\t\t\t\t// Convert the IPv6 netmask to an IPv4 netmask\n\t\t\t\tnetwork.Mask = net.CIDRMask(int(netMask-96), IPv4len*8)\n\t\t\t}\n\t\t}\n\t\tipv4Addr := IPv4Addr{\n\t\t\tAddress: IPv4Address(binary.BigEndian.Uint32(ipv4)),\n\t\t\tMask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)),\n\t\t}\n\t\treturn ipv4Addr, nil\n\t}\n\n\t// Attempt to parse ipv4Str as a /32 host with a port number.\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp4\", ipv4Str)\n\tif err == nil {\n\t\tipv4 := tcpAddr.IP.To4()\n\t\tif ipv4 == nil {\n\t\t\treturn IPv4Addr{}, fmt.Errorf(\"Unable to resolve %+q as an IPv4 address\", ipv4Str)\n\t\t}\n\n\t\tipv4Uint32 := binary.BigEndian.Uint32(ipv4)\n\t\tipv4Addr := IPv4Addr{\n\t\t\tAddress: IPv4Address(ipv4Uint32),\n\t\t\tMask: IPv4HostMask,\n\t\t\tPort: IPPort(tcpAddr.Port),\n\t\t}\n\n\t\treturn ipv4Addr, nil\n\t}\n\n\t// Parse as a naked IPv4 address\n\tip := net.ParseIP(ipv4Str)\n\tif ip != nil {\n\t\tipv4 := ip.To4()\n\t\tif ipv4 == nil {\n\t\t\treturn IPv4Addr{}, fmt.Errorf(\"Unable to string convert %+q to an IPv4 address\", ipv4Str)\n\t\t}\n\n\t\tipv4Uint32 := binary.BigEndian.Uint32(ipv4)\n\t\tipv4Addr := IPv4Addr{\n\t\t\tAddress: IPv4Address(ipv4Uint32),\n\t\t\tMask: IPv4HostMask,\n\t\t}\n\t\treturn ipv4Addr, nil\n\t}\n\n\treturn IPv4Addr{}, fmt.Errorf(\"Unable to parse %+q to an IPv4 address: %v\", ipv4Str, err)\n}", "func WithIPv4Mask(mask net.IPMask) Option {\n\treturn func(o *Options) {\n\t\to.IPv4Mask = mask\n\t}\n}", "func ParseIPv4Port(address string) (ip gnet.IP, port uint16, err error) {\n\tcolonPos := strings.Index(address, \":\")\n\tif colonPos == -1 {\n\t\terr = errors.New(\"Address must be formated as x.x.x.x:x\")\n\t\treturn\n\t}\n\tip = gnet.ParseIP(address[:colonPos])\n\tif len(ip) != 16 {\n\t\terr = errors.New(\"Can not parse ip to net.IP\")\n\t\treturn\n\t}\n\tip = ip[12:]\n\t_port, err := strconv.Atoi(address[colonPos+1:])\n\tif err != nil {\n\t\terr = errors.New(\"Can not parse port to int\")\n\t\treturn\n\t}\n\tif 
_port > 65535 || _port < 0 {\n\t\terr = errors.New(\"Port must between 0~65535\")\n\t}\n\tport = uint16(_port)\n\n\treturn\n}", "func ParseIPv4(data []byte) (Packet, error) {\n\tif len(data) < 20 {\n\t\treturn nil, ErrorTruncated\n\t}\n\tihl := int(data[0] & 0x0f)\n\theaderLen := ihl * 4\n\tlength := int(bo.Uint16(data[2:]))\n\n\tif headerLen < 20 || headerLen > length {\n\t\treturn nil, ErrorInvalid\n\t}\n\tif length > len(data) {\n\t\treturn nil, ErrorTruncated\n\t}\n\tif Checksum(data[0:headerLen]) != 0 {\n\t\treturn nil, ErrorChecksum\n\t}\n\n\treturn &IPv4{\n\t\tversion: int(data[0] >> 4),\n\t\ttos: int(data[1]),\n\t\tid: bo.Uint16(data[4:]),\n\t\tflags: int8(data[6] >> 5),\n\t\toffset: bo.Uint16(data[6:]) & 0x1fff,\n\t\tttl: data[8],\n\t\tprotocol: Protocol(data[9]),\n\t\tsrc: net.IP(data[12:16]),\n\t\tdst: net.IP(data[16:20]),\n\t\tdata: data[headerLen:length],\n\t}, nil\n}", "func (i Internet) Ipv4() string {\n\tips := make([]string, 0, 4)\n\n\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(1, 255)))\n\tfor j := 0; j < 3; j++ {\n\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t}\n\n\treturn strings.Join(ips, \".\")\n}", "func AsIPV4IpNet(IPV4 string) *net.IPNet {\n\tif IsIPv4(IPV4) {\n\t\tIPV4 += \"/32\"\n\t}\n\t_, network, err := net.ParseCIDR(IPV4)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn network\n}", "func parseIPv4(ip string) net.IP {\n\tif parsedIP := net.ParseIP(strings.TrimSpace(ip)); parsedIP != nil {\n\t\tif ipv4 := parsedIP.To4(); ipv4 != nil {\n\t\t\treturn ipv4\n\t\t}\n\t}\n\n\treturn nil\n}", "func MustParseSubnet4(subnet string) tcpip.Subnet {\n\tparts := strings.Split(subnet, \"/\")\n\tif len(parts) != 2 {\n\t\tpanic(fmt.Sprintf(\"MustParseSubnet4 expected CIDR notation (<addr>/<prefixLen>), but got %q\", subnet))\n\t}\n\taddr := MustParse4(parts[0])\n\tprefixLen, err := strconv.Atoi(parts[1])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to parse prefix length %q: %v\", parts[1], err))\n\t}\n\tif prefixLen 
< 0 || prefixLen > 32 {\n\t\tpanic(fmt.Sprintf(\"Prefix length %d is invalid. It must be between 0 and 32\", prefixLen))\n\t}\n\tprefixed := tcpip.AddressWithPrefix{Address: addr, PrefixLen: prefixLen}\n\treturn prefixed.Subnet()\n}", "func ValidateIPv4Cidr(cidr string) (bool, string) {\n\tre := regexp.MustCompile(`^([0-9]{1,3}\\.){3}[0-9]{1,3}(\\/([0-9]|[1-2][0-9]|3[0-2]))?$`)\n\tisMatched := re.MatchString(cidr)\n\tif !isMatched {\n\t\treturn isMatched, \"subnet cidr is invalid\"\n\t}\n\treturn isMatched, \"\"\n}", "func (ipv4 IPv4Addr) NetIPMask() *net.IPMask {\n\tipv4Mask := net.IPMask{}\n\tipv4Mask = make(net.IPMask, IPv4len)\n\tbinary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask))\n\treturn &ipv4Mask\n}", "func ParseProxyIPV4(ip string) (*pb.IPAddress, error) {\n\tnetIP := net.ParseIP(ip)\n\tif netIP == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid IP address: %s\", ip)\n\t}\n\n\toBigInt := IPToInt(netIP.To4())\n\treturn &pb.IPAddress{\n\t\tIp: &pb.IPAddress_Ipv4{\n\t\t\tIpv4: uint32(oBigInt.Uint64()),\n\t\t},\n\t}, nil\n}", "func parseIPv4(s string) (ip ipOctets, cc int) {\n\tip = make(ipOctets, net.IPv4len)\n\n\tfor i := 0; i < net.IPv4len; i++ {\n\t\tip[i] = make([]ipOctet, 0)\n\t}\n\n\tvar bb [2]uint16 // octet bounds: 0 - lo, 1 - hi\n\n\ti := 0 // octet idx\n\tk := 0 // bound idx: 0 - lo, 1 - hi\n\nloop:\n\tfor i < net.IPv4len {\n\t\t// Decimal number.\n\t\tn, c, ok := dtoi(s)\n\t\tif !ok || n > 0xFF {\n\t\t\treturn nil, cc\n\t\t}\n\n\t\t// Save bound.\n\t\tbb[k] = uint16(n)\n\n\t\t// Stop at max of string.\n\t\ts = s[c:]\n\t\tcc += c\n\t\tif len(s) == 0 {\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\ti++\n\t\t\tbreak\n\t\t}\n\n\t\t// Otherwise must be followed by dot, colon or dp.\n\t\tswitch s[0] {\n\t\tcase '.':\n\t\t\tfallthrough\n\t\tcase ',':\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\tbb[1] = 0\n\t\t\tk = 0\n\t\tcase '-':\n\t\t\tif k == 1 {\n\t\t\t\t// To many dashes in one octet.\n\t\t\t\treturn nil, cc\n\t\t\t}\n\t\t\tk++\n\t\tdefault:\n\t\t\tip.push(i, 
bb[0], bb[1])\n\t\t\ti++\n\t\t\tbreak loop\n\t\t}\n\n\t\tif s[0] == '.' {\n\t\t\ti++\n\t\t}\n\n\t\ts = s[1:]\n\t\tcc++\n\t}\n\n\tif i < net.IPv4len {\n\t\t// Missing ip2octets.\n\t\treturn nil, cc\n\t}\n\n\treturn ip, cc\n}", "func IP4FromNetaddr(ip netaddr.IP) IP4 {\n\tipbytes := ip.As4()\n\treturn IP4(binary.BigEndian.Uint32(ipbytes[:]))\n}", "func (Server) edgeParseIPv4(from net.Addr) (string, error) {\n\tdeviceIPPORT := strings.Split(from.String(), \":\")\n\tsrcIP := deviceIPPORT[0]\n\n\tisV4 := net.ParseIP(srcIP)\n\tif isV4.To4() == nil {\n\t\treturn \"\", errors.New(\"Do Not Handle IPv6\")\n\t}\n\treturn srcIP, nil\n}", "func MustParse4(addr string) tcpip.Address {\n\tip := net.ParseIP(addr).To4()\n\tif ip == nil {\n\t\tpanic(fmt.Sprintf(\"Parse4 expects IPv4 addresses, but was passed %q\", addr))\n\t}\n\treturn tcpip.AddrFrom4Slice(ip)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewPreRunner takes a name and a standalone pre runner compatible function and turns them into a Group compatible PreRunner, ready for registration.
// NewPreRunner wraps a name and a standalone pre-run compatible
// function into a Group compatible PreRunner, ready for registration.
func NewPreRunner(name string, fn func() error) PreRunner {
	return preRunner{
		name: name,
		fn:   fn,
	}
}
[ "func preRun(c *cobra.Command, args []string) error {\n\tif len(args) < 1 {\n\t\treturn errors.New(\"Missing name argument\")\n\t}\n\n\tname = args[0]\n\treturn nil\n}", "func NewPreInstaller(applier ResourceApplier, parser ResourceParser, cfg Config, retryOptions []retry.Option) (*PreInstaller, error) {\n\trestConfig, err := config.RestConfig(cfg.KubeconfigSource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdynamicClient, err := dynamic.NewForConfig(restConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &PreInstaller{\n\t\tapplier: applier,\n\t\tparser: parser,\n\t\tcfg: cfg,\n\t\tdynamicClient: dynamicClient,\n\t\tretryOptions: retryOptions,\n\t}, nil\n}", "func newRunnerGroup(scope RunnerGroupScope, name string) RunnerGroup {\n\tif name == \"\" {\n\t\treturn RunnerGroup{\n\t\t\tScope: scope,\n\t\t\tKind: Default,\n\t\t\tName: \"\",\n\t\t}\n\t}\n\n\treturn RunnerGroup{\n\t\tScope: scope,\n\t\tKind: Custom,\n\t\tName: name,\n\t}\n}", "func (c Carapace) PreRun(f func(cmd *cobra.Command, args []string)) {\n\tif entry := storage.get(c.cmd); entry.prerun != nil {\n\t\t_f := entry.prerun\n\t\tentry.prerun = func(cmd *cobra.Command, args []string) {\n\t\t\t// TODO yuck - probably best to append to a slice in storage\n\t\t\t_f(cmd, args)\n\t\t\tf(cmd, args)\n\t\t}\n\t} else {\n\t\tentry.prerun = f\n\t}\n}", "func New(t *testing.T, name string, arg ...string) *Runner {\n\treturn &Runner{t, name, arg}\n}", "func NewRunner(ctx *pulumi.Context,\n\tname string, args *RunnerArgs, opts ...pulumi.ResourceOption) (*Runner, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RegistrationToken == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RegistrationToken'\")\n\t}\n\tif args.RegistrationToken != nil {\n\t\targs.RegistrationToken = pulumi.ToSecret(args.RegistrationToken).(pulumi.StringInput)\n\t}\n\tsecrets := 
pulumi.AdditionalSecretOutputs([]string{\n\t\t\"authenticationToken\",\n\t\t\"registrationToken\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Runner\n\terr := ctx.RegisterResource(\"gitlab:index/runner:Runner\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (k *PluginRunner) preRun(cmd *cobra.Command, args []string) error {\n\tlr := fLdr.RestrictionRootOnly\n\tfSys := filesys.MakeFsOnDisk()\n\tldr, err := fLdr.NewLoader(lr, filepath.Clean(k.root), fSys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv := validator.NewKustValidator()\n\n\tuf := kunstruct.NewKunstructuredFactoryImpl()\n\tvar pm resmap.Merginator // TODO The actual implementation is internal now...\n\trf := resmap.NewFactory(resource.NewFactory(uf), pm)\n\n\tk.h = resmap.NewPluginHelpers(ldr, v, rf)\n\n\tif c, ok := k.plugin.(resmap.Configurable); ok {\n\t\tconfig, err := k.config(cmd, args)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := c.Config(k.h, config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Supervisor) AddRunner(name string, callback Callback, policyOptions ...PolicyOption) {\n\tkey := fmt.Sprintf(\"%s-%s\", \"runner\", name)\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\n\tif _, exists := s.processes[key]; exists {\n\t\ts.logger(Error, loggerData{\"name\": name}, \"runner already exists\")\n\t\treturn\n\t}\n\n\tr := &runner{\n\t\tCallback: callback,\n\t\tname: name,\n\t\trestartPolicy: s.policy.Restart,\n\t\tlogger: s.logger,\n\t}\n\n\tp := Policy{\n\t\tRestart: s.policy.Restart,\n\t}\n\tp.Reconfigure(policyOptions...)\n\n\tr.restartPolicy = p.Restart\n\n\ts.processes[key] = r\n}", "func newPreReqValidator(opts ...option) *preReqValidator {\n\tcfg := defaultPreReqCfg()\n\tfor _, opt := range opts {\n\t\topt(cfg)\n\t}\n\treturn &preReqValidator{\n\t\tctxName: cfg.ctxName,\n\t\tns: cfg.ns,\n\t\tk8sClientProvider: 
cfg.k8sClientProvider,\n\t\toktetoClientProvider: cfg.oktetoClientProvider,\n\t\tgetContextStore: cfg.getContextStore,\n\t\tgetCtxResource: cfg.getCtxResource,\n\t}\n}", "func NewRunner() Runner {\n\treturn &defaultRunner{}\n}", "func NewRunner(be func() Backend, host string, userf, passf string) *Runner {\n\treturn &Runner{\n\t\tbe: be,\n\t\thost: host,\n\t\tuserf: userf,\n\t\tpassf: passf,\n\t\tsessions: make(chan error),\n\t\tpwdOver: make(chan struct{}),\n\t\tbroken: makeBroken(),\n\t\tlogins: gen.NewLogins(),\n\t\tagents: gen.NewAgents(),\n\t\tpool: newPool(),\n\t}\n}", "func (rnr *Runner) Preparer(l logger.Logger) (preparer.Preparer, error) {\n\n\t// NOTE: We have a good generic preparer so we'll provide that here\n\n\tl.Debug(\"** Preparer **\")\n\n\t// Return the existing preparer if we already have one\n\tif rnr.Prepare != nil {\n\t\tl.Debug(\"Returning existing preparer\")\n\t\treturn rnr.Prepare, nil\n\t}\n\n\tl.Debug(\"Creating new preparer\")\n\n\tp, err := prepare.NewPrepare(l)\n\tif err != nil {\n\t\tl.Warn(\"Failed new prepare >%v<\", err)\n\t\treturn nil, err\n\t}\n\n\tdb, err := rnr.Store.GetDb()\n\tif err != nil {\n\t\tl.Warn(\"Failed getting database handle >%v<\", err)\n\t\treturn nil, err\n\t}\n\n\terr = p.Init(db)\n\tif err != nil {\n\t\tl.Warn(\"Failed preparer init >%v<\", err)\n\t\treturn nil, err\n\t}\n\n\trnr.Prepare = p\n\n\treturn p, nil\n}", "func (client AppsClient) AddPremierAddOnPreparer(resourceGroupName string, name string, premierAddOnName string, premierAddOn PremierAddOn) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"name\": autorest.Encode(\"path\", name),\n\t\t\"premierAddOnName\": autorest.Encode(\"path\", premierAddOnName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2016-08-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": 
APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsJSON(),\n\t\tautorest.AsPut(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons/{premierAddOnName}\", pathParameters),\n\t\tautorest.WithJSON(premierAddOn),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare(&http.Request{})\n}", "func NewRunner(c cmd.Runner) *Runner {\n\treturn &Runner{cmd: c}\n}", "func NewFakeAppPrecondition(name string, numApps int, innerPre func(name string, opts ...chrome.Option) testing.Precondition, skiaRenderer bool) *preImpl {\n\tname = fmt.Sprintf(\"%s_%d\", name, numApps)\n\ttmpDir, err := ioutil.TempDir(\"\", name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\topts := make([]chrome.Option, 0, numApps)\n\tfor i := 0; i < numApps; i++ {\n\t\topts = append(opts, chrome.UnpackedExtension(filepath.Join(tmpDir, fmt.Sprintf(\"fake_%d\", i))))\n\t}\n\tif skiaRenderer {\n\t\tname = name + \"_skia_renderer\"\n\t\topts = append(opts, chrome.EnableFeatures(\"UseSkiaRenderer\"))\n\t}\n\tcrPre := innerPre(name, opts...)\n\treturn &preImpl{crPre: crPre, numApps: numApps, extDirBase: tmpDir, prepared: false}\n}", "func NewRunner(pubClientFactory func() publisher.Client, mod *Wrapper) Runner {\n\treturn &runner{\n\t\tdone: make(chan struct{}),\n\t\tmod: mod,\n\t\tclient: pubClientFactory(),\n\t}\n}", "func setupRunner(ctx context.Context, factory RunnerFactory, schemaNames ...string) (Runner, error) {\n\trunner, err := factory(ctx, schemaNames)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn runner, nil\n}", "func NewRunner(name string, worker IWorker) IRunner {\n\treturn &runner{\n\t\tname: name,\n\t\tworker: worker,\n\t\tfactory: create(name),\n\t\tprocs: make(map[uint64]action.IAction),\n\t}\n}", "func NewPrecompileCaller(address common.Address, caller bind.ContractCaller) (*PrecompileCaller, 
error) {\n\tcontract, err := bindPrecompile(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PrecompileCaller{contract: contract}, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewGroup return a Group with input name.
// NewGroup returns a Group carrying the given name, with its
// readiness channel initialized.
func NewGroup(name string) Group {
	g := Group{name: name}
	g.readyCh = make(chan struct{})
	return g
}
[ "func NewGroup(name string) *Group {\n\treturn &Group{Name: name}\n}", "func NewGroup(ctx *pulumi.Context,\n\tname string, args *GroupArgs, opts ...pulumi.ResourceOption) (*Group, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Path == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Path'\")\n\t}\n\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n\t\t\"runnersToken\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Group\n\terr := ctx.RegisterResource(\"gitlab:index/group:Group\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewGroup() *Group {\n\treturn &Group{}\n}", "func NewGroup(name string, o Owner) *Group {\n\tng := new(Group)\n\tng.Own = o\n\tng.Name = name\n\n\tvar nid OwnerID\n\tnid.Type = 'g'\n\tnid.UserDefined = name2userdefined(name)\n\t// nid.Stamp = newstamp()\n\n\tng.ID = nid\n\n\treturn ng\n}", "func NewGroup(ctx *pulumi.Context,\n\tname string, args *GroupArgs, opts ...pulumi.ResourceOption) (*Group, error) {\n\tif args == nil {\n\t\targs = &GroupArgs{}\n\t}\n\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Group\n\terr := ctx.RegisterResource(\"aws-native:synthetics:Group\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func New(dir string) *Group {\n\tg := &Group{\n\t\tdir: dir,\n\t}\n\tg.Clear()\n\treturn g\n}", "func newGroup(pk PK) *group {\n\treturn &group{\n\t\tPK: pk,\n\t\tMemberPK: make(map[Addr]PK),\n\t}\n}", "func New() *Group {\n\treturn &Group{\n\t\tch: make(chan struct{}),\n\t}\n}", "func NewGroup()(*Group) {\n m := &Group{\n DirectoryObject: *NewDirectoryObject(),\n }\n odataTypeValue := \"#microsoft.graph.group\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func New(gid string) *Group {\n return &Group{\n Client: 
client.New().Init(),\n GroupID: gid,\n }\n}", "func newRunnerGroup(scope RunnerGroupScope, name string) RunnerGroup {\n\tif name == \"\" {\n\t\treturn RunnerGroup{\n\t\t\tScope: scope,\n\t\t\tKind: Default,\n\t\t\tName: \"\",\n\t\t}\n\t}\n\n\treturn RunnerGroup{\n\t\tScope: scope,\n\t\tKind: Custom,\n\t\tName: name,\n\t}\n}", "func (visual *Visual) NewGroup(parts []string, effect string) (*Group, error) {\n\tgroup, err := newGroup(parts, effect)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvisual.mux.Lock()\n\tvisual.groups = append(visual.groups, group)\n\tvisual.mux.Unlock()\n\n\treturn group, nil\n}", "func (s *Set) NewGroup(re regexp.Regexp) *Group {\n}", "func (frame *Framework) NewNamedGroup(name string, pattern string, children ...*MuxAPI) *MuxAPI {\r\n\tgroup := frame.NewNamedAPI(name, \"\", pattern)\r\n\tgroup.children = append(group.children, children...)\r\n\tfor _, child := range children {\r\n\t\tchild.parent = group\r\n\t}\r\n\treturn group\r\n}", "func NewGroup(ctx context.Context) *Group {\n\tctx, cancel := context.WithCancel(ctx)\n\n\tg := &Group{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tdone: make(chan struct{}),\n\t}\n\n\tgo g.wait()\n\n\treturn g\n}", "func NewGroup() *Group {\n\tin := make(chan Message)\n\tclose := make(chan bool)\n\treturn &Group{in: in, close: close, clock: 0}\n}", "func (g *Group) New(s string) *Group {\n\treturn g.Add(errors.New(s))\n}", "func (frame *Framework) NewGroup(pattern string, children ...*MuxAPI) *MuxAPI {\r\n\treturn frame.NewNamedGroup(\"\", pattern, children...)\r\n}", "func (s *GroupsService) Create(\n\tctx context.Context,\n\tgroupName string,\n) error {\n\traw, err := json.Marshal(struct {\n\t\tGroupName string `json:\"group_name\"`\n\t}{\n\t\tgroupName,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\n\t\thttp.MethodPost,\n\t\ts.client.url+\"2.0/groups/create\",\n\t\tbytes.NewBuffer(raw),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq = req.WithContext(ctx)\n\tres, 
err := s.client.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode >= 300 || res.StatusCode <= 199 {\n\t\treturn fmt.Errorf(\n\t\t\t\"Failed to returns 2XX response: %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Name returns the name of the group.
func (g Group) Name() string { return g.name }
[ "func (g *Group) Name() string {\n\treturn g.name\n}", "func (g *Group) Name() (name string) {\n\treturn g.name\n}", "func (o ServerGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ServerGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o PlacementGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *PlacementGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (r *LogGroup) Name() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"name\"])\n}", "func (o ReportGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ReportGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o ThingGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ThingGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (e *awsElastigroup) Name() string { return fi.StringValue(e.obj.Name) }", "func (r *Group) Name(name string) *Group {\n\tr.ID = validateID(name)\n\treturn r\n}", "func (o ChallengeSpecSolverDns01WebhookOutput) GroupName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ChallengeSpecSolverDns01Webhook) string { return v.GroupName }).(pulumi.StringOutput)\n}", "func (o ScheduleOutput) GroupName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Schedule) pulumi.StringPtrOutput { return v.GroupName }).(pulumi.StringPtrOutput)\n}", "func (s *SoundGroup) Name() (string, error) {\n\tnlen := C.int(len(s.name) + 1)\n\tvar cname *C.char = C.CString(`\\0`)\n\tdefer C.free(unsafe.Pointer(cname))\n\tres := C.FMOD_SoundGroup_GetName(s.cptr, cname, nlen)\n\tif C.GoString(cname) != s.name {\n\t\treturn s.name, errors.New(\"Wrong names\")\n\t}\n\treturn C.GoString(cname), errs[res]\n}", "func (o BudgetResourceGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *BudgetResourceGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o InstanceGroupManagerOutput) Name() 
pulumi.StringOutput {\n\treturn o.ApplyT(func(v *InstanceGroupManager) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o EventHubConsumerGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EventHubConsumerGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (p ByName) GroupName() string { return p.groupName }", "func (pg *PropertyGroup) Name() string {\n\treturn pg.name\n}", "func (o DomainGroupOutput) GroupName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DomainGroup) pulumi.StringOutput { return v.GroupName }).(pulumi.StringOutput)\n}", "func (o GroupPolicyOutput) GroupName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GroupPolicy) pulumi.StringOutput { return v.GroupName }).(pulumi.StringOutput)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register will inspect the provided objects implementing the Unit interface to see if it needs to register the objects for any of the Group bootstrap phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored by Group. The returned array of booleans is of the same size as the amount of provided Units, signaling for each provided Unit if it successfully registered with Group for at least one of the bootstrap phases or if it was ignored.
func (g *Group) Register(units ...Unit) []bool { g.log = logger.GetLogger(g.name) hasRegistered := make([]bool, len(units)) for idx := range units { if !g.configured { // if RunConfig has been called we can no longer register Config // phases of Units if c, ok := units[idx].(Config); ok { g.c = append(g.c, c) hasRegistered[idx] = true } } if p, ok := units[idx].(PreRunner); ok { g.p = append(g.p, p) hasRegistered[idx] = true } if s, ok := units[idx].(Service); ok { g.s = append(g.s, s) hasRegistered[idx] = true } } return hasRegistered }
[ "func (v *Validator) Register(structs ...interface{}) error {\n\tfor _, s := range structs {\n\t\tif err := v.register(s); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func Register(pcs ...components.Pluggable) int {\n\tregisteredComponents := 0\n\tfor _, pc := range pcs {\n\t\tregister, ok := registries[pc.Type]\n\t\tif !ok {\n\t\t\tlog.Warnf(\"%s is not registered as a pluggable component\", pc.Type)\n\t\t\tcontinue\n\t\t}\n\t\tregister(pc)\n\t\tregisteredComponents++\n\t\tlog.Infof(\"%s.%s %s pluggable component was successfully registered\", pc.Type, pc.Name, pc.Version)\n\t}\n\treturn registeredComponents\n}", "func (p *pubSubRegistry) Register(components ...PubSub) {\n\tfor _, component := range components {\n\t\tp.messageBuses[createFullName(component.Name)] = component.FactoryMethod\n\t}\n}", "func (container *Container) Register(factories ...interface{}) error {\n\tfor _, factory := range factories {\n\t\terr := container.RegisterOne(factory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (reg *Registry) MustRegister(cs ...Collector) {\n\tfor _, c := range cs {\n\t\tif err := reg.Register(c); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (r *Resolver) Units() (*[]*Unit, error) {\n\tvar result []*Unit\n\tfor _, theirUnit := range units.All() {\n\t\tourUnit, err := NewUnit(theirUnit.Name)\n\t\tif err != nil {\n\t\t\treturn &result, err\n\t\t}\n\t\tresult = append(result, &ourUnit)\n\t}\n\treturn &result, nil\n}", "func (g *Group) IsUnit(u *Unit) bool {\n\tif g != nil && u != nil {\n\t\tfor _, m := range g.units {\n\t\t\tif m.Equals(u) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (b *Builder) Register(br *sous.BuildResult) error {\n\tfor _, prod := range br.Products {\n\t\tif prod.Advisories.Contains(sous.IsBuilder) {\n\t\t\tmessages.ReportLogFieldsMessage(\"not pushing builder image\", logging.DebugLevel, b.log, prod)\n\t\t\tcontinue\n\t\t}\n\t\terr := 
b.pushToRegistry(prod)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.recordName(prod)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (o *EquipmentBaseSensor) HasUnits() bool {\n\tif o != nil && o.Units != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (app *App) Register(devices ...*message.Device) {\n\t// we are registering devices, we should also register for\n\t// discovery messages, but they are handled in the app.\n\t// we add a NoopHandler to the map to make the subscription work\n\t// we only do this if no handler is allready registered\n\tif _, ok := app.handler[queue.Inventory]; !ok {\n\t\tapp.SetHandler(queue.Inventory, app.inventoryHandler)\n\t}\n\n\tapp.deviceLock.Lock()\n\tdefer app.deviceLock.Unlock()\n\tfor _, device := range devices {\n\t\tfound := false\n\t\tfor _, d := range app.devices {\n\t\t\tif *device.ID == *d.device.ID {\n\t\t\t\td.lastSeen = time.Now()\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tapp.devices = append(app.devices, &appDevice{device: device, lastSeen: time.Now()})\n\t\t}\n\t}\n}", "func (r *Registry) RegisterGroups(groups ...*Group) error {\n\tfor _, group := range groups {\n\t\t_, ok := r.groupMap[group.ID]\n\t\tif ok {\n\t\t\treturn fmt.Errorf(\"duplicate group ID %q\", group.ID)\n\t\t}\n\n\t\terr := r.RegisterOutlets(group.Outlets...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tr.groupMap[group.ID] = group\n\t\tr.groups = append(r.groups, group)\n\n\t\tlog.WithField(\"groupID\", group.ID).Info(\"registered outlet group\")\n\t}\n\n\treturn nil\n}", "func MustRegister(collectors ...prometheus.Collector) {\n\tprometheus.MustRegister(PrometheusHTTPRequestCount)\n\tprometheus.MustRegister(PrometheusHTTPRequestLatency)\n\tprometheus.MustRegister(PrometheusHTTPResponseCount)\n\tprometheus.MustRegister(PrometheusWebsocketMessageCount)\n\tprometheus.MustRegister(PrometheusWebsocketMessageSize)\n\n\tfor _, collector := range collectors 
{\n\t\tprometheus.MustRegister(collector)\n\t}\n}", "func (r *Registry) Register(ds ...*Object) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.initDataStores()\n\n\tfor _, obj := range ds {\n\t\tname := obj.Name\n\t\tr.dataStores[name] = obj\n\t\tr.dataStores[name].Enabled = true\n\t}\n}", "func (p *exporterRegistry) Register(components ...Exporter) {\n\tfor _, component := range components {\n\t\tp.exporters[createFullName(component.Name)] = component.FactoryMethod\n\t}\n}", "func (g *Group) Units() []*Unit {\n\tif g == nil {\n\t\treturn nil\n\t}\n\treturn g.units\n}", "func (GM *GameManager) RegisterComponents() {\n\tGM.Components.Range(func(k, v interface{}) bool {\n\t\tcomponent := v.(ComponentInterface)\n\t\tcomponent.Register()\n\t\tGM.Log.Infof(\"Registered component %s\", k.(string))\n\t\treturn true\n\t})\n\n\tGM.Log.Info(\"Finished registering components...\")\n}", "func Register(aggregate Aggregate, events ...EventData) {\n\n\tfor _, event := range events {\n\t\teventType := event.AggregateType() +\n\t\t\t\".\" + event.Action() +\n\t\t\t\".\" + strconv.FormatUint(event.Version(), 10)\n\n\t\teventRegistry[eventType] = reflect.TypeOf(event)\n\t}\n}", "func RegisterAll(emitPerNodeGroupMetrics bool) 
{\n\tlegacyregistry.MustRegister(clusterSafeToAutoscale)\n\tlegacyregistry.MustRegister(nodesCount)\n\tlegacyregistry.MustRegister(nodeGroupsCount)\n\tlegacyregistry.MustRegister(unschedulablePodsCount)\n\tlegacyregistry.MustRegister(maxNodesCount)\n\tlegacyregistry.MustRegister(cpuCurrentCores)\n\tlegacyregistry.MustRegister(cpuLimitsCores)\n\tlegacyregistry.MustRegister(memoryCurrentBytes)\n\tlegacyregistry.MustRegister(memoryLimitsBytes)\n\tlegacyregistry.MustRegister(lastActivity)\n\tlegacyregistry.MustRegister(functionDuration)\n\tlegacyregistry.MustRegister(functionDurationSummary)\n\tlegacyregistry.MustRegister(errorsCount)\n\tlegacyregistry.MustRegister(scaleUpCount)\n\tlegacyregistry.MustRegister(gpuScaleUpCount)\n\tlegacyregistry.MustRegister(failedScaleUpCount)\n\tlegacyregistry.MustRegister(failedGPUScaleUpCount)\n\tlegacyregistry.MustRegister(scaleDownCount)\n\tlegacyregistry.MustRegister(gpuScaleDownCount)\n\tlegacyregistry.MustRegister(evictionsCount)\n\tlegacyregistry.MustRegister(unneededNodesCount)\n\tlegacyregistry.MustRegister(unremovableNodesCount)\n\tlegacyregistry.MustRegister(scaleDownInCooldown)\n\tlegacyregistry.MustRegister(oldUnregisteredNodesRemovedCount)\n\tlegacyregistry.MustRegister(overflowingControllersCount)\n\tlegacyregistry.MustRegister(skippedScaleEventsCount)\n\tlegacyregistry.MustRegister(napEnabled)\n\tlegacyregistry.MustRegister(nodeGroupCreationCount)\n\tlegacyregistry.MustRegister(nodeGroupDeletionCount)\n\tlegacyregistry.MustRegister(pendingNodeDeletions)\n\n\tif emitPerNodeGroupMetrics {\n\t\tlegacyregistry.MustRegister(nodesGroupMinNodes)\n\t\tlegacyregistry.MustRegister(nodesGroupMaxNodes)\n\t}\n}", "func (f *DefaultFilterEngine) Register(filters ...Filter) {\n\tfor _, filter := range filters {\n\t\tf.log.Infof(\"Registering filter %q\", filter.Name())\n\t\tf.filters[filter.Name()] = RegisteredFilter{\n\t\t\tFilter: filter,\n\t\t\tEnabled: true,\n\t\t}\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RegisterFlags returns FlagSet contains Flags in all modules.
func (g *Group) RegisterFlags() *FlagSet { // run configuration stage g.f = NewFlagSet(g.name) g.f.SortFlags = false // keep order of flag registration g.f.Usage = func() { fmt.Printf("Flags:\n") g.f.PrintDefaults() } gFS := NewFlagSet("Common Service options") gFS.SortFlags = false gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`) gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units") g.f.AddFlagSet(gFS.FlagSet) // register flags from attached Config objects fs := make([]*FlagSet, len(g.c)) for idx := range g.c { // a Namer might have been deregistered if g.c[idx] == nil { continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags") fs[idx] = g.c[idx].FlagSet() if fs[idx] == nil { // no FlagSet returned g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset") continue } fs[idx].VisitAll(func(f *pflag.Flag) { if g.f.Lookup(f.Name) != nil { // log duplicate flag g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag") return } g.f.AddFlag(f) }) } return g.f }
[ "func RegisterFlags() {\n\tregisterFlags()\n}", "func (a *BootstrapCommand) RegisterFlags(r codegen.FlagRegistry) {\n\tfor _, c := range BootstrapCommands {\n\t\tif c != a {\n\t\t\tc.RegisterFlags(r)\n\t\t}\n\t}\n}", "func (app *App) RegFlags() {\n\tif app.Flags.values == nil {\n\t\tapp.Flags.values = make(map[string]Flag)\n\t}\n\tapp.flagsRegistered = true\n\tfor _, v := range app.flagsQueue {\n\t\tapp.Flags.values[v.Name] = Flag{\n\t\t\tName: v.Name,\n\t\t\tDescription: v.Description,\n\t\t\tDefault: v.Default,\n\t\t\tValue: flag.String(v.Name, v.Default, v.Description),\n\t\t}\n\t}\n}", "func RegisterFlags(fs *flag.FlagSet) {\n\tfs.BoolVar(&native, \"native\", false, \"whether the test should be run natively\")\n\tfs.StringVar(&testbenchBinary, \"testbench_binary\", \"\", \"path to the testbench binary\")\n\tfs.BoolVar(&tshark, \"tshark\", false, \"use more verbose tshark in logs instead of tcpdump\")\n\tfs.Var(&extraTestArgs, \"extra_test_arg\", \"extra arguments to pass to the testbench\")\n\tfs.BoolVar(&expectFailure, \"expect_failure\", false, \"expect that the test will fail when run\")\n}", "func GetRegisterFlags() []cli.Flag {\n\treturn []cli.Flag{\n\t\tcli.IntFlag{\n\t\t\tName: FlagWeight,\n\t\t\tUsage: \"initial weight of this backend\",\n\t\t\tValue: 1,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: FlagDC,\n\t\t\tUsage: \"datacenter short name as defined in VaaS\",\n\t\t\tEnvVar: EnvDC,\n\t\t},\n\t}\n}", "func (d *Data) RegisterFlags(fs *flag.FlagSet) {\n\tfs.BoolVar(&d.Verbose, \"verbose\", false, \"\")\n\tfs.IntVar(&d.Server.Port, \"server.port\", 80, \"\")\n\tfs.DurationVar(&d.Server.Timeout, \"server.timeout\", 60*time.Second, \"\")\n\n\tfs.StringVar(&d.TLS.Cert, \"tls.cert\", \"DEFAULTCERT\", \"\")\n\tfs.StringVar(&d.TLS.Key, \"tls.key\", \"DEFAULTKEY\", \"\")\n}", "func (cfg *Config) RegisterFlags(f *flag.FlagSet) {\n\tcfg.RegisterFlagsWithPrefix(\"\", f)\n}", "func (cfg *Config) RegisterFlags(f *flag.FlagSet) {\n\tcfg.RegisterFlagsWithPrefix(f, 
\"\")\n}", "func (cmd *GetTasksCommand) RegisterFlags(cc *cobra.Command) {\n}", "func registerFlags(fs *flag.FlagSet) {\n\tfs.BoolVar(&Native, \"native\", Native, \"whether the test is running natively\")\n\tfs.DurationVar(&RPCKeepalive, \"rpc_keepalive\", RPCKeepalive, \"gRPC keepalive\")\n\tfs.StringVar(&dutInfosJSON, \"dut_infos_json\", dutInfosJSON, \"json that describes the DUTs\")\n}", "func RegisterFlags(flags *flag.FlagSet, s interface{}, opts ...Option) error {\n\tv := reflect.ValueOf(s)\n\tif v.Kind() != reflect.Struct {\n\t\treturn fmt.Errorf(\"unable to register flags for %q: not a struct type\", v.Type())\n\t}\n\to := getOpts(opts...)\n\treturn registerStructFields(flags, v, o)\n}", "func RegisterFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&sqliteDataFile, \"sqlite-data-file\", sqliteDataFile, \"SQLite Datafile to use as VTOrc's database\")\n\tfs.DurationVar(&instancePollTime, \"instance-poll-time\", instancePollTime, \"Timer duration on which VTOrc refreshes MySQL information\")\n\tfs.DurationVar(&snapshotTopologyInterval, \"snapshot-topology-interval\", snapshotTopologyInterval, \"Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiple of hours\")\n\tfs.DurationVar(&reasonableReplicationLag, \"reasonable-replication-lag\", reasonableReplicationLag, \"Maximum replication lag on replicas which is deemed to be acceptable\")\n\tfs.StringVar(&auditFileLocation, \"audit-file-location\", auditFileLocation, \"File location where the audit logs are to be stored\")\n\tfs.BoolVar(&auditToBackend, \"audit-to-backend\", auditToBackend, \"Whether to store the audit log in the VTOrc database\")\n\tfs.BoolVar(&auditToSyslog, \"audit-to-syslog\", auditToSyslog, \"Whether to store the audit log in the syslog\")\n\tfs.DurationVar(&auditPurgeDuration, \"audit-purge-duration\", auditPurgeDuration, \"Duration for which audit logs are held before being purged. 
Should be in multiples of days\")\n\tfs.DurationVar(&recoveryPeriodBlockDuration, \"recovery-period-block-duration\", recoveryPeriodBlockDuration, \"Duration for which a new recovery is blocked on an instance after running a recovery\")\n\tfs.BoolVar(&preventCrossCellFailover, \"prevent-cross-cell-failover\", preventCrossCellFailover, \"Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover\")\n\tfs.Duration(\"lock-shard-timeout\", 30*time.Second, \"Duration for which a shard lock is held when running a recovery\")\n\t_ = fs.MarkDeprecated(\"lock-shard-timeout\", \"Please use lock-timeout instead.\")\n\tfs.DurationVar(&waitReplicasTimeout, \"wait-replicas-timeout\", waitReplicasTimeout, \"Duration for which to wait for replica's to respond when issuing RPCs\")\n\tfs.DurationVar(&topoInformationRefreshDuration, \"topo-information-refresh-duration\", topoInformationRefreshDuration, \"Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server\")\n\tfs.DurationVar(&recoveryPollDuration, \"recovery-poll-duration\", recoveryPollDuration, \"Timer duration on which VTOrc polls its database to run a recovery\")\n\tfs.BoolVar(&ersEnabled, \"allow-emergency-reparent\", ersEnabled, \"Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary\")\n}", "func RegisterFlags(fs *pflag.FlagSet) {\n\tfs.StringSlice(\"config-path\", configPaths.Default(), \"Paths to search for config files in.\")\n\tfs.String(\"config-type\", configType.Default(), \"Config file type (omit to infer config type from file extension).\")\n\tfs.String(\"config-name\", configName.Default(), \"Name of the config file (without extension) to search for.\")\n\tfs.String(\"config-file\", configFile.Default(), \"Full path of the config file (with extension) to use. 
If set, --config-path, --config-type, and --config-name are ignored.\")\n\tfs.Duration(\"config-persistence-min-interval\", configPersistenceMinInterval.Default(), \"minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done).\")\n\n\tvar h = configFileNotFoundHandling.Default()\n\tfs.Var(&h, \"config-file-not-found-handling\", fmt.Sprintf(\"Behavior when a config file is not found. (Options: %s)\", strings.Join(handlingNames, \", \")))\n\n\tBindFlags(fs, configPaths, configType, configName, configFile, configFileNotFoundHandling, configPersistenceMinInterval)\n}", "func (c *Config) RegisterFlags(f *flag.FlagSet) {\n\tc.TLS.RegisterFlags(f)\n\tc.SASL.RegisterFlags(f)\n}", "func (b *AdapterBase) Flags() *pflag.FlagSet {\n\tb.initFlagSet()\n\tb.InstallFlags()\n\n\treturn b.FlagSet\n}", "func (c *DashboardLsCmd) RegisterFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&c.Conf.UID, \"uid\", \"\", \"dashboard UID\")\n}", "func (cfg *Config) RegisterFlags(f *flag.FlagSet) {\n\tcfg.Ring.RegisterFlags(\"index-gateway.\", \"collectors/\", f)\n\tf.StringVar((*string)(&cfg.Mode), \"index-gateway.mode\", SimpleMode.String(), \"Defines in which mode the index gateway server will operate (default to 'simple'). 
It supports two modes:\\n- 'simple': an index gateway server instance is responsible for handling, storing and returning requests for all indices for all tenants.\\n- 'ring': an index gateway server instance is responsible for a subset of tenants instead of all tenants.\")\n}", "func Flags() []xo.FlagSet {\n\tvar types []string\n\tfor typ := range loaders {\n\t\ttypes = append(types, typ)\n\t}\n\tsort.Strings(types)\n\tvar flags []xo.FlagSet\n\tfor _, typ := range types {\n\t\tl := loaders[typ]\n\t\tif l.Flags == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, flag := range l.Flags() {\n\t\t\tflags = append(flags, xo.FlagSet{\n\t\t\t\tType: typ,\n\t\t\t\tName: string(flag.ContextKey),\n\t\t\t\tFlag: flag,\n\t\t\t})\n\t\t}\n\t}\n\treturn flags\n}", "func RegisterFlagSets(cmd *cobra.Command, flagsets ...*pflag.FlagSet) {\n\tcommandFlagSets[cmd] = append(commandFlagSets[cmd], flagsets...)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RunConfig runs the Config phase of all registered Config aware Units. Only use this function if needing to add additional wiring between config and (pre)run phases and a separate PreRunner phase is not an option. In most cases it is best to use the Run method directly as it will run the Config phase prior to executing the PreRunner and Service phases. If an error is returned the application must shut down as it is considered fatal.
func (g *Group) RunConfig() (interrupted bool, err error) { g.log = logger.GetLogger(g.name) g.configured = true if g.name == "" { // use the binary name if custom name has not been provided g.name = path.Base(os.Args[0]) } defer func() { if err != nil { g.log.Error().Err(err).Msg("unexpected exit") } }() // Load config from env and file if err = config.Load(g.f.Name, g.f.FlagSet); err != nil { return false, errors.Wrapf(err, "%s fails to load config", g.f.Name) } // bail early on help or version requests switch { case g.showRunGroup: fmt.Println(g.ListUnits()) return true, nil } // Validate Config inputs for idx := range g.c { // a Config might have been deregistered during Run if g.c[idx] == nil { g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate") continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config") if vErr := g.c[idx].Validate(); vErr != nil { err = multierr.Append(err, vErr) } } // exit on at least one Validate error if err != nil { return false, err } // log binary name and version g.log.Info().Msg("started") return false, nil }
[ "func ConfigRun(ctx *cli.Context) error {\n\topt, err := InitOption(ctx)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"option error: %w\", err)\n\t}\n\n\t// Disable OS and language analyzers\n\topt.DisabledAnalyzers = append(analyzer.TypeOSes, analyzer.TypeLanguages...)\n\n\t// Scan only config files\n\topt.VulnType = nil\n\topt.SecurityChecks = []string{types.SecurityCheckConfig}\n\n\t// Run filesystem command internally\n\treturn run(ctx.Context, opt, filesystemArtifact)\n}", "func ConfigRun(ctx *cli.Context) error {\n\topt, err := NewOption(ctx)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"option error: %w\", err)\n\t}\n\n\t// initialize options\n\tif err = opt.Init(); err != nil {\n\t\treturn xerrors.Errorf(\"failed to initialize options: %w\", err)\n\t}\n\n\t// Scan only config files\n\topt.VulnType = nil\n\topt.SecurityChecks = []string{types.SecurityCheckConfig}\n\n\t// Skip downloading vulnerability DB\n\topt.SkipDBUpdate = true\n\n\t// Run filesystem command internally\n\treturn Run(ctx.Context, opt, filesystemScanner, initFSCache)\n}", "func RunForConfig(ctx context.Context, cfg Config, init func(*InitData) error) error {\n\tlogger, err := newLogger(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tlogger.Info(\"Starting server\",\n\t\tzap.String(\"network\", cfg.GetNetwork()),\n\t\tzap.String(\"address\", cfg.GetAddress()),\n\t)\n\tlistener, err := net.Listen(cfg.GetNetwork(), cfg.GetAddress())\n\tif err != nil {\n\t\treturn err\n\t}\n\ts := grpc.NewServer(grpc.UnaryInterceptor(newSecretChecker(cfg).Intercept))\n\tif err = init(&InitData{\n\t\tLogger: logger,\n\t\tServer: s,\n\t\tListener: listener,\n\t}); err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\t<-ctx.Done()\n\t\ts.GracefulStop()\n\t}()\n\terr = s.Serve(listener)\n\tlogger.Info(\"Server stopped\")\n\treturn err\n}", "func (c *ConfigController) Run(threadiness int, stopCh <-chan struct{}) error {\n\tdefer func() 
{\n\t\tc.queue.ShutDown()\n\t}()\n\n\tglog.V(3).Infoln(\"Creating CDI config\")\n\tif _, err := CreateCDIConfig(c.client, c.cdiClientSet, c.configName); err != nil {\n\t\truntime.HandleError(err)\n\t\treturn errors.Wrap(err, \"Error creating CDI config\")\n\t}\n\n\tglog.V(3).Infoln(\"Starting config controller Run loop\")\n\tif threadiness < 1 {\n\t\treturn errors.Errorf(\"expected >0 threads, got %d\", threadiness)\n\t}\n\n\tif ok := cache.WaitForCacheSync(stopCh, c.ingressesSynced, c.routesSynced); !ok {\n\t\treturn errors.New(\"failed to wait for caches to sync\")\n\t}\n\n\tglog.V(3).Infoln(\"ConfigController cache has synced\")\n\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\tglog.Info(\"Started workers\")\n\t<-stopCh\n\tglog.Info(\"Shutting down workers\")\n\treturn nil\n}", "func Run(a *config.Args) error {\n\tfor {\n\t\t// copy the baseline config\n\t\tcfg := *a\n\n\t\t// load the config file\n\t\tif err := fetchConfig(&cfg); err != nil {\n\t\t\tif cfg.StartupOptions.ConfigRepo != \"\" {\n\t\t\t\tlog.Errorf(\"Unable to load configuration file, waiting for 1 minute and then will try again: %v\", err)\n\t\t\t\ttime.Sleep(time.Minute)\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"unable to load configuration file: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tif err := serve(&cfg); err != nil {\n\t\t\tif cfg.StartupOptions.ConfigRepo != \"\" {\n\t\t\t\tlog.Errorf(\"Unable to initialize server likely due to bad config, waiting for 1 minute and then will try again: %v\", err)\n\t\t\t\ttime.Sleep(time.Minute)\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"unable to initialize server: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Infof(\"Configuration change detected, attempting to reload configuration\")\n\t\t}\n\t}\n}", "func (fr *Runner) RunConfigs(cfgs ...Config) (stdout, stderr string, err error) {\n\targs := fr.argsFromConfigs(append([]Config{fr.Global}, cfgs...)...)\n\n\treturn fr.Run(args...)\n}", 
"func RunConfigs() {\n\tloggingSetup()\n}", "func (cmd *GenerateConfigCommand) Run(_ context.Context) error {\n\tconf := server.NewConfig()\n\tret, err := toml.Marshal(*conf)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unmarshalling default config\")\n\t}\n\tfmt.Fprintf(cmd.stdout, \"%s\\n\", ret)\n\treturn nil\n}", "func RunConfig(pair *Pair, port int, getCommand func(config Config, port int) (cmd *exec.Cmd, formatted string)) {\n\tvar err error\n\t// Create client/server log files\n\tif err = createLogs(pair); err != nil {\n\t\tlog.Debugf(\"Failed to create logs for % client and %s server\", pair.Client.Name, pair.Server.Name)\n\t\treportCrossrunnerFailure(pair, err)\n\t\treturn\n\t}\n\tdefer pair.Client.Logs.Close()\n\tdefer pair.Server.Logs.Close()\n\n\t// Get server and client command structs\n\tserver, serverCmd := getCommand(pair.Server, port)\n\tclient, clientCmd := getCommand(pair.Client, port)\n\n\t// write server log header\n\tlog.Debug(serverCmd)\n\n\tif err = writeFileHeader(pair.Server.Logs, serverCmd, pair.Server.Workdir,\n\t\tpair.Server.Timeout, pair.Client.Timeout); err != nil {\n\t\tlog.Debugf(\"Failed to write header to %s\", pair.Server.Logs.Name())\n\t\treportCrossrunnerFailure(pair, err)\n\t\treturn\n\t}\n\n\t// start the server\n\tsStartTime := time.Now()\n\tif err = server.Start(); err != nil {\n\t\tlog.Debugf(\"Failed to start %s server\", pair.Server.Name)\n\t\treportCrossrunnerFailure(pair, err)\n\t\treturn\n\t}\n\t// Defer stopping the server to ensure the process is killed on exit\n\tdefer func() {\n\t\tif err = server.Process.Kill(); err != nil {\n\t\t\treportCrossrunnerFailure(pair, err)\n\t\t\tlog.Info(\"Failed to kill \" + pair.Server.Name + \" server.\")\n\t\t\treturn\n\t\t}\n\t}()\n\tstimeout := pair.Server.Timeout * time.Millisecond * 1000\n\tvar total time.Duration\n\t// Poll the server healthcheck until it returns a valid status code or exceeds the timeout\n\tfor total <= stimeout {\n\t\t// If the server hasn't started 
within the specified timeout, fail the test\n\t\tresp, err := http.Get(fmt.Sprintf(\"http://localhost:%d\", port))\n\t\tif err != nil {\n\t\t\ttime.Sleep(time.Millisecond * 250)\n\t\t\ttotal += (time.Millisecond * 250)\n\t\t\tcontinue\n\t\t}\n\t\tresp.Close = true\n\t\tresp.Body.Close()\n\t\tbreak\n\t}\n\n\tif total >= stimeout {\n\t\tif err = writeServerTimeout(pair.Server.Logs, pair.Server.Name); err != nil {\n\t\t\tlog.Debugf(\"Failed to write server timeout to %s\", pair.Server.Logs.Name)\n\t\t\treportCrossrunnerFailure(pair, err)\n\t\t\treturn\n\t\t}\n\t\tpair.ReturnCode = TestFailure\n\t\tpair.Err = errors.New(\"Server has not started within the specified timeout\")\n\t\tlog.Debug(pair.Server.Name + \" server not started within specified timeout\")\n\t\t// Even though the healthcheck server hasn't started, the process has.\n\t\t// Process is killed in the deferred function above\n\t\treturn\n\t}\n\n\t// write client log header\n\tif err = writeFileHeader(pair.Client.Logs, clientCmd, pair.Client.Workdir,\n\t\tpair.Server.Timeout, pair.Client.Timeout); err != nil {\n\t\tlog.Debugf(\"Failed to write header to %s\", pair.Client.Logs.Name())\n\t\treportCrossrunnerFailure(pair, err)\n\t\treturn\n\t}\n\n\t// start client\n\tdone := make(chan error, 1)\n\tlog.Debug(clientCmd)\n\tcStartTime := time.Now()\n\n\tif err = client.Start(); err != nil {\n\t\tlog.Debugf(\"Failed to start %s client\", pair.Client.Name)\n\t\tpair.ReturnCode = TestFailure\n\t\tpair.Err = err\n\t}\n\n\tgo func() {\n\t\tdone <- client.Wait()\n\t}()\n\n\tselect {\n\tcase <-time.After(pair.Client.Timeout * time.Second):\n\t\t// TODO: It's a bit annoying to have this message duplicated in the\n\t\t// unexpected_failures.log. 
Is there a better way to report this?\n\t\tif err = writeClientTimeout(pair, pair.Client.Name); err != nil {\n\t\t\tlog.Debugf(\"Failed to write timeout error to %s\", pair.Client.Logs.Name())\n\n\t\t\treportCrossrunnerFailure(pair, err)\n\t\t\treturn\n\t\t}\n\n\t\tif err = client.Process.Kill(); err != nil {\n\t\t\tlog.Infof(\"Error killing %s\", pair.Client.Name)\n\t\t\treportCrossrunnerFailure(pair, err)\n\t\t\treturn\n\t\t}\n\t\tpair.ReturnCode = TestFailure\n\t\tpair.Err = errors.New(\"Client has not completed within the specified timeout\")\n\t\tbreak\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"Error in %s client\", pair.Client.Name)\n\t\t\tpair.ReturnCode = TestFailure\n\t\t\tpair.Err = err\n\t\t}\n\t}\n\n\t// write log footers\n\tif err = writeFileFooter(pair.Client.Logs, time.Since(cStartTime)); err != nil {\n\t\tlog.Debugf(\"Failed to write footer to %s\", pair.Client.Logs.Name())\n\t\treportCrossrunnerFailure(pair, err)\n\t\treturn\n\t}\n\tif err = writeFileFooter(pair.Server.Logs, time.Since(sStartTime)); err != nil {\n\t\tlog.Debugf(\"Failed to write footer to %s\", pair.Client.Logs.Name())\n\t\treportCrossrunnerFailure(pair, err)\n\t\treturn\n\t}\n}", "func (cmd *GenerateConfigCommand) Run(_ context.Context) error {\n\tconf := server.NewConfig()\n\tret, err := toml.Marshal(*conf)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unmarshaling default config\")\n\t}\n\tfmt.Fprintf(cmd.Stdout, \"%s\\n\", ret)\n\treturn nil\n}", "func RunWithConfig(t *testing.T) {\n\tconfigPath := integration.GetConfigPath(\"config_e2e.yaml\")\n\n\tgw, err := gateway.Connect(\n\t\tgateway.WithConfig(config.FromFile(configPath)),\n\t\tgateway.WithUser(\"User1\"),\n\t)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create new Gateway: %s\", err)\n\t}\n\tdefer gw.Close()\n\n\tnw, err := gw.GetNetwork(channelID)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get network: %s\", err)\n\t}\n\n\tname := nw.Name()\n\tif name != channelID 
{\n\t\tt.Fatalf(\"Incorrect network name: %s\", name)\n\t}\n\n\tcontract := nw.GetContract(ccID)\n\n\tname = contract.Name()\n\tif name != ccID {\n\t\tt.Fatalf(\"Incorrect contract name: %s\", name)\n\t}\n\n\trunContract(contract, t)\n}", "func (cmd *PrintConfigCommand) Run(args ...string) error {\n\t// Parse command flags.\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tconfigPath := fs.String(\"config\", \"\", \"\")\n\tfs.Usage = func() {\n\t\tif _, err := fmt.Fprintln(os.Stderr, printConfigUsage); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t// Parse config from path.\n\topt := Options{ConfigPath: *configPath}\n\tconfig, err := cmd.parseConfig(opt.GetConfigPath())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse config: %s\", err)\n\t}\n\n\t// Validate the configuration.\n\tif err = config.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"%s. To generate a valid configuration file run `emailworker config > emailworker.generated.toml`\", err)\n\t}\n\n\tif err = toml.NewEncoder(cmd.Stdout).Encode(config); err != nil {\n\t\treturn fmt.Errorf(\"error encoding toml: %s\", err)\n\t}\n\t_, err = fmt.Fprint(cmd.Stdout, \"\\n\")\n\treturn err\n}", "func runConfig(cfg Config, root string) []error {\n\tfiles, err := gatherFiles(root, cfg)\n\tif err != nil {\n\t\treturn []error{fmt.Errorf(\"Failed to gather files: %w\", err)}\n\t}\n\n\tfmt.Printf(\"Scanning %d files...\\n\", len(files))\n\n\tvar wg sync.WaitGroup\n\terrs := make([]error, len(files))\n\tfor i, file := range files {\n\t\ti, file := i, file\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terrs[i] = examine(root, file, cfg)\n\t\t}()\n\t}\n\twg.Wait()\n\n\treturn removeNilErrs(errs)\n}", "func Run(config *Config) error {\n\t//pid configuration\n\tif pidErr := pidfile.SavePid(config.ProcessConfig); pidErr != nil {\n\t\tblog.Errorf(\"bmsf-mesos-adaptor save pid file failed, %s\", pidErr)\n\t}\n\n\t// register to bcs 
service layer, just for health check\n\t// no need to process discover event\n\tif len(config.BCSZk) != 0 {\n\t\tconfig.BCSZk = strings.Replace(config.BCSZk, \";\", \",\", -1)\n\t\tbcsDiscover, bcsDiscoverEvent, err := rdiscover.NewAdapterDiscover(\n\t\t\tconfig.BCSZk, config.Address, config.Cluster, config.MetricPort)\n\t\tif err != nil {\n\t\t\tblog.Warnf(\"new bcs zookeeper %s Discover failed, err %s\", config.BCSZk, err.Error())\n\t\t} else {\n\t\t\tgo bcsDiscover.Start()\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase curEvent := <-bcsDiscoverEvent:\n\t\t\t\t\t\tblog.Infof(\"found bcs service discover event %s\", curEvent)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t}\n\n\t// create AdapterDiscover\n\tconfig.Zookeeper = strings.Replace(config.Zookeeper, \";\", \",\", -1)\n\tadapterDiscover, discoverEvent, err := rdiscover.NewAdapterDiscover(\n\t\tconfig.Zookeeper, config.Address, config.Cluster, config.MetricPort)\n\tif err != nil {\n\t\tblog.Errorf(\"new zookeeper %s Discover failed, err %s\", config.Zookeeper, err.Error())\n\t\treturn fmt.Errorf(\"new zookeeper %s Discover failed, err %s\", config.Zookeeper, err.Error())\n\t}\n\tgo adapterDiscover.Start()\n\n\t// create server\n\tserver := NewServer(config)\n\thandleEvent(server, config, discoverEvent)\n\treturn nil\n}", "func (c *ConfigAutoLoader) Run() {\n\tvar err error\n\tgo AutoLoaderErrorChan.HandleError() // spin out a new goroutine to handle errors that are not fatal\n\tfor {\n\t\t// Handle Not set errors\n\t\tif c.Location == \"\" {\n\t\t\terr = fmt.Errorf(\"No Config location set yet. time = %v\", time.Now())\n\t\t\tAutoLoaderErrorChan <- err\n\t\t\tcontinue\n\t\t}\n\t\tif c.Config == nil {\n\t\t\terr = fmt.Errorf(\"No Config set yet. time = %v\", time.Now())\n\t\t\tAutoLoaderErrorChan <- err\n\t\t\tcontinue\n\t\t}\n\t\tif c.Rr == 0 {\n\t\t\terr = fmt.Errorf(\"No Refresh Rate Set set yet. 
time = %v\", time.Now())\n\t\t\tAutoLoaderErrorChan <- err\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read config.json and set the config\n\t\tconfigBytes, er := ioutil.ReadFile(c.Location)\n\t\tif er != nil {\n\t\t\terr = fmt.Errorf(\"Error while Reading File. %v\", er)\n\t\t\tAutoLoaderErrorChan <- err\n\t\t\tcontinue\n\t\t}\n\t\tc.Config.SetFromJSONParsed(configBytes)\n\n\t\ttime.Sleep(time.Duration(c.Rr) * time.Second) // Sleep untill it is time to set the json again\n\n\t}\n}", "func NewRunConfigController(ctx context.Context, cfg Config, client clientv3.KubeControllersConfigurationInterface) *RunConfigController {\n\tctrl := &RunConfigController{out: make(chan RunConfig)}\n\tgo syncDatastore(ctx, cfg, client, ctrl.out)\n\treturn ctrl\n}", "func (service *linuxUpstartService) Run(config *Config) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Printf(\"******> SERVICE PANIC: %s\\n\", r)\n\t\t}\n\t}()\n\n\tfmt.Print(\"******> Initing Service\\n\")\n\n\tif config.Init != nil {\n\t\terr = config.Init()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfmt.Print(\"******> Starting Service\\n\")\n\n\tif config.Start != nil {\n\t\terr = config.Start()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfmt.Print(\"******> Service Started\\n\")\n\n\t// Create a channel to talk with the OS\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, os.Interrupt)\n\n\t// Wait for an event\n\t<-sigChan\n\n\tfmt.Print(\"******> Service Shutting Down\\n\")\n\n\tif config.Stop != nil {\n\t\terr = config.Stop()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfmt.Print(\"******> Service Down\\n\")\n\treturn err\n}", "func (o *Options) Run(ctx context.Context) error {\n\tklog.Infof(\"karmada-metrics-adapter version: %s\", version.Get())\n\n\tmetricsServer, err := o.Config()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn metricsServer.StartServer(ctx.Done())\n}", "func (sm *Simulator) RunConfigs(configs []SimulationConfig) 
{\n\tfor _, c := range configs {\n\t\tstart := time.Now()\n\t\tp, _ := sm.Run(c.K, c.S, c.NumLN)\n\t\telapsed := time.Since(start) / time.Millisecond\n\t\tfmt.Printf(\"k=%v, s=%v, c=%v => p=%v %dms\\n\", c.K, c.S, c.NumLN, p, elapsed)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run will execute all phases of all registered Units and block until an error occurs. If RunConfig has been called prior to Run, the Group's Config phase will be skipped and Run continues with the PreRunner and Service phases. The following phases are executed in the following sequence: Config phase (serially, in order of Unit registration) FlagSet() Get & register all FlagSets from Config Units. Flag Parsing Using the provided args (os.Args if empty) Validate() Validate Config Units. Exit on first error. PreRunner phase (serially, in order of Unit registration) PreRun() Execute PreRunner Units. Exit on first error. Service phase (concurrently) Serve() Execute all Service Units in separate Go routines. Wait Block until one of the Serve() methods returns GracefulStop() Call interrupt handlers of all Service Units. Run will return with the originating error on: first Config.Validate() returning an error first PreRunner.PreRun() returning an error first Service.Serve() returning (error or nil)
func (g *Group) Run() (err error) { // run config registration and flag parsing stages if interrupted, errRun := g.RunConfig(); interrupted || errRun != nil { return errRun } defer func() { if err != nil { g.log.Fatal().Err(err).Stack().Msg("unexpected exit") } }() // execute pre run stage and exit on error for idx := range g.p { // a PreRunner might have been deregistered during Run if g.p[idx] == nil { continue } g.log.Debug().Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.p))).Str("name", g.p[idx].Name()).Msg("pre-run") if err := g.p[idx].PreRun(); err != nil { return err } } swg := &sync.WaitGroup{} swg.Add(len(g.s)) go func() { swg.Wait() close(g.readyCh) }() // feed our registered services to our internal run.Group for idx := range g.s { // a Service might have been deregistered during Run s := g.s[idx] if s == nil { continue } g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("serve") g.r.Add(func() error { notify := s.Serve() swg.Done() <-notify return nil }, func(_ error) { g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("stop") s.GracefulStop() }) } // start registered services and block return g.r.Run() }
[ "func (s *ServiceManager) Run() error {\n\tif err := CheckAllRegisteredServices(); err != nil {\n\t\treturn err\n\t}\n\t// Run all service\n\tglog.Infof(\"There are %d service in iothub\", len(s.services))\n\tfor _, service := range s.services {\n\t\tglog.Infof(\"Starting service:'%s'...\", service.Name())\n\t\tgo service.Start()\n\t}\n\t// Wait all service to terminate in main context\n\tfor name, ch := range s.chs {\n\t\t<-ch\n\t\tglog.Info(\"Servide(%s) is terminated\", name)\n\t}\n\treturn nil\n}", "func (s *VMTServer) Run(_ []string) error {\n\tif err := s.checkFlag(); err != nil {\n\t\tglog.Errorf(\"check flag failed:%v. abort.\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tglog.V(3).Infof(\"spec path is: %v\", s.K8sTAPSpec)\n\tk8sTAPSpec, err := kubeturbo.ParseK8sTAPServiceSpec(s.K8sTAPSpec)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to generate correct TAP config: %v\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tkubeConfig := s.createKubeConfigOrDie()\n\tkubeClient := s.createKubeClientOrDie(kubeConfig)\n\tkubeletClient := s.createKubeletClientOrDie(kubeConfig)\n\tprobeConfig := s.createProbeConfigOrDie(kubeConfig, kubeletClient)\n\tbroker := turbostore.NewPodBroker()\n\n\tvmtConfig := kubeturbo.NewVMTConfig2()\n\tvmtConfig.WithTapSpec(k8sTAPSpec).\n\t\tWithKubeClient(kubeClient).\n\t\tWithKubeletClient(kubeletClient).\n\t\tWithProbeConfig(probeConfig).\n\t\tWithBroker(broker).\n\t\tWithK8sVersion(s.K8sVersion).\n\t\tWithNoneScheduler(s.NoneSchedulerName).\n\t\tWithRecorder(createRecorder(kubeClient))\n\tglog.V(3).Infof(\"Finished creating turbo configuration: %+v\", vmtConfig)\n\n\tvmtService := kubeturbo.NewKubeturboService(vmtConfig)\n\trun := func(_ <-chan struct{}) {\n\t\tvmtService.Run()\n\t\tselect {}\n\t}\n\n\tgo s.startHttp()\n\n\t//if !s.LeaderElection.LeaderElect {\n\tglog.V(2).Infof(\"No leader election\")\n\trun(nil)\n\n\tglog.Fatal(\"this statement is unreachable\")\n\tpanic(\"unreachable\")\n}", "func (a *App) Run(ctx context.Context) error 
{\n\n\t// 1. instantiate all components\n\n\tfor _, runnable := range a.runnables {\n\t\ta.logger(\"building: %s\", runnable)\n\n\t\t_, err := a.getValue(runnable)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"building %s: %w\", runnable, err)\n\t\t}\n\t}\n\n\t// 2. start all runnable\n\tctx, cancelCtx := context.WithCancel(ctx)\n\tdefer cancelCtx()\n\n\tvar wg sync.WaitGroup\n\n\tfor _, runnable := range a.runnables {\n\t\ta.logger(\"starting: %s\", runnable)\n\t\terr := a.start(ctx, runnable, &wg)\n\t\tif err != nil {\n\t\t\ta.logger(\"runnable failed to start: %s\", runnable)\n\t\t\tcancelCtx()\n\t\t\tbreak\n\t\t}\n\t}\n\n\t<-ctx.Done()\n\ta.logger(\"context cancelled, starting shutdown\")\n\n\t// 3. TODO: cancel component in reverse order\n\n\twg.Wait()\n\n\treturn nil\n}", "func (g *Group) RunConfig() (interrupted bool, err error) {\n\tg.log = logger.GetLogger(g.name)\n\tg.configured = true\n\n\tif g.name == \"\" {\n\t\t// use the binary name if custom name has not been provided\n\t\tg.name = path.Base(os.Args[0])\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tg.log.Error().Err(err).Msg(\"unexpected exit\")\n\t\t}\n\t}()\n\n\t// Load config from env and file\n\tif err = config.Load(g.f.Name, g.f.FlagSet); err != nil {\n\t\treturn false, errors.Wrapf(err, \"%s fails to load config\", g.f.Name)\n\t}\n\n\t// bail early on help or version requests\n\tswitch {\n\tcase g.showRunGroup:\n\t\tfmt.Println(g.ListUnits())\n\t\treturn true, nil\n\t}\n\n\t// Validate Config inputs\n\tfor idx := range g.c {\n\t\t// a Config might have been deregistered during Run\n\t\tif g.c[idx] == nil {\n\t\t\tg.log.Debug().Uint32(\"ran\", uint32(idx+1)).Msg(\"skipping validate\")\n\t\t\tcontinue\n\t\t}\n\t\tg.log.Debug().Str(\"name\", g.c[idx].Name()).Uint32(\"ran\", uint32(idx+1)).Uint32(\"total\", uint32(len(g.c))).Msg(\"validate config\")\n\t\tif vErr := g.c[idx].Validate(); vErr != nil {\n\t\t\terr = multierr.Append(err, vErr)\n\t\t}\n\t}\n\n\t// exit on at least one Validate 
error\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// log binary name and version\n\tg.log.Info().Msg(\"started\")\n\n\treturn false, nil\n}", "func (a *Agent) Run() error {\n\ta.Cache = cache.NewCache()\n\n\terr := a.initPlugins()\n\tdefer a.stopPlugins()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = a.bootstrap()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = a.initEndpoints()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Main event loop\n\ta.config.Log.Info(\"SPIRE Agent is now running\")\n\tfor {\n\t\tselect {\n\t\tcase err = <-a.config.ErrorCh:\n\t\t\treturn err\n\t\tcase <-a.config.ShutdownCh:\n\t\t\ta.grpcServer.GracefulStop()\n\t\t\treturn <-a.config.ErrorCh\n\t\t}\n\t}\n}", "func (bs *BusinessServer) Run() {\n\t// initialize config.\n\tbs.initConfig()\n\n\t// initialize logger.\n\tbs.initLogger()\n\tdefer bs.Stop()\n\n\t// initialize server modules.\n\tbs.initMods()\n\n\t// register businessserver service.\n\tgo func() {\n\t\tif err := bs.service.Register(bs.etcdCfg); err != nil {\n\t\t\tlogger.Fatal(\"register service for discovery, %+v\", err)\n\t\t}\n\t}()\n\tlogger.Info(\"register service for discovery success.\")\n\n\t// run service.\n\ts := grpc.NewServer(grpc.MaxRecvMsgSize(math.MaxInt32))\n\tpb.RegisterBusinessServer(s, bs)\n\tlogger.Info(\"Business Server running now.\")\n\n\tif err := s.Serve(bs.lis); err != nil {\n\t\tlogger.Fatal(\"start businessserver gRPC service. 
%+v\", err)\n\t}\n}", "func Run() int {\n\tpflag.Parse()\n\tpopulateAvailableKubeconfigs()\n\n\tif len(availableKubeconfigs) == 0 {\n\t\tprintKubeConfigHelpOutput()\n\t\treturn 2\n\t}\n\n\t// DEBUG\n\tfmt.Println(availableKubeconfigs)\n\treturn 0\n}", "func (mgr *ServiceManager) Run() {\n\t// get local copy of the repository\n\trepo = repository.R()\n\n\t// init all the services to the starting state\n\tfor _, s := range mgr.svc {\n\t\ts.init()\n\t}\n\n\t// start services\n\tfor _, s := range mgr.svc {\n\t\ts.run()\n\t}\n}", "func (srv *SrvFrm) Run() {\n\tflagVersion := flag.Bool(\"version\", false, \"Print server version\")\n\tflagConfigFile := flag.String(\"config\", \"config.yml\", \"Configuration file path\")\n\tflagDefaultConfig := flag.Bool(\"default\", false, \"Output default configuration\")\n\n\tflag.Parse()\n\n\tif *flagVersion {\n\t\tsrv.printVersion()\n\t\treturn\n\t}\n\n\tif *flagDefaultConfig {\n\t\tfmt.Printf(\"%s\", srv.DefaultConfig)\n\t\treturn\n\t}\n\n\terr := srv.loadConfig(*flagConfigFile)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\terr = srv.loadLog()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer destroyLog()\n\n\terr = srv.loadDatabase()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer srv.destroyDatabase()\n\n\tsrv.runServer()\n}", "func (service *linuxUpstartService) Run(config *Config) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Printf(\"******> SERVICE PANIC: %s\\n\", r)\n\t\t}\n\t}()\n\n\tfmt.Print(\"******> Initing Service\\n\")\n\n\tif config.Init != nil {\n\t\terr = config.Init()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfmt.Print(\"******> Starting Service\\n\")\n\n\tif config.Start != nil {\n\t\terr = config.Start()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfmt.Print(\"******> Service Started\\n\")\n\n\t// Create a channel to talk with the OS\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, os.Interrupt)\n\n\t// Wait for an 
event\n\t<-sigChan\n\n\tfmt.Print(\"******> Service Shutting Down\\n\")\n\n\tif config.Stop != nil {\n\t\terr = config.Stop()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfmt.Print(\"******> Service Down\\n\")\n\treturn err\n}", "func Run(cfg *Config) error {\n\t// Hook interceptor for os signals.\n\tshutdownInterceptor, err := signal.Intercept()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogWriter := build.NewRotatingLogWriter()\n\tSetupLoggers(logWriter, shutdownInterceptor)\n\n\t// Special show command to list supported subsystems and exit.\n\tif cfg.DebugLevel == \"show\" {\n\t\tfmt.Printf(\"Supported subsystems: %v\\n\",\n\t\t\tlogWriter.SupportedSubsystems())\n\t\tos.Exit(0)\n\t}\n\n\t// Initialize logging at the default logging level.\n\terr = logWriter.InitLogRotator(\n\t\tfilepath.Join(cfg.LogDir, DefaultLogFilename),\n\t\tcfg.MaxLogFileSize, cfg.MaxLogFiles,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = build.ParseAndSetDebugLevels(cfg.DebugLevel, logWriter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttrader := NewServer(cfg)\n\terr = trader.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to start server: %v\", err)\n\t}\n\t<-shutdownInterceptor.ShutdownChannel()\n\treturn trader.Stop()\n}", "func (c *Command) Run(args []string) {\n\tservice := servd.New()\n\tc.loadAndValidateConfig()\n\n\tswitch args[0] {\n\tcase inquiry:\n\t\tservice.Inquiry()\n\tcase payment:\n\t\tservice.Payment()\n\tcase checkStatus:\n\t\tservice.CheckStatus()\n\tdefault:\n\t\tlog.Println(\"please specify the available command (inquiry, payment, checkstatus)\")\n\t}\n}", "func (s *Service) Run(host, port string) error {\n\t// let's gooooo\n\ts.l.Infow(\"spinning up core service\",\n\t\t\"core.host\", host,\n\t\t\"core.port\", port)\n\tlistener, err := net.Listen(\"tcp\", host+\":\"+port)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.grpc.Serve(listener); err != nil {\n\t\ts.l.Errorf(\"error encountered - service stopped\",\n\t\t\t\"error\", 
err)\n\t\treturn err\n\t}\n\n\t// report shutdown\n\ts.l.Info(\"service shut down\")\n\treturn nil\n}", "func (o *Options) Run(ctx context.Context) error {\n\tklog.Infof(\"karmada-aggregated-apiserver version: %s\", version.Get())\n\n\tprofileflag.ListenAndServe(o.ProfileOpts)\n\n\tconfig, err := o.Config()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trestConfig := config.GenericConfig.ClientConfig\n\trestConfig.QPS, restConfig.Burst = o.KubeAPIQPS, o.KubeAPIBurst\n\tkubeClientSet := kubernetes.NewForConfigOrDie(restConfig)\n\tsecretLister := config.GenericConfig.SharedInformerFactory.Core().V1().Secrets().Lister()\n\n\tserver, err := config.Complete().New(kubeClientSet, secretLister)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tserver.GenericAPIServer.AddPostStartHookOrDie(\"start-aggregated-server-informers\", func(context genericapiserver.PostStartHookContext) error {\n\t\tconfig.GenericConfig.SharedInformerFactory.Start(context.StopCh)\n\t\to.SharedInformerFactory.Start(context.StopCh)\n\t\treturn nil\n\t})\n\n\treturn server.GenericAPIServer.PrepareRun().Run(ctx.Done())\n}", "func (p provision) Run(args []string) int {\n\t// TODO: Refactor arg parse.\n\t// TODO: Refactor exit status.\n\tfor _, arg := range args {\n\t\tif arg == \"start\" {\n\t\t\tcommand.ExecuteWithOutput(\"provision-api\")\n\t\t}\n\t}\n\treturn 0\n}", "func (phase *EtcdSetupPhase) Run() error {\n\tvar etcdNodes []clustermanager.Node\n\tcluster := phase.clusterManager.Cluster()\n\n\tif cluster.IsolatedEtcd {\n\t\tetcdNodes = phase.provider.GetEtcdNodes()\n\t} else {\n\t\tetcdNodes = phase.provider.GetMasterNodes()\n\t}\n\n\terr := phase.clusterManager.InstallEtcdNodes(etcdNodes, phase.options.KeepData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (ts TestSuite) Run() int {\n\tif ts.Options == nil {\n\t\tvar err error\n\t\tts.Options, err = getDefaultOptions()\n\t\tif err != nil {\n\t\t\treturn exitOptionError\n\t\t}\n\t}\n\tif ts.Options.FS == nil {\n\t\tts.Options.FS = 
storage.FS{}\n\t}\n\tif ts.Options.ShowHelp {\n\t\tflag.CommandLine.Usage()\n\n\t\treturn 0\n\t}\n\n\tr := runner{testSuiteInitializer: ts.TestSuiteInitializer, scenarioInitializer: ts.ScenarioInitializer}\n\treturn runWithOptions(ts.Name, r, *ts.Options)\n}", "func (c *ContainerExecutor) Run(opts ifc.RunOptions) error {\n\tlog.Print(\"starting generic container\")\n\n\tif c.Options.ClusterName != \"\" {\n\t\tcleanup, err := c.SetKubeConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer cleanup()\n\t}\n\n\tinput, err := bundleReader(c.ExecutorBundle)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO this logic is redundant in executor package, move it to pkg/container\n\tvar output io.Writer\n\tif c.ResultsDir == \"\" {\n\t\t// set output only if the output if resulting directory is not defined\n\t\toutput = os.Stdout\n\t}\n\tif err = c.setConfig(); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO check the executor type when dryrun is set\n\tif opts.DryRun {\n\t\tlog.Print(\"DryRun execution finished\")\n\t\treturn nil\n\t}\n\n\terr = c.ClientFunc(c.ResultsDir, input, output, c.Container, c.MountBasePath).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Print(\"execution of the generic container finished\")\n\treturn nil\n}", "func (t *Task) Run() error {\n\tif err := t.init(); err != nil {\n\t\treturn liberr.Wrap(err)\n\t}\n\tt.Log.Infof(\"[START] Phase %s\", t.Phase)\n\tdefer t.updatePipeline()\n\n\t// Run the current phase.\n\tswitch t.Phase {\n\tcase Started, Rollback:\n\t\treturn t.next()\n\tcase QuiesceApplications:\n\t\terr := t.quiesceApplications()\n\t\tif err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase EnsureQuiesced:\n\t\tfor {\n\t\t\tquiesced, err := t.ensureQuiescedPodsTerminated()\n\t\t\tif err != nil {\n\t\t\t\treturn liberr.Wrap(err)\n\t\t\t}\n\t\t\tif quiesced {\n\t\t\t\treturn t.next()\n\t\t\t} else {\n\t\t\t\t// TODO add timeout here\n\t\t\t\tt.Log.Info(\"Quiesce in source cluster is incomplete. 
\" +\n\t\t\t\t\t\"Pods are not yet terminated, waiting.\")\n\t\t\t\ttime.Sleep(PollInterval)\n\t\t\t}\n\t\t}\n\tcase ExportSrcManifests:\n\t\tif err := t.createManifestFile(); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\tif err := t.runBackup(); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase ChangePVReclaimPolicy:\n\t\terr := t.changePVReclaimPolicy()\n\t\tif err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase RegisterFCD:\n\t\terr := t.registerFCD()\n\t\tif err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase StaticallyProvisionDestPV:\n\t\terr := t.createDestNamespaces()\n\t\tif err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\terr = t.staticallyProvisionDestPV()\n\t\tif err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase EnsurePVCBond:\n\t\tfor {\n\t\t\tbound, err := t.ensurePVCBond()\n\t\t\tif err != nil {\n\t\t\t\treturn liberr.Wrap(err)\n\t\t\t}\n\t\t\tif bound {\n\t\t\t\treturn t.next()\n\t\t\t} else {\n\t\t\t\t// TODO add timeout here\n\t\t\t\tt.Log.Info(\"PVC and PV are still binding\")\n\t\t\t\ttime.Sleep(PollInterval)\n\t\t\t}\n\t\t}\n\tcase ImportManifestsToDest:\n\t\tif _, err := t.BackupFile.Seek(0, 0); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\tif err := t.runRestore(); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\tif err := t.cleanManifestFile(); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase DeleteMigratedNSResources:\n\t\tif err := t.deleteMigratedNamespaceScopedResources(); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase EnsurePVsUnmounted:\n\t\tfor {\n\t\t\tunmounted, err := t.ensureMigratedPVsUnmounted()\n\t\t\tif err != nil {\n\t\t\t\treturn liberr.Wrap(err)\n\t\t\t}\n\t\t\tif unmounted {\n\t\t\t\treturn t.next()\n\t\t\t} else {\n\t\t\t\t// TODO add timeout here\n\t\t\t\tt.Log.Info(\"PVs are still 
mounted\")\n\t\t\t\ttime.Sleep(PollInterval)\n\t\t\t}\n\t\t}\n\tcase DeleteMigratedPVs:\n\t\tif err := t.deleteMigratedPVs(); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase DeleteMigratedClusterResources:\n\t\tif err := t.deleteMigratedClusterScopedResources(); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase UnQuiesceSrcApplications:\n\t\terr := t.unQuiesceApplications(t.SrcClient, t.sourceNamespaces(), t.PlanResources.SrcMigCluster.Spec.Vendor)\n\t\tif err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase ResetPVReclaimPolicy:\n\t\tif err := t.resetPVReclaimPolicy(); err != nil {\n\t\t\treturn liberr.Wrap(err)\n\t\t}\n\t\treturn t.next()\n\tcase Completed:\n\tdefault:\n\t\treturn t.next()\n\t}\n\n\tif t.Phase == Completed {\n\t\tt.Log.Info(\"[COMPLETED]\")\n\t}\n\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ListUnits returns a list of all Group phases and the Units registered to each of them.
func (g Group) ListUnits() string { var ( s string t = "cli" ) if len(g.c) > 0 { s += "\n- config: " for _, u := range g.c { if u != nil { s += u.Name() + " " } } } if len(g.p) > 0 { s += "\n- prerun: " for _, u := range g.p { if u != nil { s += u.Name() + " " } } } if len(g.s) > 0 { s += "\n- serve : " for _, u := range g.s { if u != nil { t = "svc" s += u.Name() + " " } } } return fmt.Sprintf("Group: %s [%s]%s", g.name, t, s) }
[ "func ListUnits(w http.ResponseWriter) error {\n\tconn, err := sd.NewSystemdConnection()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get systemd bus connection: %s\", err)\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tunits, err := conn.ListUnits()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed ListUnits: %v\", err)\n\t\treturn err\n\t}\n\n\treturn share.JSONResponse(units, w)\n}", "func (sys *DbusControl) ListUnits(prefix string) ([]string, error) {\n\tif sys == nil {\n\t\treturn nil, fmt.Errorf(\"cannot call method on nil\")\n\t}\n\n\tunits, err := sys.underlying.ListUnits()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfullPrefix := \"ledger-\" + prefix\n\n\tvar result = make([]string, 0)\n\tfor _, unit := range units {\n\t\tif unit.LoadState == \"not-found\" || !strings.HasPrefix(unit.Name, fullPrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, strings.TrimSuffix(strings.TrimPrefix(unit.Name, fullPrefix), \".service\"))\n\t}\n\n\tsort.Slice(result, func(i, j int) bool {\n\t\treturn result[i] < result[j]\n\t})\n\n\treturn result, nil\n}", "func cmdUnitList(c *cli.Context) error {\n\tif err := adm.VerifyNoArgument(c); err != nil {\n\t\treturn err\n\t}\n\n\treturn adm.Perform(`get`, `/unit/`, `list`, nil, c)\n}", "func (s *UnitsEndpoint) List(ctx context.Context, division int, all bool, o *api.ListOptions) ([]*Units, error) {\n\tvar entities []*Units\n\tu, _ := s.client.ResolvePathWithDivision(\"/api/v1/{division}/logistics/Units\", division) // #nosec\n\tapi.AddListOptionsToURL(u, o)\n\n\tif all {\n\t\terr := s.client.ListRequestAndDoAll(ctx, u.String(), &entities)\n\t\treturn entities, err\n\t}\n\t_, _, err := s.client.NewRequestAndDo(ctx, \"GET\", u.String(), nil, &entities)\n\treturn entities, err\n}", "func (g *Group) Units() []*Unit {\n\tif g == nil {\n\t\treturn nil\n\t}\n\treturn g.units\n}", "func (c *Client) ListMeasuredUnit(params *ListMeasuredUnitParams, opts ...Option) (MeasuredUnitLister, error) {\n\tpath, err := 
c.InterpolatePath(\"/measured_units\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequestOptions := NewRequestOptions(opts...)\n\tpath = BuildURL(path, params)\n\treturn NewMeasuredUnitList(c, path, requestOptions), nil\n}", "func NewListUnitsOK() *ListUnitsOK {\n\treturn &ListUnitsOK{}\n}", "func (r *Resolver) Units() (*[]*Unit, error) {\n\tvar result []*Unit\n\tfor _, theirUnit := range units.All() {\n\t\tourUnit, err := NewUnit(theirUnit.Name)\n\t\tif err != nil {\n\t\t\treturn &result, err\n\t\t}\n\t\tresult = append(result, &ourUnit)\n\t}\n\treturn &result, nil\n}", "func listInstalledUnits(ns string, suffix string) ([]string, error) {\n\targs := []string{\n\t\t\"list-units\",\n\t\t\"--no-legend\",\n\t\t\"--no-pager\",\n\t\tfmt.Sprintf(\"%s_*.%s\", ns, suffix),\n\t}\n\tout, err := exec.Command(\"systemctl\", args...).Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseListUnits(string(out), ns, suffix)\n}", "func (fl Flannel) Units() []Unit {\n\treturn dropinFromConfig(fl.Flannel, \"flannel.service\")\n}", "func (st *Tools) GetUnitNames() (units []string, err error) {\n\tfiles, err := ioutil.ReadDir(\"/etc/systemd/system/dcos.target.wants\")\n\tif err != nil {\n\t\treturn units, err\n\t}\n\tfor _, f := range files {\n\t\tunits = append(units, f.Name())\n\t}\n\tlogrus.Debugf(\"List of units: %s\", units)\n\treturn units, nil\n}", "func (ee Etcd) Units() ([]Unit, error) {\n\tcontent := dropinContents(ee.Etcd)\n\tif content == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn []Unit{{config.Unit{\n\t\tName: \"etcd.service\",\n\t\tRuntime: true,\n\t\tDropIn: true,\n\t\tContent: content,\n\t}}}, nil\n}", "func getUnits(app *App, names []string) unitList {\n\tvar units []Unit\n\tif len(names) > 0 {\n\t\tfor _, unitName := range names {\n\t\t\tfor _, appUnit := range app.Units {\n\t\t\t\tif appUnit.Name == unitName {\n\t\t\t\t\tunits = append(units, appUnit)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn unitList(units)\n}", "func 
UnitNames() [3]string {\n\treturn unitNames\n}", "func UnitNames() [7]string {\n\treturn unitNames\n}", "func (playerImpl *PlayerImpl) Units() []pirates.Unit {\n\treturn playerImpl.unitsImpl\n}", "func (usl UnitStatusList) Group() (UnitStatusList, error) {\n\tmatchers := map[string]struct{}{}\n\tnewList := []fleet.UnitStatus{}\n\n\thashesEqual, err := allHashesEqual(usl)\n\tif err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\n\tfor _, us := range usl {\n\t\t// Group unit status\n\t\tgrouped, suffix, err := groupUnitStatus(usl, us)\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\n\t\t// Prevent doubled aggregation.\n\t\tif _, ok := matchers[suffix]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tmatchers[suffix] = struct{}{}\n\n\t\tstatesEqual := allStatesEqual(grouped)\n\n\t\t// Aggregate.\n\t\tif hashesEqual && statesEqual {\n\t\t\tnewStatus := grouped[0]\n\t\t\tnewStatus.Name = \"*\"\n\t\t\tnewList = append(newList, newStatus)\n\t\t} else {\n\t\t\tnewList = append(newList, grouped...)\n\t\t}\n\t}\n\n\treturn newList, nil\n}", "func NewList(slice []Unit) List {\n\treturn unitlist{slice}\n}", "func (m *MeasurementFamilyListUnits) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateUnitCode(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WaitTillReady blocks the goroutine till all modules are ready.
func (g *Group) WaitTillReady() { <-g.readyCh }
[ "func (envManager *TestEnvManager) WaitUntilReady() (bool, error) {\n\tlog.Println(\"Start checking components' status\")\n\tretry := u.Retrier{\n\t\tBaseDelay: 1 * time.Second,\n\t\tMaxDelay: 10 * time.Second,\n\t\tRetries: 8,\n\t}\n\n\tready := false\n\tretryFn := func(_ context.Context, i int) error {\n\t\tfor _, comp := range envManager.testEnv.GetComponents() {\n\t\t\tif alive, err := comp.IsAlive(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to comfirm compoment %s is alive %v\", comp.GetName(), err)\n\t\t\t} else if !alive {\n\t\t\t\treturn fmt.Errorf(\"component %s is not alive\", comp.GetName())\n\t\t\t}\n\t\t}\n\n\t\tready = true\n\t\tlog.Println(\"All components are ready\")\n\t\treturn nil\n\t}\n\n\t_, err := retry.Retry(context.Background(), retryFn)\n\treturn ready, err\n}", "func WaitReady(ctx *util.Context) error {\n\tif !ctx.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\tctx.Logger.Infoln(\"Waiting for machine-controller to come up…\")\n\n\t// Wait a bit to let scheduler to react\n\ttime.Sleep(10 * time.Second)\n\n\tif err := WaitForWebhook(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller-webhook did not come up\")\n\t}\n\n\tif err := WaitForMachineController(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller did not come up\")\n\t}\n\treturn nil\n}", "func (t *Indie) Wait() {\n t.waitModules()\n}", "func WaitForReady() {\n\tinstance.WaitForReady()\n}", "func (a *VppAdapter) WaitReady() error {\n\treturn nil\n}", "func WaitReady(s *state.State) error {\n\tif !s.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\ts.Logger.Infoln(\"Waiting for machine-controller to come up...\")\n\n\tif err := cleanupStaleResources(s.Context, s.DynamicClient); err != nil {\n\t\treturn err\n\t}\n\n\tif err := waitForWebhook(s.Context, s.DynamicClient); err != nil {\n\t\treturn err\n\t}\n\n\tif err := waitForMachineController(s.Context, s.DynamicClient); err != nil 
{\n\t\treturn err\n\t}\n\n\treturn waitForCRDs(s)\n}", "func (s *Server) WaitUntilReady() {\n\t_, _ = <-s.readyCh\n}", "func WaitReady() {\n\tif deviceReady {\n\t\treturn\n\t}\n\tch := make(chan struct{}, 0)\n\tf := func() {\n\t\tdeviceReady = true\n\t\tclose(ch)\n\t}\n\tOnDeviceReady(f)\n\t<-ch\n\tUnDeviceReady(f)\n}", "func (b *Botanist) WaitUntilRequiredExtensionsReady(ctx context.Context) error {\n\treturn retry.UntilTimeout(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (done bool, err error) {\n\t\tif err := b.RequiredExtensionsReady(ctx); err != nil {\n\t\t\tb.Logger.Infof(\"Waiting until all the required extension controllers are ready (%+v)\", err)\n\t\t\treturn retry.MinorError(err)\n\t\t}\n\t\treturn retry.Ok()\n\t})\n}", "func (a *Agent) WaitReady() {\n\ta.statusLock.RLock()\n\tdefer a.statusLock.RUnlock()\n\n\tfor {\n\t\tif a.status == 1 {\n\t\t\treturn\n\t\t}\n\t\ta.statusCond.Wait()\n\t}\n}", "func WaitForReady() {\n\tucc.WaitForReady()\n}", "func WaitForReady() {\n\tdefaultClient.WaitForReady()\n}", "func (p *Pebble) WaitReady(t *testing.T) {\n\tif p.pebbleCMD.Process == nil {\n\t\tt.Fatal(\"Pebble not started\")\n\t}\n\turl := p.DirectoryURL()\n\tRetry(t, 10, 10*time.Millisecond, func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\tdefer cancel()\n\n\t\tt.Log(\"Checking pebble readiness\")\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp, err := p.httpClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\treturn nil\n\t})\n}", "func (c *Client) WaitUntilReady() {\n\tc.waitUntilReady()\n}", "func WaitAllReady() {\n\tlog := util.Logger()\n\tklient := util.KubeClient()\n\tcrds := LoadCrds()\n\tintervalSec := time.Duration(3)\n\tlist := []*CrdType{\n\t\tcrds.NooBaa, crds.BackingStore, crds.BucketClass,\n\t}\n\tutil.Panic(wait.PollImmediateInfinite(intervalSec*time.Second, 
func() (bool, error) {\n\t\tallReady := true\n\t\tfor _, crd := range list {\n\t\t\terr := klient.Get(util.Context(), client.ObjectKey{Name: crd.Name}, crd)\n\t\t\tutil.Panic(err)\n\t\t\tready, err := IsReady(crd)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"❌ %s\", err)\n\t\t\t\tallReady = false\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !ready {\n\t\t\t\tlog.Printf(\"❌ CRD is not ready. Need to wait ...\")\n\t\t\t\tallReady = false\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn allReady, nil\n\t}))\n}", "func Wait() {\n\tdefaultManager.Wait()\n}", "func Wait() error {\n\t<-initlock\n\treturn initerr\n}", "func (n *node) wait() {\n\t<-n.apiReady\n}", "func StartAndWaitForReady(ctx context.Context, t *testing.T, manager datatransfer.Manager) {\n\tready := make(chan error, 1)\n\tmanager.OnReady(func(err error) {\n\t\tready <- err\n\t})\n\trequire.NoError(t, manager.Start(ctx))\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"did not finish starting up module\")\n\tcase err := <-ready:\n\t\trequire.NoError(t, err)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate adds an error if the Field is a path that does not exist.
func (v *StringIsPath) Validate(e *validator.Errors) { if Exists(v.Field) { return } e.Add(v.Name, StringIsPathError(v)) }
[ "func (r *RouteSpecFields) Validate(ctx context.Context) (errs *apis.FieldError) {\n\n\tif r.Domain == \"\" {\n\t\terrs = errs.Also(apis.ErrMissingField(\"domain\"))\n\t}\n\n\tif r.Hostname == \"www\" {\n\t\terrs = errs.Also(apis.ErrInvalidValue(\"hostname\", r.Hostname))\n\t}\n\n\tif _, err := BuildPathRegexp(r.Path); err != nil {\n\t\terrs = errs.Also(apis.ErrInvalidValue(\"path\", r.Path))\n\t}\n\n\treturn errs\n}", "func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tparts := strings.Split(filepath.ToSlash(targetPath), \"/\")\n\tfor _, item := range parts {\n\t\tif item == \"..\" {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath, targetPath, \"must not contain '..'\"))\n\t\t\tbreak // even for `../../..`, one error is sufficient to make the point\n\t\t}\n\t}\n\treturn allErrs\n}", "func (f Unstructured) HasPath(fieldPath ...string) bool {\n\tif f.IsUndefined() || len(fieldPath) == 0 {\n\t\treturn true\n\t}\n\tif !f.HasByName(fieldPath[0]) {\n\t\treturn false\n\t}\n\treturn f.Field(fieldPath[0]) != nil\n}", "func urlPathValidator(fl validator.FieldLevel) bool {\n\treturn path.IsAbs(fl.Field().String())\n}", "func validatePathParameter(field *surface_v1.Field) {\n\tif field.Kind != surface_v1.FieldKind_SCALAR {\n\t\tlog.Println(\"The path parameter with the Name \" + field.Name + \" is invalid. \" +\n\t\t\t\"The path template may refer to one or more fields in the gRPC request message, as\" +\n\t\t\t\" long as each field is a non-repeated field with a primitive (non-message) type. 
\" +\n\t\t\t\"See: https://github.com/googleapis/googleapis/blob/master/google/api/http.proto#L62 for more information.\")\n\t}\n}", "func ValidateAbsPath(fl validator.FieldLevel) bool {\n\treturn strings.HasPrefix(fl.Field().String(), \"/\")\n}", "func Test_FSValidation_PathExists_PathIsInvalid(t *testing.T) {\n\ttestPath := \"asdasda\"\n\tresult := PathExists(testPath)\n\tfmt.Println(result)\n\tassert.Equal(t, result, false)\n}", "func validatePath(p string) (path string, err error) {\n\tpath = p\n\tif !strings.HasPrefix(path, \"/\") {\n\t\tpath = fmt.Sprintf(\"/%s\", path)\n\t}\n\n\tpath = strings.TrimSuffix(path, \"/\")\n\n\treturn\n}", "func (st *setDeviceState) ValidateField(isValid func(*ttnpb.EndDevice) bool, path string) error {\n\treturn st.WithField(func(dev *ttnpb.EndDevice) error {\n\t\tif !isValid(dev) {\n\t\t\treturn newInvalidFieldValueError(path)\n\t\t}\n\t\treturn nil\n\t}, path)\n}", "func validOriginPath(fl validator.FieldLevel) bool {\n\tval := fl.Field().String()\n\t// Not required\n\tif val == \"\" {\n\t\treturn true\n\t}\n\treturn OriginPathRexExp.MatchString(val)\n}", "func (t *test) pathValidate(p *gpb.Path, prefix bool) error {\n\terrs := &testerror.List{}\n\n\tif t.gpc.CheckElem && p != nil && len(p.Element) > 0 {\n\t\terrs.AddErr(fmt.Errorf(\"element field is used in gNMI Path %v\", proto.CompactTextString(p)))\n\t}\n\n\tif !prefix || (t.gpc.CheckTarget == \"\" && t.gpc.CheckOrigin == \"\") {\n\t\treturn errs\n\t}\n\n\tif p == nil {\n\t\terrs.AddErr(fmt.Errorf(\"prefix gNMI Path must be non-nil, origin and/or target are missing\"))\n\t\treturn errs\n\t}\n\n\tswitch {\n\tcase t.gpc.CheckTarget == \"\": // Validation on target field isn't requested.\n\tcase t.gpc.CheckTarget == \"*\":\n\t\tif p.Target == \"\" {\n\t\t\terrs.AddErr(fmt.Errorf(\"target isn't set in prefix gNMI Path %v\", proto.CompactTextString(p)))\n\t\t}\n\tcase t.gpc.CheckTarget != p.Target:\n\t\terrs.AddErr(fmt.Errorf(\"target in gNMI Path %v is %q, expect %q\", 
proto.CompactTextString(p), p.Target, t.gpc.CheckTarget))\n\t}\n\n\tswitch {\n\tcase t.gpc.CheckOrigin == \"\": // Validation on origin field isn't requested.\n\tcase t.gpc.CheckOrigin == \"*\":\n\t\tif p.Origin == \"\" {\n\t\t\terrs.AddErr(fmt.Errorf(\"origin isn't set in prefix gNMI Path %v\", proto.CompactTextString(p)))\n\t\t}\n\tcase t.gpc.CheckOrigin != p.Origin:\n\t\terrs.AddErr(fmt.Errorf(\"origin in gNMI Path %v is %q, expect %q\", proto.CompactTextString(p), p.Origin, t.gpc.CheckOrigin))\n\t}\n\n\treturn errs\n}", "func ContainsField(path string, allowedPaths []string) bool {\n\tfor _, allowedPath := range allowedPaths {\n\t\tif path == allowedPath {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p *Path) IsValid() error {\n\t_, err := ParsePath(p.String())\n\treturn err\n}", "func (d *Dataset) checkField(path ...string) error {\n\tif d.check != nil {\n\t\treturn d.check(path...)\n\t}\n\treturn nil\n}", "func (form *FormData) mandatoryPath(filename string, target *string) *FormData {\n\tform.path(filename, target)\n\n\tif *target != \"\" {\n\t\treturn form\n\t}\n\n\tform.append(\n\t\tfmt.Errorf(\"form file '%s' is required\", filename),\n\t)\n\n\treturn form\n}", "func (p *Path) FieldPath(field string) Path {\n\treturn Path{\n\t\tparent: p,\n\t\tkey: fmt.Sprintf(\".%s\", field),\n\t}\n}", "func (df Dirfile) Validate(fieldcode string) error {\n\tfcode := C.CString(fieldcode)\n\tdefer C.free(unsafe.Pointer(fcode))\n\tresult := int(C.gd_validate(df.d, fcode))\n\tif result < 0 {\n\t\treturn df.Error()\n\t}\n\treturn nil\n}", "func (m *PathSegment) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif utf8.RuneCountInString(m.GetKey()) < 1 {\n\t\treturn PathSegmentValidationError{\n\t\t\tfield: \"Key\",\n\t\t\treason: \"value length must be at least 1 runes\",\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *PathMatchInput) Validate() error {\n\treturn m.validate(false)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetNameIndex sets index of slice element on Name.
func (v *StringIsPath) SetNameIndex(i int) { v.Name = fmt.Sprintf("%s[%d]", RxSetNameIndex.ReplaceAllString(v.Name, ""), i) }
[ "func (v *StringLengthInRange) SetNameIndex(i int) {\n\tv.Name = fmt.Sprintf(\"%s[%d]\", RxSetNameIndex.ReplaceAllString(v.Name, \"\"), i)\n}", "func (v *StringIsIP) SetNameIndex(i int) {\n\tv.Name = fmt.Sprintf(\"%s[%d]\", RxSetNameIndex.ReplaceAllString(v.Name, \"\"), i)\n}", "func (v *StringHasSuffixAny) SetNameIndex(i int) {\n\tv.Name = fmt.Sprintf(\"%s[%d]\", RxSetNameIndex.ReplaceAllString(v.Name, \"\"), i)\n}", "func (e *Element) SetIndex(value int) {\n\tif e.Scene != nil {\n\t\te.Scene.Resize.Notify()\n\t}\n\n\tif e.Parent != nil {\n\t\te.Parent.projectIndex(&value)\n\t\te.Parent.children.ReIndex(e.index, value)\n\t\te.Parent.updateIndexes(e.index, value) // this will set the index\n\t}\n}", "func (e *Engine) setIndex(index int64) {\n\te.Index = index\n\te.Name = naming.Name(index)\n}", "func (n Nodes) SetIndex(i int, node *Node)", "func (o *FakeObject) SetIndex(i int, value interface{}) {\n\treflect.ValueOf(o.Value).Index(i).Set(reflect.ValueOf(value))\n}", "func (v Value) SetIndex(i int, x interface{}) {\n\tpanic(message)\n}", "func (gr *GroupReader) SetIndex(index int) error {\n\tgr.mtx.Lock()\n\tdefer gr.mtx.Unlock()\n\treturn gr.openFile(index)\n}", "func (gen *AddressGenerator) SetIndex(i uint) *AddressGenerator {\n\tgen.state = addressState(i)\n\treturn gen\n}", "func (c *Chip8) SetIndex() {\n\tc.index = c.inst & 0x0FFF\n}", "func (this *Value) SetIndex(index int, val interface{}) {\n\tif this.parsedType == ARRAY && index >= 0 {\n\t\tswitch parsedValue := this.parsedValue.(type) {\n\t\tcase []*Value:\n\t\t\tif index < len(parsedValue) {\n\t\t\t\t// if we've already parsed the object, store it there\n\t\t\t\tswitch val := val.(type) {\n\t\t\t\tcase *Value:\n\t\t\t\t\tparsedValue[index] = val\n\t\t\t\tdefault:\n\t\t\t\t\tparsedValue[index] = NewValue(val)\n\t\t\t\t}\n\t\t\t}\n\t\tcase nil:\n\t\t\t// if not store it in alias\n\t\t\tif this.alias == nil {\n\t\t\t\tthis.alias = make(map[string]*Value)\n\t\t\t}\n\t\t\tswitch val := val.(type) 
{\n\t\t\tcase *Value:\n\t\t\t\tthis.alias[strconv.Itoa(index)] = val\n\t\t\tdefault:\n\t\t\t\tthis.alias[strconv.Itoa(index)] = NewValue(val)\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (cli *SetWrapper) SetName(name string) error {\n\treturn cli.set.SetValue(fieldSetName, name)\n}", "func (c *Collection) SetBleveIndex(name string, documentMapping *mapping.DocumentMapping) (err error) {\n\t// Use only the tow first bytes as index prefix.\n\t// The prefix is used to confine indexes with a prefixes.\n\tprefix := c.buildIndexPrefix()\n\tindexHash := blake2b.Sum256([]byte(name))\n\tprefix = append(prefix, indexHash[:2]...)\n\n\t// ok, start building a new index\n\tindex := newIndex(name)\n\tindex.name = name\n\tindex.collection = c\n\tindex.prefix = prefix\n\terr = index.buildSignature(documentMapping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check there is no conflict name or hash\n\tfor _, i := range c.bleveIndexes {\n\t\tif i.name == name {\n\t\t\tif !bytes.Equal(i.signature[:], index.signature[:]) {\n\t\t\t\treturn ErrIndexAllreadyExistsWithDifferentMapping\n\t\t\t}\n\t\t\treturn ErrNameAllreadyExists\n\t\t}\n\t\tif reflect.DeepEqual(i.prefix, prefix) {\n\t\t\treturn ErrHashCollision\n\t\t}\n\t}\n\n\t// Bleve needs to save some parts on the drive.\n\t// The path is based on a part of the collection hash and the index prefix.\n\tcolHash := blake2b.Sum256([]byte(c.name))\n\tindex.path = fmt.Sprintf(\"%x%s%x\", colHash[:2], string(os.PathSeparator), indexHash[:2])\n\n\t// Build the index and set the given document index as default\n\tbleveMapping := bleve.NewIndexMapping()\n\tbleveMapping.StoreDynamic = false\n\tbleveMapping.IndexDynamic = true\n\tbleveMapping.DocValuesDynamic = false\n\n\tfor _, fieldMapping := range documentMapping.Fields {\n\t\tfieldMapping.Store = false\n\t\tfieldMapping.Index = true\n\t}\n\tbleveMapping.DefaultMapping = documentMapping\n\n\t// Build the configuration to use the local bleve storage and initialize the index\n\tconfig := 
blevestore.NewConfigMap(c.db.ctx, index.path, c.db.privateKey, prefix, c.db.badger, c.db.writeChan)\n\tindex.bleveIndex, err = bleve.NewUsing(c.db.path+string(os.PathSeparator)+index.path, bleveMapping, upsidedown.Name, blevestore.Name, config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Save the on drive bleve element into the index struct itself\n\tindex.bleveIndexAsBytes, err = index.indexZipper()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Add the new index to the list of index of this collection\n\tc.bleveIndexes = append(c.bleveIndexes, index)\n\n\t// Index all existing values\n\terr = c.db.badger.View(func(txn *badger.Txn) error {\n\t\titer := txn.NewIterator(badger.DefaultIteratorOptions)\n\t\tdefer iter.Close()\n\n\t\tcolPrefix := c.buildDBKey(\"\")\n\t\tfor iter.Seek(colPrefix); iter.ValidForPrefix(colPrefix); iter.Next() {\n\t\t\titem := iter.Item()\n\n\t\t\tvar err error\n\t\t\tvar itemAsEncryptedBytes []byte\n\t\t\titemAsEncryptedBytes, err = item.ValueCopy(itemAsEncryptedBytes)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar clearBytes []byte\n\t\t\tclearBytes, err = cipher.Decrypt(c.db.privateKey, item.Key(), itemAsEncryptedBytes)\n\n\t\t\tid := string(item.Key()[len(colPrefix):])\n\n\t\t\tcontent := c.fromValueBytesGetContentToIndex(clearBytes)\n\t\t\terr = index.bleveIndex.Index(id, content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Save the new settup\n\treturn c.db.saveConfig()\n}", "func (self *SinglePad) SetIndexA(member int) {\n self.Object.Set(\"index\", member)\n}", "func (i *Index) IndexWithName(name string, opt ...Option) *Index {\n\to := &options{parent: i, name: name}\n\to.fill(opt)\n\treturn i.subIndexForKey(o)\n}", "func (bA *CompactBitArray) SetIndex(i int, v bool) bool {\n\tif bA == nil {\n\t\treturn false\n\t}\n\n\tif i < 0 || i >= bA.Count() {\n\t\treturn false\n\t}\n\n\tif v {\n\t\tbA.Elems[i>>3] |= (1 << 
uint8(7-(i%8)))\n\t} else {\n\t\tbA.Elems[i>>3] &= ^(1 << uint8(7-(i%8)))\n\t}\n\n\treturn true\n}", "func (self *Graphics) SetChildIndex(child *DisplayObject, index int) {\n self.Object.Call(\"setChildIndex\", child, index)\n}", "func (m *RecurrencePattern) SetIndex(value *WeekIndex)() {\n m.index = value\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
runIndex is the main function for the index subcommand
func runIndex() { // check index flag is set (global flag but don't require it for all sub commands) if *indexDir == "" { fmt.Println("please specify a directory for the index files (--indexDir)") os.Exit(1) } // set up profiling if *profiling == true { defer profile.Start(profile.MemProfile, profile.ProfilePath("./")).Stop() //defer profile.Start(profile.ProfilePath("./")).Stop() } // start logging if *logFile != "" { logFH := misc.StartLogging(*logFile) defer logFH.Close() log.SetOutput(logFH) } else { log.SetOutput(os.Stdout) } // start the index sub command start := time.Now() log.Printf("i am groot (version %s)", version.GetVersion()) log.Printf("starting the index subcommand") // check the supplied files and then log some stuff log.Printf("checking parameters...") misc.ErrorCheck(indexParamCheck()) log.Printf("\tprocessors: %d", *proc) log.Printf("\tk-mer size: %d", *kmerSize) log.Printf("\tsketch size: %d", *sketchSize) log.Printf("\tgraph window size: %d", *windowSize) log.Printf("\tnum. partitions: %d", *numPart) log.Printf("\tmax. K: %d", *maxK) log.Printf("\tmax. 
sketch span: %d", *maxSketchSpan) // record the runtime information for the index sub command info := &pipeline.Info{ Version: version.GetVersion(), KmerSize: *kmerSize, SketchSize: *sketchSize, WindowSize: *windowSize, NumPart: *numPart, MaxK: *maxK, MaxSketchSpan: *maxSketchSpan, IndexDir: *indexDir, } // create the pipeline log.Printf("initialising indexing pipeline...") indexingPipeline := pipeline.NewPipeline() // initialise processes log.Printf("\tinitialising the processes") msaConverter := pipeline.NewMSAconverter(info) graphSketcher := pipeline.NewGraphSketcher(info) sketchIndexer := pipeline.NewSketchIndexer(info) // connect the pipeline processes log.Printf("\tconnecting data streams") msaConverter.Connect(msaList) graphSketcher.Connect(msaConverter) sketchIndexer.Connect(graphSketcher) // submit each process to the pipeline and run it indexingPipeline.AddProcesses(msaConverter, graphSketcher, sketchIndexer) log.Printf("\tnumber of processes added to the indexing pipeline: %d\n", indexingPipeline.GetNumProcesses()) log.Print("creating graphs, sketching traversals and indexing...") indexingPipeline.Run() log.Printf("writing index files in \"%v\"...", *indexDir) misc.ErrorCheck(info.SaveDB(*indexDir + "/groot.lshe")) misc.ErrorCheck(info.Dump(*indexDir + "/groot.gg")) log.Printf("finished in %s", time.Since(start)) }
[ "func execIndex(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tpprof.Index(args[0].(http.ResponseWriter), args[1].(*http.Request))\n}", "func Index(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"El index pa.\")\n}", "func ExecuteIndex(user models.User, w http.ResponseWriter, r *http.Request) error {\n\taccess := 0\n\n\t// check if user is empty\n\tif user.ID != \"\" {\n\t\taccess = user.GetAccess()\n\t} else {\n\t\t// todo: normal auth page\n\t\tw.Header().Set(\"Content-Type\", \"\")\n\t\thttp.Redirect(w, r, \"/login\", http.StatusFound)\n\t\treturn nil\n\t}\n\n\t// getting all required data\n\tvoiceTime := \"\"\n\tvtd, err := user.GetVoiceTime()\n\tif err == nil {\n\t\tvoiceTime = utils.FormatDuration(vtd)\n\t}\n\txbox := \"\"\n\txboxes, _ := user.GetXboxes()\n\tif len(xboxes) > 0 {\n\t\txbox = xboxes[0].Xbox\n\t}\n\tjoinedAtTime, err := user.GetGuildJoinDate()\n\tjoinedAt := \"\"\n\tif err == nil {\n\t\tdif := int(time.Now().Sub(joinedAtTime).Milliseconds()) / 1000 / 3600 / 24\n\t\tdays := utils.FormatUnit(dif, utils.Days)\n\t\tjoinedAt = fmt.Sprintf(\"%s (%s)\", utils.FormatDateTime(joinedAtTime), days)\n\t}\n\twarns, err := user.GetWarnings()\n\tif err != nil {\n\t\twarns = []models.Warning{}\n\t}\n\n\t// Preparing content and rendering\n\tcontent := IndexContent{\n\t\tUsername: user.Username,\n\t\tAvatar: user.AvatarURL,\n\t\tJoinedAt: joinedAt,\n\t\tXbox: xbox,\n\t\tVoiceTime: voiceTime,\n\t\tWarnsCount: len(warns),\n\t\tWarnings: PrepareWarnings(warns),\n\t}\n\n\ttmpl, err := template.ParseFiles(\"templates/layout.gohtml\", \"templates/index.gohtml\", \"templates/navbar.gohtml\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tmpl.ExecuteTemplate(w, \"layout\", Layout{\n\t\tTitle: \"Главная страница\",\n\t\tPage: \"index\",\n\t\tAccess: access,\n\t\tContent: content,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func ShowIndex() {\n\tfmt.Printf(\"%v\\n\", indexText)\n}", "func (c *RunCommand) Index() int 
{\n\treturn c.cmd.index\n}", "func Index(w http.ResponseWriter, data *IndexData) {\n\trender(tpIndex, w, data)\n}", "func (g *Gossiper) Index(filename string, entry string) error {\n\tpath := entry + filename\n\tlog.Lvl3(\"Opening file : \", path)\n\tfile, err := os.Open(path)\n\tif err != nil {\n\n\t\tlog.Error(\"Could not open a file : \", err)\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\tlog.Error(\"Could not get file information \", err)\n\t\treturn err\n\t}\n\tlength := fi.Size()\n\tmf, err := g.Scan(file, length)\n\tif err != nil {\n\t\tlog.Error(\"Error while scanning the file : \", err)\n\t\treturn err\n\t}\n\thash := make([]byte, 32)\n\tCstmCopy(hash, sha256.Sum256(mf))\n\tmeta := MetaData{\n\t\tName: filename,\n\t\tLength: length,\n\t\tMetafile: mf,\n\t\tMetaHash: hash,\n\t}\n\n\tg.AddMetaData(&meta)\n\n\t//now we ask to have it on the blockchain..\n\tif g.ConfirmationRunning() {\n\t\tgo g.AddToStack(meta.Name, meta.MetaHash, meta.Length)\n\n\t}\n\n\treturn nil\n}", "func (w *Worker) index(repoName api.RepoName, rev string, isPrimary bool) (err error) {\n\trepo, commit, err := resolveRevision(w.Ctx, repoName, rev)\n\tif err != nil {\n\t\t// Avoid infinite loop for always cloning test.\n\t\tif repo != nil && repo.Name == \"github.com/sourcegraphtest/AlwaysCloningTest\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\t// Check if index is already up-to-date\n\tif repo.IndexedRevision != nil && (repo.FreezeIndexedRevision || *repo.IndexedRevision == commit) {\n\t\treturn nil\n\t}\n\n\t// Get language\n\tinv, err := api.InternalClient.ReposGetInventoryUncached(w.Ctx, repo.ID, commit)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Repos.GetInventory failed: %s\", err)\n\t}\n\tlang := inv.PrimaryProgrammingLanguage()\n\n\t// Update global refs & packages index\n\tif !repo.Fork() {\n\t\tvar errs []error\n\t\tif err := api.InternalClient.DefsRefreshIndex(w.Ctx, repo.Name, commit); err != nil {\n\t\t\terrs = 
append(errs, fmt.Errorf(\"Defs.RefreshIndex failed: %s\", err))\n\t\t}\n\n\t\tif err := api.InternalClient.PkgsRefreshIndex(w.Ctx, repo.Name, commit); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"Pkgs.RefreshIndex failed: %s\", err))\n\t\t}\n\n\t\tif err := makeMultiErr(errs...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = api.InternalClient.ReposUpdateIndex(w.Ctx, repo.ID, commit, lang)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func UseIndex() *ishell.Cmd {\n\n\treturn &ishell.Cmd{\n\t\tName: \"use\",\n\t\tHelp: \"Select index to use for subsequent document operations\",\n\t\tFunc: func(c *ishell.Context) {\n\t\t\tif context == nil {\n\t\t\t\terrorMsg(c, errNotConnected)\n\t\t\t} else {\n\t\t\t\tdefer restorePrompt(c)\n\t\t\t\tif len(c.Args) < 1 {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Using index \", cy(context.ActiveIndex))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.Args[0] == \"--\" {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Index \", cy(context.ActiveIndex), \" is no longer in use\")\n\t\t\t\t\t\tcontext.ActiveIndex = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts, err := context.ResolveAndValidateIndex(c.Args[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorMsg(c, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontext.ActiveIndex = s\n\t\t\t\tif s != c.Args[0] {\n\t\t\t\t\tcprintlist(c, \"For alias \", cyb(c.Args[0]), \" selected index \", cy(s))\n\t\t\t\t} else {\n\t\t\t\t\tcprintlist(c, \"Selected index \", cy(s))\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n}", "func AppIndex(ctx cfg.Context, w http.ResponseWriter, r *http.Request) error {\n\treturn Info(ctx, w, r)\n}", "func (i indexer) Index(ctx context.Context, req IndexQuery) (\n\tresp *IndexResult, err error) {\n\n\tlog.Info(\"index [%v] root [%v] 
len_dirs=%v len_files=%v\",\n\t\treq.Key, req.Root, len(req.Dirs), len(req.Files))\n\tstart := time.Now()\n\t// Setup the response\n\tresp = NewIndexResult()\n\tif err = req.Normalize(); err != nil {\n\t\tlog.Info(\"index [%v] error: %v\", req.Key, err)\n\t\tresp.Error = errs.NewStructError(err)\n\t\treturn\n\t}\n\n\t// create index shards\n\tvar nshards int\n\tif nshards = i.cfg.NumShards; nshards == 0 {\n\t\tnshards = 1\n\t}\n\tnshards = utils.MinInt(nshards, maxShards)\n\ti.shards = make([]index.IndexWriter, nshards)\n\ti.root = getRoot(i.cfg, &req)\n\n\tfor n := range i.shards {\n\t\tname := path.Join(i.root, shardName(req.Key, n))\n\t\tixw, err := getIndexWriter(ctx, name)\n\t\tif err != nil {\n\t\t\tresp.Error = errs.NewStructError(err)\n\t\t\treturn resp, nil\n\t\t}\n\t\ti.shards[n] = ixw\n\t}\n\n\tfs := getFileSystem(ctx, i.root)\n\trepo := newRepoFromQuery(&req, i.root)\n\trepo.SetMeta(i.cfg.RepoMeta, req.Meta)\n\tresp.Repo = repo\n\n\t// Add query Files and scan Dirs for files to index\n\tnames, err := i.scanner(fs, &req)\n\tch := make(chan int, nshards)\n\tchnames := make(chan string, 100)\n\tgo func() {\n\t\tfor _, name := range names {\n\t\t\tchnames <- name\n\t\t}\n\t\tclose(chnames)\n\t}()\n\treqch := make(chan par.RequestFunc, nshards)\n\tfor _, shard := range i.shards {\n\t\treqch <- indexShard(&i, &req, shard, fs, chnames, ch)\n\t}\n\tclose(reqch)\n\terr = par.Requests(reqch).WithConcurrency(nshards).DoWithContext(ctx)\n\tclose(ch)\n\n\t// Await results, each indicating the number of files scanned\n\tfor num := range ch {\n\t\trepo.NumFiles += num\n\t}\n\n\trepo.NumShards = len(i.shards)\n\t// Flush our index shard files\n\tfor _, shard := range i.shards {\n\t\tshard.Flush()\n\t\trepo.SizeIndex += ByteSize(shard.IndexBytes())\n\t\trepo.SizeData += ByteSize(shard.DataBytes())\n\t\tlog.Debug(\"index flush %v (data) %v (index)\",\n\t\t\trepo.SizeData, repo.SizeIndex)\n\t}\n\trepo.ElapsedIndexing = time.Since(start)\n\trepo.TimeUpdated = 
time.Now().UTC()\n\n\tvar msg string\n\tif err != nil {\n\t\trepo.State = ERROR\n\t\tresp.SetError(err)\n\t\tmsg = \"error: \" + resp.Error.Error()\n\t} else {\n\t\trepo.State = OK\n\t\tmsg = \"ok \" + fmt.Sprintf(\n\t\t\t\"(%v files, %v data, %v index)\",\n\t\t\trepo.NumFiles, repo.SizeData, repo.SizeIndex)\n\t}\n\tlog.Info(\"index [%v] %v [%v]\", req.Key, msg, repo.ElapsedIndexing)\n\treturn\n}", "func (b *Blueprint) indexCommand(typ string, columns []string, index string, algorithm string) *Blueprint {\n\t// if no name was specified for this index, we will create one using a bsaic\n\t// convention of the table name, followed by the columns, followd by an\n\t// index type, such as primary or index, which makes the index unique.\n\tif index == \"\" {\n\t\tindex = b.createIndexName(typ, columns)\n\t}\n\n\treturn b.addCommand(typ, &CommandOptions{\n\t\tIndex: index,\n\t\tColumns: columns,\n\t\tAlgorithm: algorithm,\n\t})\n}", "func CheckoutIndexCmd(c *git.Client, args []string) error {\n\tflags := flag.NewFlagSet(\"checkout-index\", flag.ExitOnError)\n\tflags.SetOutput(flag.CommandLine.Output())\n\tflags.Usage = func() {\n\t\tflag.Usage()\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \"\\n\\nOptions:\\n\")\n\t\tflags.PrintDefaults()\n\t\t// Some git tests test for a 129 exit code if the commandline\n\t\t// parsing fails for checkout-index.\n\t\tos.Exit(129)\n\t}\n\toptions := git.CheckoutIndexOptions{}\n\n\tflags.BoolVar(&options.UpdateStat, \"index\", false, \"Update stat information for checkout out entries in the index\")\n\tflags.BoolVar(&options.UpdateStat, \"u\", false, \"Alias for --index\")\n\n\tflags.BoolVar(&options.Quiet, \"quiet\", false, \"Be quiet if files exist or are not in index\")\n\tflags.BoolVar(&options.Quiet, \"q\", false, \"Alias for --quiet\")\n\n\tflags.BoolVar(&options.Force, \"force\", false, \"Force overwrite of existing files\")\n\tflags.BoolVar(&options.Force, \"f\", false, \"Alias for --force\")\n\n\tflags.BoolVar(&options.All, \"all\", 
false, \"Checkout all files in the index.\")\n\tflags.BoolVar(&options.All, \"a\", false, \"Alias for --all\")\n\n\tflags.BoolVar(&options.NoCreate, \"no-create\", false, \"Don't checkout new files, only refresh existing ones\")\n\tflags.BoolVar(&options.NoCreate, \"n\", false, \"Alias for --no-create\")\n\n\tflags.StringVar(&options.Prefix, \"prefix\", \"\", \"When creating files, prepend string\")\n\tflags.StringVar(&options.Stage, \"stage\", \"\", \"Copy files from named stage (unimplemented)\")\n\n\tflags.BoolVar(&options.Temp, \"temp\", false, \"Instead of copying files to a working directory, write them to a temp dir\")\n\n\tstdin := flags.Bool(\"stdin\", false, \"Instead of taking paths from command line, read from stdin\")\n\tflags.BoolVar(&options.NullTerminate, \"z\", false, \"Use nil instead of newline to terminate paths read from stdin\")\n\n\tflags.Parse(args)\n\tfiles := flags.Args()\n\tif *stdin {\n\t\toptions.Stdin = os.Stdin\n\t}\n\n\t// Convert from string to git.File\n\tgfiles := make([]git.File, len(files))\n\tfor i, f := range files {\n\t\tgfiles[i] = git.File(f)\n\t}\n\n\treturn git.CheckoutIndex(c, options, gfiles)\n\n}", "func indexHandler(w http.ResponseWriter, r *http.Request) {\n\trenderTemplate(w, \"index\")\n}", "func serveIndex(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\terr := serveAssets(w, r, \"index.html\")\n\tcheckError(err)\n}", "func (cmd *IndexScansCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = \"/v1/scans\"\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tresp, err := c.IndexScans(ctx, path)\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"failed\", \"err\", err)\n\t\treturn err\n\t}\n\n\tgoaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)\n\treturn nil\n}", "func (c App) Index() revel.Result {\n\treturn c.Render()\n}", "func 
indexHandler(res http.ResponseWriter, req *http.Request) {\n\n\t// Execute the template and respond with the index page.\n\ttemplates.ExecuteTemplate(res, \"index\", nil)\n}", "func handleIndex(req *restful.Request, resp *restful.Response) {\n\t// TODO: use restful's Request/Response methods\n\tif req.Request.URL.Path != \"/\" && req.Request.URL.Path != \"/index.html\" {\n\t\tnotFound(resp.ResponseWriter, req.Request)\n\t\treturn\n\t}\n\tresp.ResponseWriter.WriteHeader(http.StatusOK)\n\t// TODO: serve this out of a file\n\tdata := \"<html><body>Welcome to Kubernetes</body></html>\"\n\tfmt.Fprint(resp.ResponseWriter, data)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
indexParamCheck is a function to check user supplied parameters
func indexParamCheck() error { // check the supplied directory is accessible etc. log.Printf("\tdirectory containing MSA files: %v", *msaDir) misc.ErrorCheck(misc.CheckDir(*msaDir)) // check there are some files with the msa extension msas, err := filepath.Glob(*msaDir + "/cluster*.msa") if err != nil { return fmt.Errorf("no MSA files in the supplied directory (must be named cluster-DD.msa)") } for _, msa := range msas { // check accessibility misc.ErrorCheck(misc.CheckFile(msa)) // add to the pile msaList = append(msaList, msa) } if len(msas) == 0 { return fmt.Errorf("no MSA files found that passed the file checks (make sure filenames follow 'cluster-DD.msa' convention)") } log.Printf("\tnumber of MSA files: %d", len(msas)) // TODO: check the supplied arguments to make sure they don't conflict with each other eg: if *kmerSize > *windowSize { return fmt.Errorf("supplied k-mer size greater than read length") } // setup the indexDir if _, err := os.Stat(*indexDir); os.IsNotExist(err) { if err := os.MkdirAll(*indexDir, 0700); err != nil { return fmt.Errorf("can't create specified output directory") } } // set number of processors to use if *proc <= 0 || *proc > runtime.NumCPU() { *proc = runtime.NumCPU() } runtime.GOMAXPROCS(*proc) return nil }
[ "func (af *filtBase) checkIntParam(p, low, high int, name string) (int, error) {\n\tif low <= p && p <= high {\n\t\treturn p, nil\n\t} else {\n\t\terr := fmt.Errorf(\"parameter %v is not in range <%v, %v>\", name, low, high)\n\t\treturn 0, err\n\t}\n}", "func CheckArgs(argsLength, argIndex int) error {\n\tif argsLength == (argIndex + 1) {\n\t\treturn errors.New(\"Not specified key value.\")\n\t}\n\treturn nil\n}", "func checkIntParam(L *lua.LState, pos int) state.IntParam {\n\tud := L.CheckUserData(pos)\n\tif value, ok := ud.Value.(state.IntParam); ok {\n\t\treturn value\n\t}\n\tL.ArgError(pos, \"require IntParam object\")\n\treturn state.IntParam{}\n}", "func checkIndex(index string, test string, pool string) error {\n\tindexInt, err := strconv.Atoi(index)\n\tif err != nil {\n\t\tindexInt = 0\n\t}\n\t//connecting the database\n\tdb := config.DB\n\t// check index is less then zero or not\n\tif indexInt <= 0 {\n\t\treturn errors.New(\"please enter a valid index\")\n\t}\n\n\t// fetch total no. of questions from question table related to that question id\n\tpools := []dbtypes.TestPool{}\n\n\tdb.Raw(\"select no_of_questions from test_pools where test_id=? 
and pool_id=?\", test, pool).Scan(&pools)\n\tif len(pools) == 0 {\n\t\treturn errors.New(\"No question are there in this pool\")\n\t}\n\n\t//check index is less then total number of questions\n\tif pools[0].NoOfQuestions < indexInt {\n\t\treturn errors.New(\"Please enter valid index (less then or equal to \" + strconv.Itoa(pools[0].NoOfQuestions) + \" )\")\n\t}\n\treturn nil\n}", "func intParamMetaIndex(L *lua.LState) int {\n\tdata := checkIntParam(L, 1)\n\tL.CheckTypes(2, lua.LTNumber, lua.LTString)\n\n\tswitch lvalue := L.Get(2); lvalue.Type() {\n\tcase lua.LTNumber:\n\t\tindex := int(lua.LVAsNumber(lvalue))\n\t\tif ok := indexIsInRange(index, data); !ok {\n\t\t\tL.ArgError(2, indexOutMessage)\n\t\t}\n\t\tL.Push(lua.LNumber(data.Get(index)))\n\t\treturn 1\n\tcase lua.LTString:\n\t\tkey := lua.LVAsString(lvalue)\n\t\t// find methods\n\t\tmt := L.GetTypeMetatable(intParamMetaName).(*lua.LTable)\n\t\tif fn := mt.RawGetString(key); fn.Type() == lua.LTFunction {\n\t\t\tL.Push(fn)\n\t\t\treturn 1\n\t\t}\n\n\t\t// find data\n\t\tif val, ok := data.GetByStr(key); ok {\n\t\t\tL.Push(lua.LNumber(val))\n\t\t\treturn 1\n\t\t}\n\t\tL.ArgError(2, key+\" is not found in csv\")\n\t}\n\n\tL.Push(lua.LNil)\n\treturn 1\n}", "func (d *Driver) checkIdx(i int) error {\n\tif i <= 0 || i > d.Drivers {\n\t\treturn fmt.Errorf(\"max72xx: bad index, %d\", i)\n\t}\n\treturn nil\n}", "func validateParam(param string, list []string) (err error) {\n\t// parameter must be set\n\tif param == \"\" {\n\t\treturn errors.New(\"param is empty\")\n\t}\n\n\t// param must be valid\n\tif !stringInSlice(param, list) {\n\t\treturn fmt.Errorf(\"param \\\"%v\\\" is not valid\", param)\n\t}\n\n\treturn nil\n}", "func (c Comment) ParamCommandComment_IsParamIndexValid() bool {\n\to := C.clang_ParamCommandComment_isParamIndexValid(c.c)\n\n\treturn o != C.uint(0)\n}", "func (o *GetFetchParams) validateIndex(formats strfmt.Registry) error {\n\n\tif err := validate.MinimumInt(\"index\", \"query\", int64(*o.Index), 
1, false); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func checkIndexInfo(indexName string, indexPartSpecifications []*ast.IndexPartSpecification) error {\n\tif strings.EqualFold(indexName, mysql.PrimaryKeyName) {\n\t\treturn dbterror.ErrWrongNameForIndex.GenWithStackByArgs(indexName)\n\t}\n\tif len(indexPartSpecifications) > mysql.MaxKeyParts {\n\t\treturn infoschema.ErrTooManyKeyParts.GenWithStackByArgs(mysql.MaxKeyParts)\n\t}\n\tfor _, idxSpec := range indexPartSpecifications {\n\t\t// -1 => unspecified/full, > 0 OK, 0 => error\n\t\tif idxSpec.Expr == nil && idxSpec.Length == 0 {\n\t\t\treturn ErrKeyPart0.GenWithStackByArgs(idxSpec.Column.Name.O)\n\t\t}\n\t}\n\treturn checkDuplicateColumnName(indexPartSpecifications)\n}", "func TestValidateStationDataParameter(t *testing.T) {\n\tt.Parallel()\n\tif !ValidateStationDataParameter(\"name\") {\n\t\tt.Error(\"Unable to match field to slice member\")\n\t}\n\tif !ValidateStationDataParameter(\"lum\") {\n\t\tt.Error(\"Unable to match field to slice member\")\n\t}\n\tif ValidateStationDataParameter(\"asdf\") {\n\t\tt.Error(\"Found incorrect member in slice\")\n\t}\n\tif !ValidateStationDataParameter(\"rain_1h\") {\n\t\tt.Error(\"Found incorrect member in slice\")\n\t}\n}", "func (mdl *Model) ParamIdx(nm string) int {\n\tfor i, p := range mdl.Params {\n\t\tif p.Name == nm {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}", "func CheckParam(param map[string]interface{}, target map[string][]string) (bool, map[string]string) {\n\tvar res bool = true\n\tvar detail string\n\tvar errOutput map[string]string\n\terrOutput = make(map[string]string)\n\tfor key, value := range target {\n\t\tif input, ok := param[key]; ok {\n\t\t\tres, detail = checkParamWithArray(input, value)\n\t\t\tif !res {\n\t\t\t\terrOutput[key] = detail\n\t\t\t}\n\t\t}\n\t}\n\tif len(errOutput) > 0 {\n\t\treturn false, errOutput\n\t}\n\treturn true, errOutput\n}", "func (af *filtBase) checkFloatParam(p, low, high float64, name string) (float64, error) 
{\n\tif low <= p && p <= high {\n\t\treturn p, nil\n\t} else {\n\t\terr := fmt.Errorf(\"parameter %v is not in range <%v, %v>\", name, low, high)\n\t\treturn 0, err\n\t}\n}", "func (a *Parser) verify() error {\n\tfor n, p := range a.params {\n\t\tif n == p.name {\n\t\t\tvalue := reflValue(p.target)\n\t\t\tswitch value.Kind() {\n\t\t\tcase reflect.Slice:\n\t\t\t\tfor i := p.count; i < reflLen(p.target); i++ {\n\t\t\t\t\tif p.scan != nil {\n\t\t\t\t\t\t// scan remaining initial values to ensure they are okay\n\t\t\t\t\t\te := p.scan(fmt.Sprint(reflElement(i, value)), reflCopy(reflElementAddr(i, value)))\n\t\t\t\t\t\tif e != nil {\n\t\t\t\t\t\t\treturn decorate(fmt.Errorf(\"invalid default value at offset %d: %v\", i, e), n)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase reflect.Array:\n\t\t\t\tif p.count != p.limit {\n\t\t\t\t\treturn decorate(fmt.Errorf(\"%d value%s specified but exactly %d expected\", p.count, plural(p.count), p.limit), n)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t// single-valued parameter\n\t\t\t\tif p.count < 1 {\n\t\t\t\t\tif p.limit != 0 {\n\t\t\t\t\t\treturn decorate(fmt.Errorf(\"mandatory parameter not set\"), n)\n\t\t\t\t\t}\n\t\t\t\t\t// scan initial value (into a copy) to ensure it's okay\n\t\t\t\t\tif p.scan != nil {\n\t\t\t\t\t\te := p.scan(fmt.Sprint(value), reflCopy(p.target))\n\t\t\t\t\t\tif e != nil {\n\t\t\t\t\t\t\treturn decorate(fmt.Errorf(\"invalid default value: %v\", e), n)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func IsValidParameterPresent(vars map[string]string, sp []string) error {\n\n\tfor i := range sp {\n\t\tv := vars[sp[i]]\n\t\tif v == \"\" {\n\t\t\terrMessage := fmt.Sprintf(\"Missing %v in GET request\", sp[i])\n\t\t\treturn fmt.Errorf(errMessage)\n\t\t}\n\n\t}\n\treturn nil\n\n}", "func (lc *imgListCfg) checkParams(cmd *cobra.Command, args []string) error {\n\tvar err error\n\n\t//check and get persistent flag\n\tvar pf *PersistentFlags\n\tpf, err = CheckPersistentFlags()\n\tif 
err != nil {\n\t\treturn err\n\t}\n\n\t//check Flags using common parameter checker\n\tvar params utils.Params\n\tparams, err = utils.CheckFlags(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlc.params = params\n\tlc.pf = pf\n\n\treturn err\n}", "func checkIndexType(prgrm *ast.CXProgram, idxIdx ast.CXTypeSignatureIndex) {\n\tidxTypeSig := prgrm.GetCXTypeSignatureFromArray(idxIdx)\n\tvar idxType string\n\n\tidxType = ast.GetFormattedType(prgrm, idxTypeSig)\n\tif idxType != \"i32\" && idxType != \"i64\" {\n\t\tprintln(ast.CompilationError(idxTypeSig.ArgDetails.FileName, idxTypeSig.ArgDetails.FileLine), fmt.Sprintf(\"wrong index type; expected either 'i32' or 'i64', got '%s'\", idxType))\n\t}\n}", "func (it *item) checkParamRange(r Range) ErrorList {\n\tvar sev ErrorSeverity\n\tgiven := len(it.params)\n\tbelow := given < r.Min\n\tif below || uint(given) > uint(r.Max) {\n\t\tvar textErr, textParams string\n\t\tif below {\n\t\t\tif given > 0 {\n\t\t\t\ttextParams = \": \" + it.params.String()\n\t\t\t}\n\t\t\ttextErr = fmt.Sprintf(\n\t\t\t\t\"requires at least %d parameters, %d given\", r.Min, given,\n\t\t\t) + textParams\n\t\t\tsev = ESError\n\t\t} else {\n\t\t\tif r.Max == 0 {\n\t\t\t\ttextParams = \"accepts no parameters\"\n\t\t\t} else {\n\t\t\t\ttextParams = fmt.Sprintf(\"accepts a maximum of %d parameters\", r.Max)\n\t\t\t}\n\t\t\textra := given - r.Max\n\t\t\ttextErr = textParams + fmt.Sprintf(\n\t\t\t\t\", ignoring %d additional ones: \", extra,\n\t\t\t) + strings.Join(it.params[given-extra:], \", \")\n\t\t\tsev = ESWarning\n\t\t}\n\t\treturn ErrorListF(sev, \"%s %s\", it.val, textErr)\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subscribe subscribes to the update of todos.
func (app *Application) Subscribe(store *todo.Store) { store.Register(app.subscriber) }
[ "func (n *notifier) Subscribe(ch chan<- []Update) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tn.subs = append(n.subs, ch)\n}", "func (u *Updater) Subscribe(s *Server) {\n\tu.mux.Lock()\n\tu.listeners = append(u.listeners, s)\n\tu.mux.Unlock()\n}", "func (d *Demo) Subscribe(recv backend.Receiver) {\n\td.Lock()\n\tdefer d.Unlock()\n\n\td.subscriber = recv\n\n\t// Release the lock before running an update.\n\tgo d.updateAll()\n}", "func (l *ObserverList) Subscribe(obs Observer) {\n\tl.Lock()\n\tl.Observers = append(l.Observers, obs)\n\tl.Unlock()\n}", "func (c *Coordinator) Subscribe(ss ...func(*Config) error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tc.subscribers = append(c.subscribers, ss...)\n}", "func (_m *LogPollerWrapper) SubscribeToUpdates(name string, subscriber types.RouteUpdateSubscriber) {\n\t_m.Called(name, subscriber)\n}", "func (h *Handler) apiSubscribe(w http.ResponseWriter, r *http.Request) {\n\t// Extract request parameters\n\tevent := route.Param(r.Context(), \"event\")\n\n\t// Upgrade connection to websocket\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Print(\"Could not upgrade:\", err)\n\t\treturn\n\t}\n\tdefer ws.Close()\n\n\tws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })\n\n\tdone := make(chan struct{})\n\n\tgo h.ping(ws, done)\n\n\terr = h.client.SubscribeUntil(event,\n\t\tfunc(eventName string, eventBytes []byte) bool {\n\t\t\tmsg := struct {\n\t\t\t\tName string `json:\"name\"`\n\t\t\t\tData string `json:\"data\"`\n\t\t\t}{Name: eventName, Data: string(eventBytes)}\n\t\t\tmsgTxt, err := json.Marshal(msg)\n\t\t\tif err != nil {\n\t\t\t\th.logger.Error(\"json:\", err)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\terr = ws.WriteMessage(websocket.TextMessage, msgTxt)\n\t\t\tif err != nil {\n\t\t\t\th.logger.Error(\"Write error:\", err)\n\t\t\t\tdone <- struct{}{}\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t})\n\tif err != nil {\n\t\thttp.Error(w, 
err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t<-done\n}", "func (s *Server) subscriptionUpdates(fullPath *pb.Path) (*pb.Notification, error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tupdates, err := s.updatesFromNode(fullPath)\n\treturn &pb.Notification{\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tUpdate: updates,\n\t}, err\n}", "func (s ec2sessions) SubscribeForDeletions() {\n\tsub, err := sc.Subscribe(\"deployer.status\", s.terminationHandler)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to Subscribe to nats topic\", err.Error())\n\t}\n\t_ = sub\n}", "func (tr *Transport) SubscribeTwinUpdates(ctx context.Context, mux transport.TwinStateDispatcher) error {\n\treturn ErrNotImplemented\n}", "func (s *Subscriber) Update(sub *data.Subscription) {\n\ts.mtx.Lock()\n\tdefer s.mtx.Unlock()\n\n\tif _, ok := s.stopChan[sub.ID]; !ok {\n\t\treturn\n\t}\n\t// Unsubscribe\n\ts.stopChan[sub.ID] <- stop\n\tdelete(s.stopChan, sub.ID)\n\t// Resubscribe with new parameters\n\tstopChan := make(chan command, 1)\n\ts.stopChan[sub.ID] = stopChan\n\n\tw := worker{\n\t\tsub: *sub,\n\t\tdao: s.dao,\n\t\tl: s.l,\n\t\tstopChan: stopChan,\n\t}\n\tgo w.run()\n}", "func (m *InMemAgent) SubscribeStatusUpdate(handler func()) {\n\tm.statusUpdateHandlersLock.Lock()\n\tdefer m.statusUpdateHandlersLock.Unlock()\n\tm.statusUpdateHandlers = append(m.statusUpdateHandlers, handler)\n}", "func (b *EventStreamBroker) UpdateSubscriptionsHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"Only POST method allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\th := w.Header()\n\th.Set(\"Cache-Control\", \"no-cache\")\n\th.Set(\"Connection\", \"keep-alive\")\n\th.Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\t// Incoming request data\n\tvar reqData updateSubscriptionsData\n\n\t// Decode JSON body\n\tdec := json.NewDecoder(r.Body)\n\tif err := dec.Decode(&reqData); err != nil {\n\t\thttp.Error(w, err.Error(), 
http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// If the ID isn't provided, that means it is a new client\n\t// So generate an ID and create a new client.\n\tif reqData.SessID == \"\" {\n\t\thttp.Error(w, \"Session ID is required 'session_id'\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tb.mu.RLock()\n\tclient, ok := b.clients[reqData.SessID]\n\tb.mu.RUnlock()\n\tif !ok {\n\t\thttp.Error(w, \"Invalid session ID\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\n\tvar wg sync.WaitGroup\n\n\tfor _, topic := range reqData.Add {\n\t\twg.Add(1)\n\t\tgo func(t string) {\n\t\t\tif err := b.subscriptionBroker.SubscribeClient(client, t); err != nil {\n\t\t\t\tlog.Println(\"Error:\", err)\n\n\t\t\t\td, _ := json.Marshal(map[string]interface{}{\n\t\t\t\t\t\"error\": map[string]string{\n\t\t\t\t\t\t\"code\": \"subscription-failure\",\n\t\t\t\t\t\t\"message\": fmt.Sprintf(\"Cannot subscribe to topic %v\", t),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tclient.writeChannel <- d\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(topic)\n\t}\n\n\tfor _, topic := range reqData.Remove {\n\t\twg.Add(1)\n\t\tgo func(t string) {\n\t\t\tb.subscriptionBroker.UnsubscribeClient(ctx, client, t)\n\t\t\twg.Done()\n\t\t}(topic)\n\t}\n\n\twg.Wait()\n\n\tclient.mu.RLock()\n\tlog.Printf(\"Client '%v' subscriptions updated, total topics subscribed: %v \\n\", client.sessID, len(client.topics))\n\tclient.mu.RUnlock()\n\n\t// Return the ID of the client.\n\tenc := json.NewEncoder(w)\n\tif err := enc.Encode(map[string]string{\"session_id\": reqData.SessID}); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func SubscribeToEvents() {\n\tnotesService := notes.NewNotesService(os.Getenv(\"MICRO_API_TOKEN\"))\n\trsp, err := notesService.Events(&notes.EventsRequest{\n\t\tId: \"63c0cdf8-2121-11ec-a881-0242e36f037a\",\n\t})\n\tfmt.Println(rsp, err)\n}", "func Subscribe(callback func(models.Update), absolute []time.Time, relative []time.Duration) int64 
{\n\tif callback == nil {\n\t\treturn -1\n\t}\n\n\tif absolute == nil {\n\t\tabsolute = make([]time.Time, 0)\n\t}\n\tif relative == nil {\n\t\trelative = make([]time.Duration, 0)\n\t}\n\n\tid := rand.Int63()\n\n\t// round timestamps\n\tfor i, t := range absolute {\n\t\tabsolute[i] = models.Round(t)\n\t}\n\n\t// remove outdated relative timestamps\n\tfor i, d := range relative {\n\t\tif d <= -1*outdatedAfter {\n\t\t\trelative = append(relative[:i], relative[i+1:]...)\n\t\t}\n\t}\n\n\ts := subscriber{\n\t\tcallback: callback,\n\t\ttimestamps: absolute,\n\t\trelativeTimestamps: relative,\n\t}\n\n\tsm.Lock()\n\tsubscribers[id] = &s\n\tsm.Unlock()\n\n\tgo func() {\n\t\tcm.RLock()\n\t\tfor _, u := range cache {\n\t\t\tnotify(u, &s)\n\t\t}\n\t\tcm.RUnlock()\n\t}()\n\n\treturn id\n}", "func Subscribe(conn net.Conn, command []string, pubsub *PubSub) {\n\n\tfmt.Println(\"SUBSCRIBE TO:\", command[1:])\n\n\tch := make(chan string)\n\n\tdefer func() {\n\t\tconn.Close()\n\t\tpubsub.Unsubscribe <- UnsubscribeEvent{command[1], ch}\n\t}()\n\n\tpubsub.Subscribe <- SubscribeEvent{command[1], ch}\n\n\tfor msg := range ch {\n\t\t//fmt.Fprintf(conn, \"%s\\n\", msg)\n\t\t_, err := conn.Write([]byte(msg + \"\\n\"))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (hubPtr *Hub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {\n\t// We need the mutex to reliably start/stop the update loop\n\thubPtr.stateLock.Lock()\n\tdefer hubPtr.stateLock.Unlock()\n\n\t// Subscribe the Called and track the subscriber count\n\tsub := hubPtr.updateScope.Track(hubPtr.updateFeed.Subscribe(sink))\n\n\t// Subscribers require an active notification loop, start it\n\tif !hubPtr.updating {\n\t\thubPtr.updating = true\n\t\tgo hubPtr.updater()\n\t}\n\treturn sub\n}", "func (c *MQTTConnector) UpdateHandler(oldTs registry.TimeSeries, newTS registry.TimeSeries) error {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tif oldTs.Source.MQTTSource != newTS.Source.MQTTSource {\n\t\t// Remove old 
subscription\n\t\tif oldTs.Source.MQTTSource != nil {\n\t\t\terr := c.unregister(oldTs.Source.MQTTSource)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"MQTT: Error removing subscription: %v\", err)\n\t\t\t}\n\t\t}\n\t\tdelete(c.failedRegistrations, oldTs.Name)\n\t\t// Add new subscription\n\t\tif newTS.Source.MQTTSource != nil {\n\t\t\terr := c.register(*newTS.Source.MQTTSource)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"MQTT: Error adding subscription: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (m *Monitor) subscribe(info SubscriptionsInfo) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tm.Repository.PutSubscription(info)\n\tlist := m.Repository.ClientSubscriptions(info.ClientID)\n\tif s, ok := m.Repository.GetSession(info.ClientID); ok {\n\t\ts.Subscriptions = len(list)\n\t\tm.Repository.PutSession(s)\n\t}\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
/ Returns an elliptic.CurveWrapper around this group. The elliptic.CurveParams returned by the .Params() method the Curve should not be used for doing ScalarMult, etc.!
func (m *ModulusGroup) AsCurve() elliptic.Curve { return &asCurve{m} }
[ "func (s Keygen) Curve() elliptic.Curve {\n\treturn s.group\n}", "func CurveParamsParams(curve *elliptic.CurveParams,) *elliptic.CurveParams", "func NewCurve(xyz XYZer) Curve {\n\tc := Curve{}\n\tc.points.minXYZ.X, c.points.minXYZ.Y, c.points.minXYZ.Z = xyz.XYZ(0)\n\tc.points.maxXYZ.X, c.points.maxXYZ.Y, c.points.maxXYZ.Z = xyz.XYZ(0)\n\tc.points.XYZs = CopyXYZs(xyz)\n\tfor i := range c.points.XYZs {\n\t\tx, y, z := c.points.XYZ(i)\n\t\tupdateBounds(&c.points, x, y, z)\n\t}\n\treturn c\n}", "func EllipticCurve() elliptic.Curve {\n\treturn p256Strategy()\n}", "func (f *CurveFlag) Curve() elliptic.Curve {\n\tswitch f.curveID {\n\tcase CurveP224:\n\t\treturn elliptic.P224()\n\tcase CurveP256:\n\t\treturn elliptic.P256()\n\tcase CurveP384:\n\t\treturn elliptic.P384()\n\tcase CurveP521:\n\t\treturn elliptic.P521()\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n}", "func getECDSACurve(scheme SignatureScheme) elliptic.Curve {\n\tswitch scheme {\n\tcase ECDSAWithP256AndSHA256:\n\t\treturn elliptic.P256()\n\tcase ECDSAWithP384AndSHA384:\n\t\treturn elliptic.P384()\n\tcase ECDSAWithP521AndSHA512:\n\t\treturn elliptic.P521()\n\tdefault:\n\t\treturn nil\n\t}\n}", "func (c *curve) init(self kyber.Group, p *Param, fullGroup bool,\n\tnull, base point) *curve {\n\tc.self = self\n\tc.Param = *p\n\tc.full = fullGroup\n\tc.null = null\n\n\t// Edwards curve parameters as ModInts for convenience\n\tc.a.Init(&p.A, &p.P)\n\tc.d.Init(&p.D, &p.P)\n\n\t// Cofactor\n\tc.cofact.Init64(int64(p.R), &c.P)\n\n\t// Determine the modulus for scalars on this curve.\n\t// Note that we do NOT initialize c.order with Init(),\n\t// as that would normalize to the modulus, resulting in zero.\n\t// Just to be sure it's never used, we leave c.order.M set to nil.\n\t// We want it to be in a ModInt so we can pass it to P.Mul(),\n\t// but the scalar's modulus isn't needed for point multiplication.\n\tif fullGroup {\n\t\t// Scalar modulus is prime-order times the 
ccofactor\n\t\tc.order.V.SetInt64(int64(p.R)).Mul(&c.order.V, &p.Q)\n\t} else {\n\t\tc.order.V.Set(&p.Q) // Prime-order subgroup\n\t}\n\n\t// Useful ModInt constants for this curve\n\tc.zero.Init64(0, &c.P)\n\tc.one.Init64(1, &c.P)\n\n\t// Identity element is (0,1)\n\tnull.initXY(zero, one, self)\n\n\t// Base point B\n\tvar bx, by *big.Int\n\tif !fullGroup {\n\t\tbx, by = &p.PBX, &p.PBY\n\t} else {\n\t\tbx, by = &p.FBX, &p.FBY\n\t\tbase.initXY(&p.FBX, &p.FBY, self)\n\t}\n\tif by.Sign() == 0 {\n\t\t// No standard base point was defined, so pick one.\n\t\t// Find the lowest-numbered y-coordinate that works.\n\t\t//println(\"Picking base point:\")\n\t\tvar x, y mod.Int\n\t\tfor y.Init64(2, &c.P); ; y.Add(&y, &c.one) {\n\t\t\tif !c.solveForX(&x, &y) {\n\t\t\t\tcontinue // try another y\n\t\t\t}\n\t\t\tif c.coordSign(&x) != 0 {\n\t\t\t\tx.Neg(&x) // try positive x first\n\t\t\t}\n\t\t\tbase.initXY(&x.V, &y.V, self)\n\t\t\tif c.validPoint(base) {\n\t\t\t\tbreak // got one\n\t\t\t}\n\t\t\tx.Neg(&x) // try -bx\n\t\t\tif c.validPoint(base) {\n\t\t\t\tbreak // got one\n\t\t\t}\n\t\t}\n\t\t//println(\"BX: \"+x.V.String())\n\t\t//println(\"BY: \"+y.V.String())\n\t\tbx, by = &x.V, &y.V\n\t}\n\tbase.initXY(bx, by, self)\n\n\t// Uniform representation encoding methods,\n\t// only useful when using the full group.\n\t// (Points taken from the subgroup would be trivially recognizable.)\n\tif fullGroup {\n\t\tif p.Elligator1s.Sign() != 0 {\n\t\t\tc.hide = new(el1param).init(c, &p.Elligator1s)\n\t\t} else if p.Elligator2u.Sign() != 0 {\n\t\t\tc.hide = new(el2param).init(c, &p.Elligator2u)\n\t\t}\n\t}\n\n\t// Sanity checks\n\tif !c.validPoint(null) {\n\t\tpanic(\"invalid identity point \" + null.String())\n\t}\n\tif !c.validPoint(base) {\n\t\tpanic(\"invalid base point \" + base.String())\n\t}\n\n\treturn c\n}", "func getCurve() (*prime.Field, *prime.Curve, *prime.Point) {\n\tf := new(prime.Field).SetOrder(fieldOrder)\n\tc := new(prime.Curve).Define(f, 0, 7)\n\tg := 
c.NewPoint().Set(f.Element(baseX), f.Element(baseY))\n\treturn f, c, g\n}", "func GetCurve(s string) elliptic.Curve {\n\ts3 := s[len(s)-3:]\n\tif s3 == \"256\" {\n\t\treturn elliptic.P256()\n\t} else if s3 == \"384\" {\n\t\treturn elliptic.P384()\n\t} else if s3 == \"521\" {\n\t\treturn elliptic.P521()\n\t}\n\treturn elliptic.P224()\n}", "func newPrivateKeyOnCurve(c elliptic.Curve) (*PrivateKey, error) {\n\tpk, err := ecdsa.GenerateKey(c, rand.Reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PrivateKey{*pk}, nil\n}", "func Generic(c elliptic.Curve) KeyExchange {\n\tif c == nil {\n\t\tpanic(\"ecdh: curve is nil\")\n\t}\n\treturn genericCurve{curve: c}\n}", "func curveByName(name string) elliptic.Curve {\n\tswitch name {\n\tcase \"P-224\":\n\t\treturn elliptic.P224()\n\tcase \"P-256\":\n\t\treturn elliptic.P256()\n\tcase \"P-384\":\n\t\treturn elliptic.P384()\n\tcase \"P-521\":\n\t\treturn elliptic.P521()\n\tcase \"P-256K\", \"SECP256K1\", \"secp256k1\":\n\t\treturn secp256k1.S256()\n\tdefault:\n\t\treturn nil\n\t}\n}", "func CurveParamsScalarMult(curve *elliptic.CurveParams, Bx, By *big.Int, k []byte) (*big.Int, *big.Int)", "func (in *CapoolEllipticCurve) DeepCopy() *CapoolEllipticCurve {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CapoolEllipticCurve)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func CurveParamsScalarBaseMult(curve *elliptic.CurveParams, k []byte) (*big.Int, *big.Int)", "func GetCurve(curve string) elliptic.Curve {\n\treturn secp256k1subtle.GetCurve(curve)\n}", "func getCurve(curve string) (elliptic.Curve, string, error) {\n\tswitch curve {\n\tcase \"secp224r1\": // secp224r1: NIST/SECG curve over a 224 bit prime field\n\t\treturn elliptic.P224(), \"secp224r1\", nil\n\tcase \"prime256v1\": // prime256v1: X9.62/SECG curve over a 256 bit prime field\n\t\treturn elliptic.P256(), \"prime256v1\", nil\n\tcase \"secp384r1\": // secp384r1: NIST/SECG curve over a 384 bit prime field\n\t\treturn elliptic.P384(), \"secp384r1\", 
nil\n\tcase \"secp521r1\": // secp521r1: NIST/SECG curve over a 521 bit prime field\n\t\treturn elliptic.P521(), \"secp521r1\", nil\n\tdefault:\n\t\treturn nil, \"\", fmt.Errorf(\"%s\", helpers.RFgB(\"incorrect curve size passed\"))\n\t}\n}", "func (this *NurbsCurve) clone() *NurbsCurve {\n\treturn &NurbsCurve{\n\t\tdegree: this.degree,\n\t\tcontrolPoints: append([]HomoPoint(nil), this.controlPoints...),\n\t\tknots: this.knots.Clone(),\n\t}\n}", "func GetEdwardsCurve() CurveParams {\n\tinitOnce.Do(initEdBN256)\n\treturn edwards\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MaxItemID retrieves the current largest item id is at You can walk backward from here to discover all items.
func MaxItemID() int { var maxItemID int err := getJSON(MaxItemIDURL, &maxItemID) if err != nil { log.Panicln(err.Error()) } return maxItemID }
[ "func (h *HNClient) MaxItemID() (int, error) {\n\turl := h.urlString(\"maxitem\")\n\tresp, err := h.httpClient.Get(url)\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"could not make GET request for max item id: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn -1, fmt.Errorf(\"non 200 response code: %w\", err)\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"could not read response body: %w\", err)\n\t}\n\tvar max int\n\terr = json.Unmarshal(body, &max)\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"could not unmarshal max item id: %w\", err)\n\t}\n\n\treturn max, nil\n}", "func GetLastItemID() (count int) {\r\n\tcount = 0\r\n\tselDB, err := Connection.Query(\"SELECT MAX(item_id) FROM item_master\")\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tfor selDB.Next() {\r\n\t\terr = selDB.Scan(&count)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err.Error())\r\n\t\t}\r\n\t}\r\n\treturn\r\n}", "func getMaxID() uint8 {\n\tmaxID++\n\treturn maxID\n}", "func getMaxID() int {\n\n\tif len(cdb.classMap) != 0 {\n\t\tkeys := make([]int, 0, len(cdb.classMap))\n\t\tfor k := range cdb.classMap {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Ints(keys)\n\t\treturn keys[len(keys)-1]\n\t}\n\n\treturn -1\n\n}", "func (m *MessageReplies) GetMaxID() (value int, ok bool) {\n\tif m == nil {\n\t\treturn\n\t}\n\tif !m.Flags.Has(2) {\n\t\treturn value, false\n\t}\n\treturn m.MaxID, true\n}", "func (p *Policy) getMaxBlockSize(ic *interop.Context, _ []stackitem.Item) stackitem.Item {\n\treturn stackitem.NewBigInteger(big.NewInt(int64(p.GetMaxBlockSizeInternal(ic.DAO))))\n}", "func (_SmartTgStats *SmartTgStatsCaller) MaxRequestID(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _SmartTgStats.contract.Call(opts, out, \"maxRequestID\")\n\treturn *ret0, err\n}", "func getMaxEventID() int {\n\tmaxID := -1\n\tfor _, event := range allEvents 
{\n\t\tif event.ID > maxID {\n\t\t\tmaxID = event.ID\n\t\t}\n\t}\n\treturn maxID\n}", "func (s *Queue) MessageMaxID(userID int64, queueName string) (int64, error) {\n\tkey := models.QueueMaxIDKey(userID, queueName)\n\tval, getErr := s.db.Get(key)\n\tif getErr != nil {\n\t\treturn -1, getErr\n\t}\n\n\tmaxID, err := strconvutil.ParseInt64(val)\n\tif err != nil {\n\t\treturn -1, errors.DataBroken(key, err)\n\t}\n\n\treturn maxID, nil\n}", "func (m *MessageReplies) SetMaxID(value int) {\n\tm.Flags.Set(2)\n\tm.MaxID = value\n}", "func (v *parameter) MaxItems() int {\n\tif !v.HasMaxItems() {\n\t\treturn 0\n\t}\n\treturn *v.maxItems\n}", "func (l *Limiter) MaxItems() int {\n\treturn l.maxItems\n}", "func (o *ListScansParams) SetMaxItems(maxItems *int64) {\n\to.MaxItems = maxItems\n}", "func max(n *node) Item {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tfor len(n.children) > 0 {\n\t\tn = n.children[len(children)-1]\n\t}\n\tif len(n.items) == 0 {\n\t\treturn nil\n\t}\n\treturn n.items[len(n.items)-1]\n}", "func (b *MessagesGetLongPollHistoryBuilder) MaxMsgID(v int) *MessagesGetLongPollHistoryBuilder {\n\tb.Params[\"max_msg_id\"] = v\n\treturn b\n}", "func MaxKey() Val { return Val{t: bsontype.MaxKey} }", "func (bst *BinarySearchTree) Max() BinarySearchTreeItem {\n\tif bst.Right == nil {\n\t\treturn bst.Item\n\t}\n\treturn bst.Right.Max()\n}", "func (list *List) HighestID() (ID, bool) {\n\tif list.highest == -1 {\n\t\treturn ID(0), false\n\t}\n\n\treturn ID(list.highest), true\n}", "func (s *ListSigningCertificatesInput) SetMaxItems(v int64) *ListSigningCertificatesInput {\n\ts.MaxItems = &v\n\treturn s\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use CodeRequest.ProtoReflect.Descriptor instead.
func (*CodeRequest) Descriptor() ([]byte, []int) { return file_helloworld_helloworld_proto_rawDescGZIP(), []int{1} }
[ "func (*CodeLensRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{163}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*CodeActionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{157}\n}", "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*StubImplementationCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{39}\n}", "func (*CheckCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_verifycode_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*DiscoveryRequest) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{1}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*GetForgetPasswordCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sms_sms_proto_rawDescGZIP(), []int{2}\n}", "func (*MetadataKind_Request) Descriptor() ([]byte, []int) {\n\treturn file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*GetServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateRequestCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_mobilecoind_api_proto_rawDescGZIP(), []int{24}\n}", "func (*ChangeLogLevelRequest) Descriptor() ([]byte, []int) {\n\treturn file_types_api_v1beta1_diagnostic_proto_rawDescGZIP(), []int{2}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*CMsgGCPlayerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_dota_gcmessages_client_proto_rawDescGZIP(), []int{117}\n}", "func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{13, 0}\n}", "func (*ParseRequestCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_mobilecoind_api_proto_rawDescGZIP(), []int{22}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}", "func (*IssueCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_verifycode_pb_request_proto_rawDescGZIP(), []int{0}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use PaqueteRequest.ProtoReflect.Descriptor instead.
func (*PaqueteRequest) Descriptor() ([]byte, []int) { return file_helloworld_helloworld_proto_rawDescGZIP(), []int{2} }
[ "func (*DiscoveryRequest) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{1}\n}", "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}", "func (*CreateAlterRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{1}\n}", "func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{13, 0}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{7}\n}", "func (*StopProvider_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{6, 0}\n}", "func (*RefactorRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{26}\n}", "func (*DescribePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{6}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{14, 0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_proto_webApi_webApi_proto_rawDescGZIP(), []int{1}\n}", "func 
(*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{0}\n}", "func (*RequestPresentationRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{0}\n}", "func (*ConfigureProvider_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{11, 0}\n}", "func (*WarnRequest) Descriptor() ([]byte, []int) {\n\treturn file_punishments_punishments_proto_rawDescGZIP(), []int{12}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewGetPlatformsParams creates a new GetPlatformsParams object with the default values initialized.
func NewGetPlatformsParams() *GetPlatformsParams { var () return &GetPlatformsParams{ timeout: cr.DefaultTimeout, } }
[ "func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}", "func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func NewGetOperatingSystemsParams() *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func NewGetProviderParams() *GetProviderParams {\n\tvar ()\n\treturn &GetProviderParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams {\n\to.SetExtended(extended)\n\treturn o\n}", "func NewGetPlatformsParamsWithContext(ctx context.Context) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\tContext: ctx,\n\t}\n}", "func NewPcloudSystempoolsGetParams() *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewGetParams() *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewProviderParams() ProviderParams {\n\n\treturn ProviderParams{}\n}", "func NewGetPortingsParams() *GetPortingsParams {\n\n\treturn &GetPortingsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewGetHardwaresParams() *GetHardwaresParams {\n\tvar ()\n\treturn 
&GetHardwaresParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewGetOrganizationsConnectionParams() *GetOrganizationsConnectionParams {\n\treturn &GetOrganizationsConnectionParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (o *GetMetricsParams) WithPlatforms(platforms *string) *GetMetricsParams {\n\to.SetPlatforms(platforms)\n\treturn o\n}", "func NewGetNetworthParams() *GetNetworthParams {\n\tvar ()\n\treturn &GetNetworthParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewGetCurrentGenerationParams() *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewGetReleasesParams() *GetReleasesParams {\n\tvar (\n\t\ttillerHostDefault = string(\"None\")\n\t)\n\treturn &GetReleasesParams{\n\t\tTillerHost: &tillerHostDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewGetPlatformsParamsWithTimeout creates a new GetPlatformsParams object with the default values initialized, and the ability to set a timeout on a request
func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams { var () return &GetPlatformsParams{ timeout: timeout, } }
[ "func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func NewGetPlatformsParams() *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (o *GetPlatformsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetOperatingSystemsParamsWithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetParamsWithTimeout(timeout time.Duration) *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewPcloudSystempoolsGetParamsWithTimeout(timeout time.Duration) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetProviderParamsWithTimeout(timeout time.Duration) *GetProviderParams {\n\tvar ()\n\treturn &GetProviderParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetOrganizationsConnectionParamsWithTimeout(timeout time.Duration) *GetOrganizationsConnectionParams {\n\treturn &GetOrganizationsConnectionParams{\n\t\ttimeout: timeout,\n\t}\n}", "func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func NewGetReleasesParamsWithTimeout(timeout time.Duration) *GetReleasesParams {\n\tvar (\n\t\ttillerHostDefault = string(\"None\")\n\t)\n\treturn &GetReleasesParams{\n\t\tTillerHost: &tillerHostDefault,\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetSolPoliciesMoidParamsWithTimeout(timeout time.Duration) *GetSolPoliciesMoidParams {\n\tvar 
()\n\treturn &GetSolPoliciesMoidParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewOrgGetParamsWithTimeout(timeout time.Duration) *OrgGetParams {\n\tvar ()\n\treturn &OrgGetParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetLicenseKeyParamsWithTimeout(timeout time.Duration) *GetLicenseKeyParams {\n\tvar ()\n\treturn &GetLicenseKeyParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetCorporationsCorporationIDStructuresParamsWithTimeout(timeout time.Duration) *GetCorporationsCorporationIDStructuresParams {\n\tvar (\n\t\tacceptLanguageDefault = string(\"en-us\")\n\t\tdatasourceDefault = string(\"tranquility\")\n\t\tlanguageDefault = string(\"en-us\")\n\t\tpageDefault = int32(1)\n\t)\n\treturn &GetCorporationsCorporationIDStructuresParams{\n\t\tAcceptLanguage: &acceptLanguageDefault,\n\t\tDatasource: &datasourceDefault,\n\t\tLanguage: &languageDefault,\n\t\tPage: &pageDefault,\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetClockParamsWithTimeout(timeout time.Duration) *GetClockParams {\n\treturn &GetClockParams{\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetNetworthParamsWithTimeout(timeout time.Duration) *GetNetworthParams {\n\tvar ()\n\treturn &GetNetworthParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func NewGetTimeParamsWithTimeout(timeout time.Duration) *GetTimeParams {\n\tvar ()\n\treturn &GetTimeParams{\n\n\t\ttimeout: timeout,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewGetPlatformsParamsWithContext creates a new GetPlatformsParams object with the default values initialized, and the ability to set a context for a request
func NewGetPlatformsParamsWithContext(ctx context.Context) *GetPlatformsParams { var () return &GetPlatformsParams{ Context: ctx, } }
[ "func NewGetPlatformsParams() *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (o *GetMetricsParams) WithPlatforms(platforms *string) *GetMetricsParams {\n\to.SetPlatforms(platforms)\n\treturn o\n}", "func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams {\n\to.SetExtended(extended)\n\treturn o\n}", "func NewGetOperatingSystemsParamsWithContext(ctx context.Context) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\tContext: ctx,\n\t}\n}", "func NewPcloudSystempoolsGetParamsWithContext(ctx context.Context) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\tContext: ctx,\n\t}\n}", "func (o *PlatformsAllOfData) GetPlatforms() map[string]Platform {\n\tif o == nil {\n\t\tvar ret map[string]Platform\n\t\treturn ret\n\t}\n\n\treturn o.Platforms\n}", "func (o *GetMetricsParams) SetPlatforms(platforms *string) {\n\to.Platforms = platforms\n}", "func NewGetOperatingSystemsParams() *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewGetOrganizationsConnectionParamsWithContext(ctx context.Context) *GetOrganizationsConnectionParams {\n\treturn 
&GetOrganizationsConnectionParams{\n\t\tContext: ctx,\n\t}\n}", "func NewGetSolPoliciesMoidParamsWithContext(ctx context.Context) *GetSolPoliciesMoidParams {\n\tvar ()\n\treturn &GetSolPoliciesMoidParams{\n\n\t\tContext: ctx,\n\t}\n}", "func NewGetClusterSupportedPlatforms(ctx *middleware.Context, handler GetClusterSupportedPlatformsHandler) *GetClusterSupportedPlatforms {\n\treturn &GetClusterSupportedPlatforms{Context: ctx, Handler: handler}\n}", "func NewGetCurrentGenerationParamsWithContext(ctx context.Context) *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\tContext: ctx,\n\t}\n}", "func NewPcloudSystempoolsGetParams() *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (o *PlatformsByPlatformNameAllOfData) GetPlatforms() []Platform {\n\tif o == nil {\n\t\tvar ret []Platform\n\t\treturn ret\n\t}\n\n\treturn o.Platforms\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewGetPlatformsParamsWithHTTPClient creates a new GetPlatformsParams object with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams { var () return &GetPlatformsParams{ HTTPClient: client, } }
[ "func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func NewGetPlatformsParams() *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (o *GetPlatformsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func NewGetOperatingSystemsParamsWithHTTPClient(client *http.Client) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetProviderParamsWithHTTPClient(client *http.Client) *GetProviderParams {\n\tvar ()\n\treturn &GetProviderParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetTimeParamsWithHTTPClient(client *http.Client) *GetTimeParams {\n\tvar ()\n\treturn &GetTimeParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewCreateOrUpdateParamsWithHTTPClient(client *http.Client) *CreateOrUpdateParams {\n\tvar ()\n\treturn &CreateOrUpdateParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetParamsWithHTTPClient(client *http.Client) *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetReleasesParamsWithHTTPClient(client *http.Client) *GetReleasesParams {\n\tvar (\n\t\ttillerHostDefault = string(\"None\")\n\t)\n\treturn &GetReleasesParams{\n\t\tTillerHost: &tillerHostDefault,\n\t\tHTTPClient: client,\n\t}\n}", "func NewPcloudSystempoolsGetParamsWithHTTPClient(client *http.Client) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetSolPoliciesMoidParamsWithHTTPClient(client *http.Client) *GetSolPoliciesMoidParams 
{\n\tvar ()\n\treturn &GetSolPoliciesMoidParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetCurrentGenerationParamsWithHTTPClient(client *http.Client) *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewCreateWidgetParamsWithHTTPClient(client *http.Client) *CreateWidgetParams {\n\tvar (\n\t\tacceptDefault = string(\"application/json\")\n\t\tcontentTypeDefault = string(\"application/json\")\n\t)\n\treturn &CreateWidgetParams{\n\t\tAccept: &acceptDefault,\n\t\tContentType: &contentTypeDefault,\n\t\tHTTPClient: client,\n\t}\n}", "func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func NewOrgGetParamsWithHTTPClient(client *http.Client) *OrgGetParams {\n\tvar ()\n\treturn &OrgGetParams{\n\t\tHTTPClient: client,\n\t}\n}", "func (o *PublicWebLinkPlatformEstablishParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetProviderParams) WithHTTPClient(client *http.Client) *GetProviderParams {\n\to.SetHTTPClient(client)\n\treturn o\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithTimeout adds the timeout to the get platforms params
func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams { o.SetTimeout(timeout) return o }
[ "func (o *GetPlatformsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func WithTimeout(t time.Duration) apiOption {\n\treturn func(m *Management) {\n\t\tm.timeout = t\n\t}\n}", "func (o *PublicWebLinkPlatformEstablishParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetOperatingSystemsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetDevicesUnknownParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetOperatingSystemsParams) WithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func NewGetOperatingSystemsParamsWithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func (o *GetBundleByKeyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetDevicesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *NvmeSubsystemMapGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetDevicesAllParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetDevicesUnknownParams) WithTimeout(timeout time.Duration) *GetDevicesUnknownParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (o *PcloudSystempoolsGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func NewGetDevicesUnknownParamsWithTimeout(timeout time.Duration) *GetDevicesUnknownParams {\n\treturn &GetDevicesUnknownParams{\n\t\ttimeout: timeout,\n\t}\n}", "func (o *GetOrganizationApplicationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *AddOperatingSystemParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", 
"func (o *GetProviderParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetSpecialPaymentProvidersParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetTimeout adds the timeout to the get platforms params
func (o *GetPlatformsParams) SetTimeout(timeout time.Duration) { o.timeout = timeout }
[ "func (o *GetOperatingSystemsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *PublicWebLinkPlatformEstablishParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetGPUArchitectureParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetUIParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *PcloudSystempoolsGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (o *GetHardwareTapesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetDevicesUnknownParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetClockParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *AddOperatingSystemParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetDialogueRandomParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetBundleByKeyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetBuildPropertiesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetDevicesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *NvmeSubsystemMapGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetHardwaresParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetProviderParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetLicenseKeyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithContext adds the context to the get platforms params
func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams { o.SetContext(ctx) return o }
[ "func (o *GetOperatingSystemsParams) WithContext(ctx context.Context) *GetOperatingSystemsParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func (_obj *WebApiAuth) SysConfig_GetPageWithContext(tarsCtx context.Context, pageSize int32, pageIndex int32, req *SysConfig, res *SysConfig_List, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_int32(pageSize, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32(pageIndex, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = req.WriteBlock(_os, 3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 4)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysConfig_GetPage\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = (*res).ReadBlock(_is, 4, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func (o *GetPlatformsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (_obj *WebApiAuth) SysConfig_GetWithContext(tarsCtx context.Context, req *SysConfig, res *SysConfig, _opt ...map[string]string) (err error) 
{\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysConfig_Get\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = (*res).ReadBlock(_is, 2, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func (p *Provider) listWithContext(ctx context.Context, dir string, recursive bool, keys []*provider.KVPair, nextToken *string) ([]*provider.KVPair, error) {\n\tvar err error\n\n\t// input to the SSM to get parameters by path\n\tinput := &ssm.GetParametersByPathInput{\n\t\tPath: aws.String(dir),\n\t\tRecursive: aws.Bool(recursive),\n\t\t// WithDecryption: t.Bool(p.cfg.WithDecryption),\n\t}\n\n\tif nextToken != nil {\n\t\tinput.NextToken = nextToken\n\t}\n\n\toutput, err := p.ssm.GetParametersByPathWithContext(ctx, input)\n\tif err != nil {\n\t\treturn keys, err\n\t}\n\n\tfor _, param := range output.Parameters {\n\t\tkeys = append(keys, parameterKVPair(param))\n\t}\n\n\t// s.parameters = append(s.parameters, 
output.Parameters...)\n\n\tif nextToken != nil {\n\t\tp.listWithContext(ctx, dir, recursive, keys, nextToken)\n\t}\n\n\treturn keys, err\n}", "func FetchHTTPWithContext(ctx context.Context, jwkurl string, options ...Option) (*Set, error) {\n\thttpcl := http.DefaultClient\n\tfor _, option := range options {\n\t\tswitch option.Name() {\n\t\tcase optkeyHTTPClient:\n\t\t\thttpcl = option.Value().(*http.Client)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, jwkurl, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to new request to remote JWK\")\n\t}\n\n\tres, err := httpcl.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to fetch remote JWK\")\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"failed to fetch remote JWK (status = %d)\", res.StatusCode)\n\t}\n\n\treturn Parse(res.Body)\n}", "func MetaWithContext(ctx context.Context, newMeta map[string]interface{}) context.Context {\n\tprevMeta := MetaFromContext(ctx)\n\n\tif prevMeta == nil {\n\t\tprevMeta = make(map[string]interface{})\n\t}\n\n\tfor k, v := range newMeta {\n\t\tprevMeta[k] = v\n\t}\n\n\treturn context.WithValue(ctx, MetaCtxKey, prevMeta)\n}", "func MainWithContext(ctx context.Context, component string, ctors ...injection.ControllerConstructor) {\n\t// Allow configuration of threads per controller\n\tif val, ok := os.LookupEnv(\"K_THREADS_PER_CONTROLLER\"); ok {\n\t\tthreadsPerController, err := strconv.Atoi(val)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to parse value %q of K_THREADS_PER_CONTROLLER: %v\\n\", val, err)\n\t\t}\n\t\tcontroller.DefaultThreadsPerController = threadsPerController\n\t}\n\n\t// TODO(mattmoor): Remove this once HA is stable.\n\tdisableHighAvailability := flag.Bool(\"disable-ha\", false,\n\t\t\"Whether to disable high-availability functionality for this component. 
This flag will be deprecated \"+\n\t\t\t\"and removed when we have promoted this feature to stable, so do not pass it without filing an \"+\n\t\t\t\"issue upstream!\")\n\n\t// HACK: This parses flags, so the above should be set once this runs.\n\tcfg := injection.ParseAndGetRESTConfigOrDie()\n\n\tif *disableHighAvailability {\n\t\tctx = WithHADisabled(ctx)\n\t}\n\n\tMainWithConfig(ctx, component, cfg, ctors...)\n}", "func (o *GetSolPoliciesMoidParams) WithContext(ctx context.Context) *GetSolPoliciesMoidParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func (uko *UkoV4) ListKeystoresWithContext(ctx context.Context, listKeystoresOptions *ListKeystoresOptions) (result *KeystoreList, response *core.DetailedResponse, err error) {\n\terr = core.ValidateStruct(listKeystoresOptions, \"listKeystoresOptions\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbuilder := core.NewRequestBuilder(core.GET)\n\tbuilder = builder.WithContext(ctx)\n\tbuilder.EnableGzipCompression = uko.GetEnableGzipCompression()\n\t_, err = builder.ResolveRequestURL(uko.Service.Options.URL, `/api/v4/keystores`, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor headerName, headerValue := range listKeystoresOptions.Headers {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\n\tsdkHeaders := common.GetSdkHeaders(\"uko\", \"V4\", \"ListKeystores\")\n\tfor headerName, headerValue := range sdkHeaders {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\tbuilder.AddHeader(\"Accept\", \"application/json\")\n\n\tif listKeystoresOptions.Type != nil {\n\t\tbuilder.AddQuery(\"type\", strings.Join(listKeystoresOptions.Type, \",\"))\n\t}\n\tif listKeystoresOptions.Name != nil {\n\t\tbuilder.AddQuery(\"name\", fmt.Sprint(*listKeystoresOptions.Name))\n\t}\n\tif listKeystoresOptions.Description != nil {\n\t\tbuilder.AddQuery(\"description\", fmt.Sprint(*listKeystoresOptions.Description))\n\t}\n\tif listKeystoresOptions.Group != nil {\n\t\tbuilder.AddQuery(\"group\", fmt.Sprint(*listKeystoresOptions.Group))\n\t}\n\tif 
listKeystoresOptions.Groups != nil {\n\t\tbuilder.AddQuery(\"groups[]\", fmt.Sprint(*listKeystoresOptions.Groups))\n\t}\n\tif listKeystoresOptions.VaultID != nil {\n\t\tbuilder.AddQuery(\"vault.id\", strings.Join(listKeystoresOptions.VaultID, \",\"))\n\t}\n\tif listKeystoresOptions.Location != nil {\n\t\tbuilder.AddQuery(\"location\", fmt.Sprint(*listKeystoresOptions.Location))\n\t}\n\tif listKeystoresOptions.Limit != nil {\n\t\tbuilder.AddQuery(\"limit\", fmt.Sprint(*listKeystoresOptions.Limit))\n\t}\n\tif listKeystoresOptions.Offset != nil {\n\t\tbuilder.AddQuery(\"offset\", fmt.Sprint(*listKeystoresOptions.Offset))\n\t}\n\tif listKeystoresOptions.Sort != nil {\n\t\tbuilder.AddQuery(\"sort\", strings.Join(listKeystoresOptions.Sort, \",\"))\n\t}\n\n\trequest, err := builder.Build()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar rawResponse map[string]json.RawMessage\n\tresponse, err = uko.Service.Request(request, &rawResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\tif rawResponse != nil {\n\t\terr = core.UnmarshalModel(rawResponse, \"\", &result, UnmarshalKeystoreList)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tresponse.Result = result\n\t}\n\n\treturn\n}", "func (_obj *Apilangpack) Langpack_getLanguageWithContext(tarsCtx context.Context, params *TLlangpack_getLanguage, _opt ...map[string]string) (ret LangPackLanguage, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"langpack_getLanguage\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = 
ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (c Client) FetchWithContext(context context.Context) (*FetchTrunkResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Trunks/{sid}\",\n\t\tPathParams: map[string]string{\n\t\t\t\"sid\": c.sid,\n\t\t},\n\t}\n\n\tresponse := &FetchTrunkResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}", "func (_obj *Hello) TestWithContext(ctx context.Context, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\terr = _obj.s.Tars_invoke(ctx, 0, \"test\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k, _ := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k, _ := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range 
_resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k, _ := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (obj *Sys) MenuAllWithContext(ctx context.Context, input ResultEmpty, _opt ...map[string]string) (output ResCoMenu, err error) {\n\tvar inputMarshal []byte\n\tinputMarshal, err = proto.Marshal(&input)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\n\tresp := new(requestf.ResponsePacket)\n\n\terr = obj.s.Tars_invoke(ctx, 0, \"MenuAll\", inputMarshal, _status, _context, resp)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\tif err = proto.Unmarshal(tools.Int8ToByte(resp.SBuffer), &output); err != nil {\n\t\treturn output, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\t}\n\n\treturn output, nil\n}", "func (_obj *LacService) TestWithContext(ctx context.Context, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\terr = _obj.s.Tars_invoke(ctx, 0, \"test\", _os.ToBytes(), _status, _context, _resp)\n\tif 
err != nil {\n\t\treturn ret, err\n\t}\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_obj.setMap(len(_opt), _resp, _context, _status)\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (_obj *Apichannels) Channels_joinChannelWithContext(tarsCtx context.Context, params *TLchannels_joinChannel, _opt ...map[string]string) (ret Updates, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_joinChannel\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (h *KubernetesHelper) KubectlWithContext(stdin string, context string, arg ...string) (string, error) {\n\twithContext := append([]string{\"--context=\" + context}, arg...)\n\tcmd := exec.Command(\"kubectl\", withContext...)\n\tcmd.Stdin = strings.NewReader(stdin)\n\tout, err := cmd.CombinedOutput()\n\treturn 
string(out), err\n}", "func (_obj *Apipayments) Payments_getSavedInfoWithContext(tarsCtx context.Context, params *TLpayments_getSavedInfo, _opt ...map[string]string) (ret Payments_SavedInfo, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"payments_getSavedInfo\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (_obj *WebApiAuth) SysUser_GetPageWithContext(tarsCtx context.Context, pageSize int32, pageIndex int32, req *SysUser, res *SysUser_List, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_int32(pageSize, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32(pageIndex, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = req.WriteBlock(_os, 3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 4)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysUser_GetPage\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = (*res).ReadBlock(_is, 4, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetContext adds the context to the get platforms params
func (o *GetPlatformsParams) SetContext(ctx context.Context) { o.Context = ctx }
[ "func (o *GetGPUArchitectureParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *NvmeSubsystemMapGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetSolPoliciesMoidParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *PublicWebLinkPlatformEstablishParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetApplianceImageBundlesMoidParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetContentSourcesUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetOperatingSystemsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetHardwareTapesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func (o *GetContentSourceUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *UpdateLcmHostWithAuthProviderUsingPOSTParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetHardwaresParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetRepositoriesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetBundleByKeyParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetPublicHelloParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetGroupsByTypeUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetRuntimeServersParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}", "func (o *GetDevicesAllParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithHTTPClient adds the HTTPClient to the get platforms params
func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams { o.SetHTTPClient(client) return o }
[ "func (o *GetPlatformsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}", "func (o *GetBundleByKeyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetOperatingSystemsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetDevicesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetGPUArchitectureParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *PublicWebLinkPlatformEstablishParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetHardwareTapesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetHardwaresParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetSolPoliciesMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetApplianceImageBundlesMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetCloudTowerApplicationsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetDevicesAllParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetGroupsByTypeUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetContentSourcesUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetDeploymentByIDV3UsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetDevicesUnknownParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetAllVCentersUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *PcloudNetworksGetallParams) SetHTTPClient(client 
*http.Client) {\n\to.HTTPClient = client\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetHTTPClient adds the HTTPClient to the get platforms params
func (o *GetPlatformsParams) SetHTTPClient(client *http.Client) { o.HTTPClient = client }
[ "func (o *GetGPUArchitectureParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetSolPoliciesMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetHardwaresParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetBundleByKeyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetDevicesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetContentSourcesUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetOperatingSystemsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *PublicWebLinkPlatformEstablishParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetApplianceImageBundlesMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetContentSourceUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetHardwareTapesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetProviderParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *MetroclusterInterconnectGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetDeploymentByIDV3UsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetUIParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetPoolProjectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *AllLookmlTestsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *GetDialogueRandomParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *HandleGetAboutUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithExtended adds the extended to the get platforms params
func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams { o.SetExtended(extended) return o }
[ "func (o *GetPlatformsParams) SetExtended(extended *bool) {\n\to.Extended = extended\n}", "func (o *VulnerabilitiesRequest) SetExtended(v bool) {\n\to.Extended = &v\n}", "func (vk *VK) StoriesGetExtended(params Params) (response StoriesGetExtendedResponse, err error) {\n\tparams[\"extended\"] = true\n\terr = vk.RequestUnmarshal(\"stories.get\", params, &response)\n\n\treturn\n}", "func (vk *VK) AppsGetLeaderboardExtended(params map[string]string) (response AppsGetLeaderboardExtendedResponse, vkErr Error) {\n\tparams[\"extended\"] = \"1\"\n\tvk.RequestUnmarshal(\"apps.getLeaderboard\", params, &response, &vkErr)\n\treturn\n}", "func AddPlatformExtensions(platform string, baseExt []string) []string {\n\text := []string{}\n\tfor _, extension := range baseExt {\n\t\text = append(ext, strings.Join([]string{\".\" + platform, extension}, \"\"))\n\t}\n\n\text = append(ext, baseExt...)\n\treturn ext\n}", "func (p *HostedProgramInfo) Extend(ext auth.SubPrin) {\n\tp.subprin = append(p.subprin, ext...)\n}", "func (vk *VK) FaveGetMarketItemsExtended(params map[string]string) (response FaveGetMarketItemsResponse, vkErr Error) {\n\tparams[\"extended\"] = \"1\"\n\tvk.RequestUnmarshal(\"fave.getMarketItems\", params, &response, &vkErr)\n\treturn\n}", "func NewExtendedOptionsJson(value string) xjson.Map {\n\treturn xjson.Map{\n\t\t\"@type\": TypeURLPrefix + proto.MessageName(&m3test.PingResponse{}),\n\t\t\"Value\": value,\n\t\t\"counter\": 0,\n\t}\n}", "func MergeRawExtension(base *runtime.RawExtension, patch *runtime.RawExtension) (*runtime.RawExtension, error) {\n\tpatchParameter, err := util.RawExtension2Map(patch)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to convert patch parameters to map\")\n\t}\n\tbaseParameter, err := util.RawExtension2Map(base)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to convert base parameters to map\")\n\t}\n\tif baseParameter == nil {\n\t\tbaseParameter = make(map[string]interface{})\n\t}\n\terr = 
mergo.Merge(&baseParameter, patchParameter, mergo.WithOverride)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to do merge with override\")\n\t}\n\tbs, err := json.Marshal(baseParameter)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to marshal merged properties\")\n\t}\n\treturn &runtime.RawExtension{Raw: bs}, nil\n}", "func (vk *VK) FaveGetVideosExtended(params map[string]string) (response FaveGetVideosExtendedResponse, vkErr Error) {\n\tparams[\"extended\"] = \"1\"\n\tvk.RequestUnmarshal(\"fave.getVideos\", params, &response, &vkErr)\n\treturn\n}", "func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func (vk *VK) FaveGetPostsExtended(params map[string]string) (response FaveGetPostsExtendedResponse, vkErr Error) {\n\tparams[\"extended\"] = \"1\"\n\tvk.RequestUnmarshal(\"fave.getPosts\", params, &response, &vkErr)\n\treturn\n}", "func (vk *VK) StoriesSearchExtended(params Params) (response StoriesSearchExtendedResponse, err error) {\n\tparams[\"extended\"] = true\n\terr = vk.RequestUnmarshal(\"stories.search\", params, &response)\n\n\treturn\n}", "func (client *ManagersClient) getExtendedInfoCreateRequest(ctx context.Context, resourceGroupName string, managerName string, options *ManagersClientGetExtendedInfoOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/extendedInformation/vaultExtendedInfo\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif 
managerName == \"\" {\n\t\treturn nil, errors.New(\"parameter managerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managerName}\", url.PathEscape(managerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2016-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewExtendedOptionsProto(value string) (*protobuftypes.Any, error) {\n\t// NB: using some arbitrary custom protobuf message to avoid well known protobuf types as these work across\n\t// gogo/golang implementations.\n\tmsg := &m3test.PingResponse{Value: value}\n\treturn NewProtobufAny(msg)\n}", "func (o *VulnerabilitiesRequest) HasExtended() bool {\n\tif o != nil && o.Extended != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *VulnerabilitiesRequest) GetExtendedOk() (*bool, bool) {\n\tif o == nil || o.Extended == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Extended, true\n}", "func WithStandardUserAgent(platform string, systemCode string) Option {\n\treturn func(d *ExtensibleTransport) {\n\t\text := NewUserAgentExtension(standardUserAgent(platform, systemCode))\n\t\td.extensions = append(d.extensions, ext)\n\t}\n}", "func (o *VulnerabilitiesRequest) GetExtended() bool {\n\tif o == nil || o.Extended == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Extended\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetExtended adds the extended to the get platforms params
func (o *GetPlatformsParams) SetExtended(extended *bool) { o.Extended = extended }
[ "func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams {\n\to.SetExtended(extended)\n\treturn o\n}", "func (o *VulnerabilitiesRequest) SetExtended(v bool) {\n\to.Extended = &v\n}", "func (e *Entity) SetExtendedAttributes(b []byte) {\n\te.ExtendedAttributes = b\n}", "func (p *HostedProgramInfo) Extend(ext auth.SubPrin) {\n\tp.subprin = append(p.subprin, ext...)\n}", "func (s *BaseWebIDLListener) EnterExtendedAttributes(ctx *ExtendedAttributesContext) {}", "func (vk *VK) StoriesGetExtended(params Params) (response StoriesGetExtendedResponse, err error) {\n\tparams[\"extended\"] = true\n\terr = vk.RequestUnmarshal(\"stories.get\", params, &response)\n\n\treturn\n}", "func (s *BaseWebIDLListener) EnterExtendedAttribute(ctx *ExtendedAttributeContext) {}", "func (vk *VK) FaveGetMarketItemsExtended(params map[string]string) (response FaveGetMarketItemsResponse, vkErr Error) {\n\tparams[\"extended\"] = \"1\"\n\tvk.RequestUnmarshal(\"fave.getMarketItems\", params, &response, &vkErr)\n\treturn\n}", "func (m *AdministrativeUnit) SetExtensions(value []Extensionable)() {\n m.extensions = value\n}", "func (rr *OPT) SetExtendedRcode(v uint16) {\n\trr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24\n}", "func (o *VulnerabilitiesRequest) GetExtended() bool {\n\tif o == nil || o.Extended == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Extended\n}", "func TestSetExtraSpecs(t *testing.T) {\n\tth.SetupHTTP()\n\tdefer th.TeardownHTTP()\n\n\tMockSetExtraSpecsResponse(t)\n\n\toptions := &sharetypes.SetExtraSpecsOpts{\n\t\tExtraSpecs: map[string]interface{}{\"my_key\": \"my_value\"},\n\t}\n\n\tes, err := sharetypes.SetExtraSpecs(client.ServiceClient(), \"shareTypeID\", options).Extract()\n\tth.AssertNoErr(t, err)\n\n\tth.AssertEquals(t, es[\"my_key\"], \"my_value\")\n}", "func (s *BaseWebIDLListener) EnterExtendedAttributeArgList(ctx *ExtendedAttributeArgListContext) {}", "func (o *VulnerabilitiesRequest) HasExtended() bool {\n\tif o != nil && 
o.Extended != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (vk *VK) FaveGetVideosExtended(params map[string]string) (response FaveGetVideosExtendedResponse, vkErr Error) {\n\tparams[\"extended\"] = \"1\"\n\tvk.RequestUnmarshal(\"fave.getVideos\", params, &response, &vkErr)\n\treturn\n}", "func NewExtendedOptionsJson(value string) xjson.Map {\n\treturn xjson.Map{\n\t\t\"@type\": TypeURLPrefix + proto.MessageName(&m3test.PingResponse{}),\n\t\t\"Value\": value,\n\t\t\"counter\": 0,\n\t}\n}", "func (m *Application) SetExtensionProperties(value []ExtensionPropertyable)() {\n m.extensionProperties = value\n}", "func (s *BaseWebIDLListener) EnterExtendedAttributeNoArgs(ctx *ExtendedAttributeNoArgsContext) {}", "func AddPlatformExtensions(platform string, baseExt []string) []string {\n\text := []string{}\n\tfor _, extension := range baseExt {\n\t\text = append(ext, strings.Join([]string{\".\" + platform, extension}, \"\"))\n\t}\n\n\text = append(ext, baseExt...)\n\treturn ext\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ListMetricEquipAttr implements Licence ListMetricEquipAttr function
func (l *LicenseRepository) ListMetricEquipAttr(ctx context.Context, scopes ...string) ([]*v1.MetricEquipAttrStand, error) { respJSON, err := l.listMetricWithMetricType(ctx, v1.MetricEquipAttrStandard, scopes...) if err != nil { logger.Log.Error("dgraph/ListMetricEquipAttr - listMetricWithMetricType", zap.Error(err)) return nil, err } type Resp struct { Data []*metricEquipAttr } var data Resp if err := json.Unmarshal(respJSON, &data); err != nil { logger.Log.Error("dgraph/ListMetricEquipAttr - Unmarshal failed", zap.Error(err)) return nil, errors.New("cannot Unmarshal") } if len(data.Data) == 0 { return nil, v1.ErrNoData } return converMetricToModelMetricAllEquipAttr(data.Data) }
[ "func (p ClusterProvider) ListAttr(req resource.Request) resource.Response {\n\treturn resource.Response{\n\t\tCode: 0,\n\t\tData: []interface{}{},\n\t}\n}", "func cmdAttributeList(c *cli.Context) error {\n\tif err := adm.VerifyNoArgument(c); err != nil {\n\t\treturn err\n\t}\n\n\treturn adm.Perform(`get`, `/attribute/`, `list`, nil, c)\n}", "func AttributeList(attr int32) (list map[string]int32) {\n\tlist = map[string]int32{}\n\tfor bit, name := range VideoAttribute {\n\t\tlist[name] = int32(((attr >> bit) & 1))\n\t}\n\treturn\n}", "func getAttrList(selection *goquery.Selection, attrName string) []string {\n\tres := selection.Map(func(ind int, s *goquery.Selection) string {\n\t\tattr, _ := s.Attr(attrName)\n\t\treturn attr\n\t})\n\treturn removeEmpty(res)\n}", "func (e *Environment) Attr(environmentName, attr string) ([]Attr, error) {\n\n\targkeys := []string{\"attr\"}\n\targvalues := []interface{}{attr}\n\tbaseCommand := fmt.Sprintf(\"list environment attr %s\", environmentName)\n\n\tc, err := cmd.ArgsExpander(baseCommand, argkeys, argvalues)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := cmd.RunCommand(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tattrs := []Attr{}\n\terr = json.Unmarshal(b, &attrs)\n\tif err != nil {\n\t\t// it may have been just an empty output from the Frontend\n\t\tnullOutput := NullOutput{}\n\t\terr = json.Unmarshal(b, &nullOutput)\n\t\tif err != nil {\n\t\t\t// if we still can't recognize the output, return an error\n\t\t\treturn nil, err\n\t\t}\n\t\treturn attrs, err\n\t}\n\treturn attrs, err\n}", "func (m *MetricEquipAtt) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for ID\n\n\tif utf8.RuneCountInString(m.GetName()) < 1 {\n\t\treturn MetricEquipAttValidationError{\n\t\t\tfield: \"Name\",\n\t\t\treason: \"value length must be at least 1 runes\",\n\t\t}\n\t}\n\n\tif !_MetricEquipAtt_Name_Pattern.MatchString(m.GetName()) {\n\t\treturn MetricEquipAttValidationError{\n\t\t\tfield: 
\"Name\",\n\t\t\treason: \"value does not match regex pattern \\\"[.-_A-Za-z0-9]+$\\\"\",\n\t\t}\n\t}\n\n\t// no validation rules for EqType\n\n\t// no validation rules for AttributeName\n\n\tif utf8.RuneCountInString(m.GetEnvironment()) < 1 {\n\t\treturn MetricEquipAttValidationError{\n\t\t\tfield: \"Environment\",\n\t\t\treason: \"value length must be at least 1 runes\",\n\t\t}\n\t}\n\n\tif !_MetricEquipAtt_Environment_Pattern.MatchString(m.GetEnvironment()) {\n\t\treturn MetricEquipAttValidationError{\n\t\t\tfield: \"Environment\",\n\t\t\treason: \"value does not match regex pattern \\\"^[a-zA-Z0-9,]+$\\\"\",\n\t\t}\n\t}\n\n\tif m.GetValue() <= 0 {\n\t\treturn MetricEquipAttValidationError{\n\t\t\tfield: \"Value\",\n\t\t\treason: \"value must be greater than 0\",\n\t\t}\n\t}\n\n\tfor idx, item := range m.GetScopes() {\n\t\t_, _ = idx, item\n\n\t\tif utf8.RuneCountInString(item) != 3 {\n\t\t\treturn MetricEquipAttValidationError{\n\t\t\t\tfield: fmt.Sprintf(\"Scopes[%v]\", idx),\n\t\t\t\treason: \"value length must be 3 runes\",\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn nil\n}", "func (p ClusterProvider) ListAttrValue(req resource.Request) resource.Response {\n\treturn resource.Response{\n\t\tCode: 0,\n\t\tData: ListResult{Count: 0, Results: []interface{}{}},\n\t}\n}", "func (a *Attr) Attr(attrName string, shadow bool) ([]Attr, error) {\n\tvar shadowstr string\n\tif shadow == true {\n\t\tshadowstr = \"true\"\n\t} else {\n\t\tshadowstr = \"false\"\n\t}\n\targs := []interface{}{attrName, shadowstr}\n\tc := fmt.Sprintf(\"list attr attr='%s' shadow='%s'\", args...)\n\tb, err := cmd.RunCommand(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tattrs := []Attr{}\n\terr = json.Unmarshal(b, &attrs)\n\treturn attrs, err\n}", "func (a *CapabilityApiService) GetCapabilityEquipmentPhysicalDefList(ctx context.Context) ApiGetCapabilityEquipmentPhysicalDefListRequest {\n\treturn ApiGetCapabilityEquipmentPhysicalDefListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (cr 
*CredentialRequest) AttributeList(\n\tconf *Configuration,\n\tmetadataVersion byte,\n\trevocationAttr *big.Int,\n\tissuedAt time.Time,\n) (*AttributeList, error) {\n\tif err := cr.Validate(conf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcredtype := conf.CredentialTypes[cr.CredentialTypeID]\n\tif !credtype.RevocationSupported() && revocationAttr != nil {\n\t\treturn nil, errors.Errorf(\"cannot specify revocationAttr: credtype %s does not support revocation\", cr.CredentialTypeID.String())\n\t}\n\n\t// Compute metadata attribute\n\tmeta := NewMetadataAttribute(metadataVersion)\n\tmeta.setKeyCounter(cr.KeyCounter)\n\tmeta.setCredentialTypeIdentifier(cr.CredentialTypeID.String())\n\tmeta.setSigningDate(issuedAt)\n\tif err := meta.setExpiryDate(cr.Validity); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compute other attributes\n\tattrs := make([]*big.Int, len(credtype.AttributeTypes)+1)\n\tattrs[0] = meta.Int\n\tif credtype.RevocationSupported() {\n\t\tif revocationAttr != nil {\n\t\t\tattrs[credtype.RevocationIndex+1] = revocationAttr\n\t\t} else {\n\t\t\tattrs[credtype.RevocationIndex+1] = bigZero\n\t\t}\n\t}\n\tfor i, attrtype := range credtype.AttributeTypes {\n\t\tif attrtype.RevocationAttribute || attrtype.RandomBlind {\n\t\t\tcontinue\n\t\t}\n\t\tattrs[i+1] = new(big.Int)\n\t\tif str, present := cr.Attributes[attrtype.ID]; present {\n\t\t\t// Set attribute to str << 1 + 1\n\t\t\tattrs[i+1].SetBytes([]byte(str))\n\t\t\tif meta.Version() >= 0x03 {\n\t\t\t\tattrs[i+1].Lsh(attrs[i+1], 1) // attr <<= 1\n\t\t\t\tattrs[i+1].Add(attrs[i+1], big.NewInt(1)) // attr += 1\n\t\t\t}\n\t\t}\n\t}\n\n\tlist := NewAttributeListFromInts(attrs, conf)\n\tlist.RevocationSupported = cr.RevocationSupported\n\treturn list, nil\n}", "func addListAttribute(item map[string]*dynamodb.AttributeValue, key string, value []*dynamodb.AttributeValue) {\n\titem[key] = &dynamodb.AttributeValue{L: value}\n}", "func (s *BaseDOTListener) EnterAttr_list(ctx *Attr_listContext) {}", "func (i 
*InMemoryAttributes) Listxattr() (map[string]struct{}, error) {\n\tnames := make(map[string]struct{}, len(i.Xattrs))\n\tfor name := range i.Xattrs {\n\t\tnames[name] = struct{}{}\n\t}\n\treturn names, nil\n}", "func (s *BaseDOTListener) ExitAttr_list(ctx *Attr_listContext) {}", "func (l *LicenseRepository) ProductsForEquipmentForMetricAttrCounterStandard(ctx context.Context, equipID, equipType string, hirearchyLevel uint8, metric *v1.MetricACSComputed, scopes ...string) ([]*v1.ProductData, error) {\n\treturn l.productsForEquipmentForMetric(ctx, equipID, hirearchyLevel, metric.Name, scopes...)\n}", "func (f Features) attrExtreme() *spotify.TrackAttributes {\n\tok := func(val float32) bool {\n\t\treturn val <= 0.25 || val >= 0.75\n\t}\n\tx := spotify.NewTrackAttributes()\n\tif ok(f.Acousticness) {\n\t\tx.TargetAcousticness(float64(f.Acousticness))\n\t}\n\n\tif ok(f.Danceability) {\n\t\tx.TargetDanceability(float64(f.Danceability))\n\t}\n\tx.TargetDuration(f.Duration)\n\tif ok(f.Energy) {\n\t\tx.TargetEnergy(float64(f.Energy))\n\t}\n\tif ok(f.Instrumentalness) {\n\t\tx.TargetInstrumentalness(float64(f.Instrumentalness))\n\t}\n\tif ok(f.Liveness) {\n\t\tx.TargetLiveness(float64(f.Liveness))\n\t}\n\tif ok(f.Loudness) {\n\t\tx.TargetLoudness(float64(f.Loudness))\n\t}\n\tif ok(f.Speechiness) {\n\t\tx.TargetSpeechiness(float64(f.Speechiness))\n\t}\n\tif ok(f.Valence) {\n\t\tx.TargetValence(float64(f.Valence))\n\t}\n\treturn x\n}", "func Attr(attrs ...a.Attribute) []a.Attribute {\n return attrs\n}", "func (a adapter) Attrs(key string) []string {\n\treturn a.entry.GetAttributeValues(key)\n}", "func (av DynamoDBAttributeValue) List() []DynamoDBAttributeValue {\n\tav.ensureType(DataTypeList)\n\treturn av.value.([]DynamoDBAttributeValue)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Auth mocks base method
func (m *MockServiceAuth) Auth(arg0 models.UserInput) (models.UserBoardsOutside, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Auth", arg0) ret0, _ := ret[0].(models.UserBoardsOutside) ret1, _ := ret[1].(error) return ret0, ret1 }
[ "func newMockAuthFunc(expRet AuthFuncReturn, expErr error) *MockImpl {\n\tmockImpl := &MockImpl{expRet: expRet, expErr: expErr}\n\tmockImpl.On(\"mockAuthFunc\", nil, (*http.Request)(nil)).Return(expRet, expErr)\n\treturn mockImpl\n}", "func TestReadAuthHandlerSucceeds(t *testing.T) {\n\tmockEnv := makeMockEnv()\n\n\t// Create an auth\n\t_, err := makeRequest(mockEnv, http.MethodPost, \"/auth/register\", `{\"email\": \"[email protected]\", \"password\": \"BlackcurrantCrush123\"}`)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not make request: %s\", err.Error())\n\t}\n\n\t// Access that same auth\n\tres, err := makeRequest(mockEnv, http.MethodPost, \"/auth/login\", `{\"email\": \"[email protected]\", \"password\":\"BlackcurrantCrush123\"}`)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not make request: %s\", err.Error())\n\t}\n\n\tif res.Code != http.StatusOK {\n\t\tt.Errorf(\"Wrong status code: %v\", res.Code)\n\t}\n\n\tvar decoded map[string]string\n\terr = json.Unmarshal([]byte(res.Body.String()), &decoded)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not decode json: %s\", err.Error())\n\t}\n\n\trawToken, ok := decoded[\"AccessToken\"]\n\tif !ok {\n\t\tt.Fatalf(\"Token doesn't contain an access token: %s\", err.Error())\n\t}\n\n\ttoken, _, err := new(jwt.Parser).ParseUnverified(rawToken, jwt.MapClaims{})\n\tif err != nil {\n\t\tt.Fatalf(\"Could not decode JWT: %s\", err.Error())\n\t}\n\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif !ok {\n\t\tt.Fatalf(\"Could not decode claims\")\n\t}\n\n\tid, ok := claims[\"id\"]\n\tif !ok {\n\t\tt.Fatalf(\"Claims doesn't contain an ID key\")\n\t}\n\n\t_, err = uuid.Parse(id.(string))\n\tif err != nil {\n\t\tt.Fatalf(\"ID is not a valid UUID\")\n\t}\n\n\tiss, ok := claims[\"iss\"]\n\tif !ok {\n\t\tt.Fatalf(\"Claims doesn't contain an iss key\")\n\t}\n\n\tif iss.(string) != mockEnv.jwtCredential.Key {\n\t\tt.Fatalf(\"iss is incorrect: found %v, wanted %s\", iss, mockEnv.jwtCredential.Key)\n\t}\n}", "func TestAuthRequestGetters(t 
*testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"foo\", func(r res.AuthRequest) {\n\t\t\trestest.AssertEqualJSON(t, \"Method\", r.Method(), \"foo\")\n\t\t\trestest.AssertEqualJSON(t, \"CID\", r.CID(), mock.CID)\n\t\t\trestest.AssertEqualJSON(t, \"Header\", r.Header(), mock.Header)\n\t\t\trestest.AssertEqualJSON(t, \"Host\", r.Host(), mock.Host)\n\t\t\trestest.AssertEqualJSON(t, \"RemoteAddr\", r.RemoteAddr(), mock.RemoteAddr)\n\t\t\trestest.AssertEqualJSON(t, \"URI\", r.URI(), mock.URI)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"foo\", nil).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func (TestHelpers) Authenticate(perms ...weave.Condition) Authenticator {\n\treturn mockAuth{perms}\n}", "func (_m *Remote) Auth(token string, secret string) (string, error) {\n\tret := _m.Called(token, secret)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string, string) string); ok {\n\t\tr0 = rf(token, secret)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(token, secret)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Forge) Auth(ctx context.Context, token string, secret string) (string, error) {\n\tret := _m.Called(ctx, token, secret)\n\n\tvar r0 string\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) (string, error)); ok {\n\t\treturn rf(ctx, token, secret)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) string); ok {\n\t\tr0 = rf(ctx, token, secret)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {\n\t\tr1 = rf(ctx, token, secret)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockAPI) AuthCall(arg0 *middlewares.AutheliaCtx, arg1 *session.UserSession, arg2 url.Values) (*duo.AuthResponse, 
error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AuthCall\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*duo.AuthResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func mockTestUserInteraction(ctx context.Context, pro providerParams, username, password string) (string, error) {\n\tctx, cancel := context.WithTimeout(ctx, 10*time.Second)\n\tdefer cancel()\n\n\tprovider, err := oidc.NewProvider(ctx, pro.providerURL)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to create provider: %v\", err)\n\t}\n\n\t// Configure an OpenID Connect aware OAuth2 client.\n\toauth2Config := oauth2.Config{\n\t\tClientID: pro.clientID,\n\t\tClientSecret: pro.clientSecret,\n\t\tRedirectURL: pro.redirectURL,\n\n\t\t// Discovery returns the OAuth2 endpoints.\n\t\tEndpoint: provider.Endpoint(),\n\n\t\t// \"openid\" is a required scope for OpenID Connect flows.\n\t\tScopes: []string{oidc.ScopeOpenID, \"groups\"},\n\t}\n\n\tstate := \"xxx\"\n\tauthCodeURL := oauth2Config.AuthCodeURL(state)\n\t// fmt.Printf(\"authcodeurl: %s\\n\", authCodeURL)\n\n\tvar lastReq *http.Request\n\tcheckRedirect := func(req *http.Request, via []*http.Request) error {\n\t\t// fmt.Printf(\"CheckRedirect:\\n\")\n\t\t// fmt.Printf(\"Upcoming: %s %#v\\n\", req.URL.String(), req)\n\t\t// for _, c := range via {\n\t\t// \tfmt.Printf(\"Sofar: %s %#v\\n\", c.URL.String(), c)\n\t\t// }\n\t\t// Save the last request in a redirect chain.\n\t\tlastReq = req\n\t\t// We do not follow redirect back to client application.\n\t\tif req.URL.Path == \"/oauth_callback\" {\n\t\t\treturn http.ErrUseLastResponse\n\t\t}\n\t\treturn nil\n\t}\n\n\tdexClient := http.Client{\n\t\tCheckRedirect: checkRedirect,\n\t}\n\n\tu, err := url.Parse(authCodeURL)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"url parse err: %v\", err)\n\t}\n\n\t// Start the user auth flow. 
This page would present the login with\n\t// email or LDAP option.\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"new request err: %v\", err)\n\t}\n\t_, err = dexClient.Do(req)\n\t// fmt.Printf(\"Do: %#v %#v\\n\", resp, err)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"auth url request err: %v\", err)\n\t}\n\n\t// Modify u to choose the ldap option\n\tu.Path += \"/ldap\"\n\t// fmt.Println(u)\n\n\t// Pick the LDAP login option. This would return a form page after\n\t// following some redirects. `lastReq` would be the URL of the form\n\t// page, where we need to POST (submit) the form.\n\treq, err = http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"new request err (/ldap): %v\", err)\n\t}\n\t_, err = dexClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"request err: %v\", err)\n\t}\n\n\t// Fill the login form with our test creds:\n\t// fmt.Printf(\"login form url: %s\\n\", lastReq.URL.String())\n\tformData := url.Values{}\n\tformData.Set(\"login\", username)\n\tformData.Set(\"password\", password)\n\treq, err = http.NewRequestWithContext(ctx, http.MethodPost, lastReq.URL.String(), strings.NewReader(formData.Encode()))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"new request err (/login): %v\", err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t_, err = dexClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"post form err: %v\", err)\n\t}\n\t// fmt.Printf(\"resp: %#v %#v\\n\", resp.StatusCode, resp.Header)\n\t// fmt.Printf(\"lastReq: %#v\\n\", lastReq.URL.String())\n\n\t// On form submission, the last redirect response contains the auth\n\t// code, which we now have in `lastReq`. 
Exchange it for a JWT id_token.\n\tq := lastReq.URL.Query()\n\tcode := q.Get(\"code\")\n\toauth2Token, err := oauth2Config.Exchange(ctx, code)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to exchange code for id token: %v\", err)\n\t}\n\n\trawIDToken, ok := oauth2Token.Extra(\"id_token\").(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"id_token not found!\")\n\t}\n\n\t// fmt.Printf(\"TOKEN: %s\\n\", rawIDToken)\n\treturn rawIDToken, nil\n}", "func TestAuthNoOrgs(t *testing.T) {\n\tlog := GetLogger(t)\n\tdb := GetDb(t)\n\tCleanDb(t, db)\n\tCreateUser(t, db, ADMIN_EMAIL, ADMIN_PASSWORD)\n\ttoken := LoginUser(t, log, db, ADMIN_EMAIL, ADMIN_PASSWORD, http.StatusOK)\n\n\t// send some request and let handler to initiate user profile section of\n\t// context associated with request\n\treq, err := http.NewRequest(\"POST\", \"/\", strings.NewReader(\"\"))\n\treq.Header.Add(\"Authorization\", \"Bearer \"+token)\n\n\tOk(t, err)\n\n\trr := httptest.NewRecorder()\n\n\tmh := getMockHandler(log)\n\n\thandler := getAuthHandler(t, db, mh)\n\thandler.ServeHTTP(rr, req)\n\n\tCheckStatusCode(t, rr, 200)\n\n\t// check if child handler was called\n\tEquals(t, 1, len(mh.Calls))\n\n\t// get context associated with child request\n\tctx := mh.Calls[0].Request.Context()\n\n\t// verify that context contains user profile\n\tprofile := ctx.Value(\"profile\").(*main.UserProfile)\n\n\tAssert(t, profile != nil, \"User profile not initialized\")\n\t//lint:ignore SA5011 possible nil pointer dereference\n\tEquals(t, profile.Email, ADMIN_EMAIL)\n\t//lint:ignore SA5011 possible nil pointer dereference\n\t//lint:ignore SA5011 possible nil pointer dereference\n\tEquals(t, profile.IsAdmin, false)\n\t//lint:ignore SA5011 possible nil pointer dereference\n\tEquals(t, profile.OrgId, primitive.NilObjectID)\n\t//lint:ignore SA5011 possible nil pointer dereference\n\tEquals(t, 0, len(profile.OrgIds))\n}", "func (_m *MockAuthServiceServer) Auth(_a0 context.Context, _a1 *AuthRequest) 
(*AuthResponse, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 *AuthResponse\n\tif rf, ok := ret.Get(0).(func(context.Context, *AuthRequest) *AuthResponse); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*AuthResponse)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *AuthRequest) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (suite *SubscriptionsTestSuite) mockClientAuth(account *accounts.Account) {\n\t// Mock GetConfig call to return the config object\n\tsuite.accountsServiceMock.On(\"GetConfig\").Return(suite.cnf)\n\n\t// Mock GetOauthService to return a mock oauth service\n\tsuite.accountsServiceMock.On(\"GetOauthService\").Return(suite.oauthServiceMock)\n\n\t// Mock AuthClient to return a mock client\n\tsuite.oauthServiceMock.On(\"AuthClient\", \"test_client_1\", \"test_secret\").\n\t\tReturn(account.OauthClient, nil)\n\n\t// Mock FindAccountByOauthClientID to return the wanted account\n\tsuite.accountsServiceMock.\n\t\tOn(\"FindAccountByOauthClientID\", account.OauthClient.ID).\n\t\tReturn(account, nil)\n}", "func TestCreateAuthHandlerSucceeds(t *testing.T) {\n\tmockEnv := makeMockEnv()\n\n\t// Create a single auth\n\tres, err := makeRequest(mockEnv, http.MethodPost, \"/auth/register\", `{\"email\": \"[email protected]\", \"password\": \"BlackcurrantCrush123\"}`)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not make request: %s\", err.Error())\n\t}\n\n\tif res.Code != http.StatusOK {\n\t\tt.Errorf(\"Wrong status code: %v\", res.Code)\n\t}\n\n\tvar decoded map[string]string\n\terr = json.Unmarshal([]byte(res.Body.String()), &decoded)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not decode json: %s\", err.Error())\n\t}\n\n\trawToken, ok := decoded[\"AccessToken\"]\n\tif !ok {\n\t\tt.Fatalf(\"Token doesn't contain an access token: %s\", err.Error())\n\t}\n\n\ttoken, _, err := new(jwt.Parser).ParseUnverified(rawToken, 
jwt.MapClaims{})\n\tif err != nil {\n\t\tt.Fatalf(\"Could not decode JWT: %s\", err.Error())\n\t}\n\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif !ok {\n\t\tt.Fatalf(\"Could not decode claims\")\n\t}\n\n\tid, ok := claims[\"id\"]\n\tif !ok {\n\t\tt.Fatalf(\"Claims doesn't contain an ID key\")\n\t}\n\n\t_, err = uuid.Parse(id.(string))\n\tif err != nil {\n\t\tt.Fatalf(\"ID is not a valid UUID\")\n\t}\n\n\tiss, ok := claims[\"iss\"]\n\tif !ok {\n\t\tt.Fatalf(\"Claims doesn't contain an iss key\")\n\t}\n\n\tif iss.(string) != mockEnv.jwtCredential.Key {\n\t\tt.Fatalf(\"iss is incorrect: found %v, wanted %s\", iss, mockEnv.jwtCredential.Key)\n\t}\n}", "func newAuthorizationMocks(t *testing.T, resource, action string) (\n\tauthn.AuthenticationServiceClient, authz.AuthorizationServiceClient) {\n\tvar (\n\t\tctrl = gomock.NewController(t)\n\t\tmockAuthClient = authn.NewMockAuthenticationServiceClient(ctrl)\n\t\tmockAuthzClient = authz.NewMockAuthorizationServiceClient(ctrl)\n\t)\n\n\t// Mocking AuthN Calls\n\tmockAuthClient.EXPECT().Authenticate(gomock.Any(), gomock.Any()).DoAndReturn(\n\t\tfunc(_ context.Context, _ *authn.AuthenticateRequest) (*authn.AuthenticateResponse, error) {\n\t\t\treturn &authn.AuthenticateResponse{Subject: \"mock\", Teams: []string{}}, nil\n\t\t})\n\n\t// Mocking AuthZ Calls\n\tmockAuthzClient.EXPECT().ProjectsAuthorized(\n\t\tgomock.Any(),\n\t\t&authz.ProjectsAuthorizedReq{\n\t\t\tSubjects: []string{\"mock\"},\n\t\t\tResource: resource,\n\t\t\tAction: action,\n\t\t\tProjectsFilter: []string{},\n\t\t},\n\t).DoAndReturn(\n\t\tfunc(_ context.Context, _ *authz.ProjectsAuthorizedReq) (*authz.ProjectsAuthorizedResp, error) {\n\t\t\treturn &authz.ProjectsAuthorizedResp{Projects: []string{\"any\"}}, nil\n\t\t},\n\t)\n\n\treturn mockAuthClient, mockAuthzClient\n}", "func TestAuthOK(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.OK(mock.Result)\n\t\t}))\n\t}, 
func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertResult(mock.Result)\n\t})\n}", "func (m *MockImpl) mockAuthFunc(w http.ResponseWriter, r *http.Request) (AuthFuncReturn, error) {\n\targs := m.Called(w, r)\n\texpRet := args.Get(0).(AuthFuncReturn)\n\texpErr := args.Error(1)\n\treturn expRet, expErr\n}", "func (m *MockService) FirebaseAuth(arg0 context.Context, arg1 string) (*user.User, string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FirebaseAuth\", arg0, arg1)\n\tret0, _ := ret[0].(*user.User)\n\tret1, _ := ret[1].(string)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func NewAuthMock(l auth.Logger) *AuthMock {\n\treturn &AuthMock{*auth.New(l)}\n}", "func (c *Client) Auth(a Auth) error {}", "func TestgetAuth(t *testing.T) {\n\tt.Parallel()\n\ta, err := getAuth()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif reflect.TypeOf(a).String() != \"*scaniigo.APIAuth\" {\n\t\tt.Error(ErrInvalidDataType)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CheckCookie mocks base method
func (m *MockServiceAuth) CheckCookie(arg0 echo.Context) (int64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CheckCookie", arg0) ret0, _ := ret[0].(int64) ret1, _ := ret[1].(error) return ret0, ret1 }
[ "func getCookie(r *http.Request, cookiename string) (bool, *http.Cookie) {\n // Ignoring error value because it is likely that the cookie might not exist here\n cookie, _ := r.Cookie(cookiename)\n if cookie == nil {\n return false, nil\n }\n return true, cookie\n}", "func Test_Ctx_Cookie(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\texpire := time.Now().Add(24 * time.Hour)\n\tvar dst []byte\n\tdst = expire.In(time.UTC).AppendFormat(dst, time.RFC1123)\n\thttpdate := strings.Replace(string(dst), \"UTC\", \"GMT\", -1)\n\tctx.Cookie(&Cookie{\n\t\tName: \"username\",\n\t\tValue: \"john\",\n\t\tExpires: expire,\n\t})\n\texpect := \"username=john; expires=\" + httpdate + \"; path=/; SameSite=Lax\"\n\tutils.AssertEqual(t, expect, string(ctx.Fasthttp.Response.Header.Peek(HeaderSetCookie)))\n\n\tctx.Cookie(&Cookie{SameSite: \"strict\"})\n\tctx.Cookie(&Cookie{SameSite: \"none\"})\n}", "func MockProjectSessionCookie(projectID, secret string) *http.Cookie {\n\tstore := mockCookieStore()\n\n\tr := &http.Request{}\n\tw := httptest.NewRecorder()\n\n\tsession, _ := store.Get(r, getProjectSessionNameFromString(projectID))\n\n\tsession.Values[projectSecretKeyName] = secret\n\n\terr := session.Save(r, w)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn w.Result().Cookies()[0]\n}", "func Test_Session_Cookie(t *testing.T) {\n\tt.Parallel()\n\t// session store\n\tstore := New()\n\t// fiber instance\n\tapp := fiber.New()\n\t// fiber context\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\n\t// get session\n\tsess, _ := store.Get(ctx)\n\tsess.Save()\n\n\t// cookie should not be set if empty data\n\tutils.AssertEqual(t, 0, len(ctx.Response().Header.PeekCookie(store.CookieName)))\n}", "func testCookieDeletion(orig *Test, c *http.Cookie, cond Condition) {\n\ttracef(\"Test for deletion of cookie '%s' (neg=%t)\", c.Name, cond.Neg)\n\tif cond.Neg {\n\t\torig.Error(cond.Id, 
\"Bad test\",\n\t\t\t\"You cannot test on 'not deletion' of cookie in\\n\"+cond.String())\n\t\treturn\n\t}\n\n\t// Reliable deleted == Max-Age: 0 AND Expired in the past\n\tif c.MaxAge < 0 && c.Expires.Year() != 0 && c.Expires.Before(time.Now()) && c.Value == \"\" {\n\t\ttracef(\" Properly deleted\")\n\t\torig.Passed(cond.Id + \" \" + cond.String())\n\t} else {\n\t\tcause := \"\"\n\t\tif c.MaxAge >= 0 {\n\t\t\tcause += \"Missing 'Max-Age: 0'.\"\n\t\t}\n\t\tif c.Value != \"\" {\n\t\t\tcause += \" Value '\" + c.Value + \"' given.\"\n\t\t}\n\t\tif c.Expires.Year() == 0 {\n\t\t\tcause += \" Expires not set.\"\n\t\t} else if c.Expires.After(time.Now()) {\n\t\t\tcause += fmt.Sprintf(\" Wrong Expires '%s'.\",\n\t\t\t\tc.Expires.Format(http.TimeFormat))\n\t\t}\n\t\ttracef(\" Not properly deleted %s\", cause)\n\t\torig.Failed(cond.Id, \"Cookie not deleted\", cause+\"\\nin\\n\"+cond.String())\n\t}\n}", "func testHeader(resp *http.Response, cookies []*http.Cookie, t, orig *Test) {\n\tif len(t.RespCond) > 0 {\n\t\tdebugf(\"Testing Header\")\n\t\tfor _, c := range t.RespCond {\n\t\t\tcs := c.Info(\"resp\")\n\t\t\tv := resp.Header.Get(c.Key)\n\t\t\tif ok, _ := c.Fullfilled(v); !ok {\n\t\t\t\torig.Failed(c.Id, \"Bad Header\",\n\t\t\t\t\tfmt.Sprintf(\"%s\\nTesting for: %s\\nBut got: %s\", c.Id, c.String(), v))\n\t\t\t} else {\n\t\t\t\torig.Passed(cs)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(t.CookieCond) > 0 {\n\t\tdebugf(\"Testing Cookies\")\n\t\tdomain := stripPort(resp.Request.URL.Host)\n\t\tfor _, cc := range t.CookieCond {\n\t\t\tcc.Key = strings.Replace(cc.Key, \"{CURRENT}\", domain, 1)\n\t\t\ttestSingleCookie(orig, cc, cookies)\n\t\t}\n\t}\n}", "func TestSetCookie(cookieStr string) {\n\ttestCookieStr = cookieStr\n\tlogger.Trace.Println(\"Set cookie to:\", testCookieStr)\n}", "func HasCookie(t *testing.T, req Request, name string, value string) {\n\tassert.True(t, len(req.Cookies) > 0, \"Request does not contain any cookies.\")\n\tif len(req.Cookies) > 0 {\n\t\tfor _, cookie := 
range req.Cookies {\n\t\t\tif cookie.Name == name && cookie.Value == value {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tassert.Fail(t, fmt.Sprintf(\"Cookies do not contain %s=%s => %s\", name, value, req.Cookies))\n\t}\n}", "func (r *MockRequest) Cookie(name, value string) *MockRequest {\n\tr.cookie = append(r.cookie, Cookie{name: &name, value: &value})\n\treturn r\n}", "func TestCookieDecode(t *testing.T) {\n\tr, err := http.NewRequest(\"GET\", \"/\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar age = 3600\n\n\t// Test with a nil hash key\n\tsc := securecookie.New(nil, nil)\n\tsc.MaxAge(age)\n\tst := &cookieStore{cookieName, age, true, true, \"\", \"\", sc, SameSiteDefaultMode}\n\n\t// Set a fake cookie value so r.Cookie passes.\n\tr.Header.Set(\"Cookie\", fmt.Sprintf(\"%s=%s\", cookieName, \"notacookie\"))\n\n\t_, err = st.Get(r)\n\tif err == nil {\n\t\tt.Fatal(\"cookiestore did not report an invalid hashkey on decode\")\n\t}\n}", "func (m *MockCookieRepository) AddCookie(arg0 int, arg1 string, arg2 time.Duration) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddCookie\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (r *MockRequest) CookiePresent(name string) *MockRequest {\n\tr.cookiePresent = append(r.cookiePresent, name)\n\treturn r\n}", "func (m *MockRegistry) CookieStore() sessions.Store {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CookieStore\")\n\tret0, _ := ret[0].(sessions.Store)\n\treturn ret0\n}", "func TestLogout(t *testing.T) {\n\tclockTime := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)\n\tutil.Clock = util.ClockMock{Time: clockTime}\n\n\trequest := httptest.NewRequest(\"GET\", \"http://kiali/api/logout\", nil)\n\trequest.AddCookie(&http.Cookie{\n\t\tName: config.TokenCookieName,\n\t\tValue: \"foo\",\n\t})\n\n\tresponseRecorder := httptest.NewRecorder()\n\tLogout(responseRecorder, request)\n\n\tresponse := responseRecorder.Result()\n\tassert.Equal(t, http.StatusNoContent, 
response.StatusCode)\n\tassert.Equal(t, 1, len(response.Cookies()))\n\n\tcookie := response.Cookies()[0]\n\tassert.Equal(t, config.TokenCookieName, cookie.Name)\n\tassert.True(t, cookie.HttpOnly)\n\t// assert.Equal(t,, http.SameSiteStrictMode, cookie.SameSite) ** Commented out because unsupported in go < 1.11\n\n\tassert.Equal(t, \"\", cookie.Value)\n\tassert.True(t, cookie.Expires.Before(clockTime))\n}", "func TestCookieEncode(t *testing.T) {\n\tvar age = 3600\n\n\t// Test with a nil hash key\n\tsc := securecookie.New(nil, nil)\n\tsc.MaxAge(age)\n\tst := &cookieStore{cookieName, age, true, true, \"\", \"\", sc, SameSiteDefaultMode}\n\n\trr := httptest.NewRecorder()\n\n\terr := st.Save(nil, rr)\n\tif err == nil {\n\t\tt.Fatal(\"cookiestore did not report an invalid hashkey on encode\")\n\t}\n}", "func (p *para) checkCookie(rawCookies string) {\n\theader := http.Header{}\n\theader.Add(\"Cookie\", rawCookies)\n\trequest := http.Request{Header: header}\n\tfor _, e := range request.Cookies() {\n\t\tif strings.Contains(e.Name, \"download_warning_\") {\n\t\t\tcookie, _ := request.Cookie(e.Name)\n\t\t\tp.Code = cookie.Value\n\t\t\tbreak\n\t\t}\n\t}\n}", "func VerifyCookie(r *http.Request) (status int, username string) {\n\tcookie, err := r.Cookie(\"jwt-token\")\n\tif err != nil {\n\t\tif err == http.ErrNoCookie {\n\t\t\tstatus = http.StatusUnauthorized\n\t\t\treturn\n\t\t}\n\t\tstatus = http.StatusBadRequest\n\t\treturn\n\t}\n\ttokenStr := cookie.Value\n\tclaims := &claims{}\n\n\ttoken, err := jwt.ParseWithClaims(tokenStr, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn jwtKey, nil\n\t})\n\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\tstatus = http.StatusUnauthorized\n\t\t\treturn\n\t\t}\n\t\tstatus = http.StatusBadRequest\n\t\treturn\n\t}\n\tif !token.Valid {\n\t\tstatus = http.StatusUnauthorized\n\t\treturn\n\t}\n\tusername = claims.Username\n\tstatus = http.StatusOK\n\treturn\n}", "func (m *MockCookieRepository) DeleteCookie(arg0 
string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteCookie\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Test_Ctx_Cookies(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\tctx.Fasthttp.Request.Header.Set(\"Cookie\", \"john=doe\")\n\tutils.AssertEqual(t, \"doe\", ctx.Cookies(\"john\"))\n\tutils.AssertEqual(t, \"default\", ctx.Cookies(\"unknown\", \"default\"))\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Login mocks base method
func (m *MockServiceAuth) Login(arg0 models.UserInputLogin) (models.UserSession, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Login", arg0) ret0, _ := ret[0].(models.UserSession) ret1, _ := ret[1].(error) return ret0, ret1 }
[ "func (m *MockHandler) Login(arg0 http.ResponseWriter, arg1 *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Login\", arg0, arg1)\n}", "func login () *httptest.ResponseRecorder {\n\treq, _ := http.NewRequest(\"GET\", \"/api/v.0.0.1/get-token?uname=\" + USER_NAME + \"&upass=xxx\", nil)\n\tresponse := executeRequest(req)\n\treturn response\n}", "func TestLogin(w http.ResponseWriter, r *http.Request) {\n\n\tauthHeader := r.Header.Get(\"Authorization\")\n\tcookies := r.Cookies()\n\tvar token string\n\tfor _, c := range cookies {\n\t\tif c.Name == \"token\" {\n\t\t\ttoken = c.Value\n\t\t}\n\t}\n\n\tvar accessToken string\n\t// header value format will be \"Bearer <token>\"\n\tif authHeader != \"\" {\n\t\tif !strings.HasPrefix(authHeader, \"Bearer \") {\n\t\t\tlog.Errorf(\"GetMyIdentities Failed to find Bearer token %v\", authHeader)\n\t\t\tReturnHTTPError(w, r, http.StatusUnauthorized, \"Unauthorized, please provide a valid token\")\n\t\t\treturn\n\t\t}\n\t\taccessToken = strings.TrimPrefix(authHeader, \"Bearer \")\n\t}\n\n\tbytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"TestLogin failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t\treturn\n\t}\n\tvar testAuthConfig model.TestAuthConfig\n\n\terr = json.Unmarshal(bytes, &testAuthConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"TestLogin unmarshal failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t\treturn\n\t}\n\n\tif testAuthConfig.AuthConfig.Provider == \"\" {\n\t\tlog.Errorf(\"UpdateConfig: Provider is a required field\")\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad request, Provider is a required field\")\n\t\treturn\n\t}\n\n\tstatus, err := server.TestLogin(testAuthConfig, accessToken, token)\n\tif err != nil {\n\t\tlog.Errorf(\"TestLogin GetProvider failed with error: %v\", err)\n\t\tif status == 0 
{\n\t\t\tstatus = http.StatusInternalServerError\n\t\t}\n\t\tReturnHTTPError(w, r, status, fmt.Sprintf(\"%v\", err))\n\t}\n}", "func (_m *Repository) Login(email string) (entity.User, error) {\n\tret := _m.Called(email)\n\n\tvar r0 entity.User\n\tif rf, ok := ret.Get(0).(func(string) entity.User); ok {\n\t\tr0 = rf(email)\n\t} else {\n\t\tr0 = ret.Get(0).(entity.User)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(email)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockHandler) UserLogin(email, password string) (*domain.User, string, error) {\n\tret := m.ctrl.Call(m, \"UserLogin\", email, password)\n\tret0, _ := ret[0].(*domain.User)\n\tret1, _ := ret[1].(string)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (_m *ServerConnexion) Login(sUsername string, sPwd string) error {\n\tret := _m.Called(sUsername, sPwd)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string) error); ok {\n\t\tr0 = rf(sUsername, sPwd)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Remote) Login(w http.ResponseWriter, r *http.Request) (*model.User, error) {\n\tret := _m.Called(w, r)\n\n\tvar r0 *model.User\n\tif rf, ok := ret.Get(0).(func(http.ResponseWriter, *http.Request) *model.User); ok {\n\t\tr0 = rf(w, r)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(http.ResponseWriter, *http.Request) error); ok {\n\t\tr1 = rf(w, r)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Asconn) Login(_a0 *aerospike.ClientPolicy) aerospike.Error {\n\tret := _m.Called(_a0)\n\n\tvar r0 aerospike.Error\n\tif rf, ok := ret.Get(0).(func(*aerospike.ClientPolicy) aerospike.Error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(aerospike.Error)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (m *MockUserLogic) UserLogin(email, password string) 
(*domain.User, string, error) {\n\tret := m.ctrl.Call(m, \"UserLogin\", email, password)\n\tret0, _ := ret[0].(*domain.User)\n\tret1, _ := ret[1].(string)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (_m *Handler) Login(c echo.Context) error {\n\tret := _m.Called(c)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(echo.Context) error); ok {\n\t\tr0 = rf(c)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestAuthenticate_Success(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\tdefer db.Close()\n\n\tuser := models.User{\n\t\tID: 1,\n\t\tEmail: \"[email protected]\",\n\t\tPassword: \"personia\",\n\t}\n\n\trows := sqlmock.NewRows([]string{\"id\", \"email\"}).AddRow(user.ID, user.Email)\n\tmock.ExpectQuery(regexp.QuoteMeta(constants.LoginDetailsSelectQuery)).WithArgs(user.Email, user.Password).WillReturnRows(rows)\n\n\tloginRepository := NewLoginRepository(db)\n\n\tloginModel := &models.Login{\n\t\tEmail: \"[email protected]\",\n\t\tPassword: \"personia\",\n\t}\n\n\tcntx := context.Background()\n\tdbuser, err := loginRepository.Authenticate(cntx, loginModel)\n\tassert.Nil(t, err)\n\tassert.Equal(t, user.ID, dbuser.ID)\n\tassert.Equal(t, user.Email, dbuser.Email)\n}", "func (_m *AuthServer) Login(_a0 context.Context, _a1 *auth.Credentials) (*auth.SessionInfo, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 *auth.SessionInfo\n\tif rf, ok := ret.Get(0).(func(context.Context, *auth.Credentials) *auth.SessionInfo); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*auth.SessionInfo)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *auth.Credentials) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func mockLoginAsUser() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r 
*http.Request) {\n\t\tusername, err := usernameFromRequestPath(r)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"userkit_auth_token\",\n\t\t\tValue: fmt.Sprintf(\"dummy_usr_token__%s:dummy\", username),\n\t\t\tPath: \"/\",\n\t\t\tExpires: time.Now().Add(600 * time.Hour),\n\t\t})\n\t\tlog.Printf(\"mock logged in as %s\", username)\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t}\n}", "func (m *MockRepository) GetLoginBySession(uuid string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLoginBySession\", uuid)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestAuthenticate_Fail(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\tdefer db.Close()\n\n\tuser := models.User{\n\t\tID: 1,\n\t\tEmail: \"[email protected]\",\n\t\tPassword: \"personia\",\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(constants.LoginDetailsSelectQuery)\n\n\trows := sqlmock.NewRows([]string{\"id\", \"email\"})\n\tmock.ExpectQuery(regexp.QuoteMeta(buffer.String())).WithArgs(user.Email, user.Password).WillReturnRows(rows)\n\n\tloginRepository := NewLoginRepository(db)\n\n\tloginModel := &models.Login{\n\t\tEmail: \"[email protected]\",\n\t\tPassword: \"personia\",\n\t}\n\n\tcntx := context.Background()\n\t_, err = loginRepository.Authenticate(cntx, loginModel)\n\tassert.NotNil(t, err)\n}", "func (m *MockIWXClient) WXLogin(arg0, arg1, arg2 string) (wx.LoginResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WXLogin\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(wx.LoginResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) LoginLongLived(ctx context.Context, username, password string) (*oauth2.Token, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LoginLongLived\", ctx, username, 
password)\n\tret0, _ := ret[0].(*oauth2.Token)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Forge) Login(ctx context.Context, w http.ResponseWriter, r *http.Request) (*model.User, error) {\n\tret := _m.Called(ctx, w, r)\n\n\tvar r0 *model.User\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, http.ResponseWriter, *http.Request) (*model.User, error)); ok {\n\t\treturn rf(ctx, w, r)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, http.ResponseWriter, *http.Request) *model.User); ok {\n\t\tr0 = rf(ctx, w, r)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.User)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, http.ResponseWriter, *http.Request) error); ok {\n\t\tr1 = rf(ctx, w, r)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (u *Usecase) Login(query interface{}, password interface{}) error {\n\n\t// find user by email\n\tresult, err := u.repo.FindByQuery(query)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// if len result == 0, return user not found\n\tif len(result) == 0 {\n\t\treturn errors.New(\"user not found\")\n\t}\n\n\t// compare passwords and return result\n\tif err := bcrypt.CompareHashAndPassword([]byte(result[0][\"password\"].(string)), []byte(password.(string))); err != nil {\n\t\treturn errors.New(\"invalid password\")\n\t}\n\treturn nil\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registration mocks base method
func (m *MockServiceAuth) Registration(arg0 models.UserInputReg) (models.UserSession, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Registration", arg0) ret0, _ := ret[0].(models.UserSession) ret1, _ := ret[1].(error) return ret0, ret1 }
[ "func (m *MockRegistry) Register(arg0 provider.Provider) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Register\", arg0)\n}", "func (m *MockVirtualServiceClient) Register() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Register\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRPCServer) registerServices() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"registerServices\")\n}", "func (m *MockUserController) Register(context *gin.Context) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Register\", context)\n}", "func registerMock(name string, priority CollectorPriority) *MockCollector {\n\tc := &MockCollector{}\n\tfactory := func() Collector { return c }\n\tregisterCollector(name, factory, priority)\n\treturn c\n}", "func (_m *MockProxy) Register() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockRPCServer) registerServicesProxy(ctx context.Context) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"registerServicesProxy\", ctx)\n}", "func (m *MockRegistry) RegisterRoutes(arg0 *x.RouterAdmin, arg1 *x.RouterPublic) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RegisterRoutes\", arg0, arg1)\n}", "func (m *MockHealthCheck) RegisterStats() {\n\tm.ctrl.Call(m, \"RegisterStats\")\n}", "func (mmRegister *mRegistererMockRegister) Set(f func(c1 mm_prometheus.Collector) (err error)) *RegistererMock {\n\tif mmRegister.defaultExpectation != nil {\n\t\tmmRegister.mock.t.Fatalf(\"Default expectation is already set for the Registerer.Register method\")\n\t}\n\n\tif len(mmRegister.expectations) > 0 {\n\t\tmmRegister.mock.t.Fatalf(\"Some expectations are already set for the Registerer.Register method\")\n\t}\n\n\tmmRegister.mock.funcRegister = f\n\treturn mmRegister.mock\n}", "func (_m *MockPermissionRegistry) Register(permissions charon.Permissions) (int64, int64, int64, error) {\n\tret := _m.Called(permissions)\n\n\tvar r0 int64\n\tif rf, ok 
:= ret.Get(0).(func(charon.Permissions) int64); ok {\n\t\tr0 = rf(permissions)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tvar r1 int64\n\tif rf, ok := ret.Get(1).(func(charon.Permissions) int64); ok {\n\t\tr1 = rf(permissions)\n\t} else {\n\t\tr1 = ret.Get(1).(int64)\n\t}\n\n\tvar r2 int64\n\tif rf, ok := ret.Get(2).(func(charon.Permissions) int64); ok {\n\t\tr2 = rf(permissions)\n\t} else {\n\t\tr2 = ret.Get(2).(int64)\n\t}\n\n\tvar r3 error\n\tif rf, ok := ret.Get(3).(func(charon.Permissions) error); ok {\n\t\tr3 = rf(permissions)\n\t} else {\n\t\tr3 = ret.Error(3)\n\t}\n\n\treturn r0, r1, r2, r3\n}", "func (_e *MockProxy_Expecter) Register() *MockProxy_Register_Call {\n\treturn &MockProxy_Register_Call{Call: _e.mock.On(\"Register\")}\n}", "func (_m *MockPermissionProvider) Register(permissions charon.Permissions) (int64, int64, int64, error) {\n\tret := _m.Called(permissions)\n\n\tvar r0 int64\n\tif rf, ok := ret.Get(0).(func(charon.Permissions) int64); ok {\n\t\tr0 = rf(permissions)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tvar r1 int64\n\tif rf, ok := ret.Get(1).(func(charon.Permissions) int64); ok {\n\t\tr1 = rf(permissions)\n\t} else {\n\t\tr1 = ret.Get(1).(int64)\n\t}\n\n\tvar r2 int64\n\tif rf, ok := ret.Get(2).(func(charon.Permissions) int64); ok {\n\t\tr2 = rf(permissions)\n\t} else {\n\t\tr2 = ret.Get(2).(int64)\n\t}\n\n\tvar r3 error\n\tif rf, ok := ret.Get(3).(func(charon.Permissions) error); ok {\n\t\tr3 = rf(permissions)\n\t} else {\n\t\tr3 = ret.Error(3)\n\t}\n\n\treturn r0, r1, r2, r3\n}", "func (m *MockHealthCheck) RegisterStats() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RegisterStats\")\n}", "func (_m *MockQueryCoord) Register() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockServer) RegisterService(arg0 *grpc.ServiceDesc, arg1 interface{}) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, 
\"RegisterService\", arg0, arg1)\n}", "func (m *MockmdServerLocal) RegisterForUpdate(ctx context.Context, id tlf.ID, currHead kbfsmd.Revision) (<-chan error, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RegisterForUpdate\", ctx, id, currHead)\n\tret0, _ := ret[0].(<-chan error)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockIOSession) Ref() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Ref\")\n}", "func newmockMsrService(t newmockMsrServiceT) *mockMsrService {\n\tmock := &mockMsrService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registration indicates an expected call of Registration
func (mr *MockServiceAuthMockRecorder) Registration(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Registration", reflect.TypeOf((*MockServiceAuth)(nil).Registration), arg0) }
[ "func TestRegistration(t *testing.T) {\n\td := getTestState(t)\n\n\ttxn := d.WriteTxn(1)\n\tdefer txn.Commit()\n\n\t// One must register to be registered.\n\tif id := txn.GetCuratorID(); id.IsValid() {\n\t\tt.Errorf(\"shouldn't be registered\")\n\t}\n\n\t// Register as an elite curator.\n\tsetCmd := SetRegistrationCommand{ID: 31337}\n\tif setCmd.apply(txn).ID != 31337 {\n\t\tt.Errorf(\"registered for first time but didn't get registration back\")\n\t}\n\tif id := txn.GetCuratorID(); !id.IsValid() {\n\t\tt.Errorf(\"should be registered\")\n\t}\n\n\t// Ensure idempotency.\n\tsetCmd.ID = 100\n\tif setCmd.apply(txn).ID != 31337 {\n\t\tt.Errorf(\"registered again but didn't get ID from first time\")\n\t}\n}", "func (_DelegateProfile *DelegateProfileCaller) Registered(opts *bind.CallOpts, _addr common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _DelegateProfile.contract.Call(opts, out, \"registered\", _addr)\n\treturn *ret0, err\n}", "func (mr *MockRegistryMockRecorder) Register(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockRegistry)(nil).Register), arg0)\n}", "func (_f26 *FakeconsulRegistry) CheckRegisterCalledWith(ident1 *consul.AgentCheckRegistration) (found bool) {\n\tfor _, call := range _f26.CheckRegisterCalls {\n\t\tif reflect.DeepEqual(call.Parameters.Ident1, ident1) {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func (mr *MockUsersRepoInterfaceMockRecorder) Register(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockUsersRepoInterface)(nil).Register), arg0)\n}", "func TestRegistration(t *testing.T) {\n\tmanifestCid, found := GetManifest(actorstypes.Version8)\n\trequire.True(t, found)\n\trequire.True(t, manifestCid.Defined())\n\n\tfor _, key := range 
manifest.GetBuiltinActorsKeys(actorstypes.Version8) {\n\t\tactorCid, found := GetActorCodeID(actorstypes.Version8, key)\n\t\trequire.True(t, found)\n\t\tname, version, found := GetActorMetaByCode(actorCid)\n\t\trequire.True(t, found)\n\t\trequire.Equal(t, actorstypes.Version8, version)\n\t\trequire.Equal(t, key, name)\n\t}\n}", "func TestRegister(t *testing.T) {\n\n\tfabricCAClient, err := NewFabricCAClient(org1, configImp, cryptoSuiteProvider)\n\tif err != nil {\n\t\tt.Fatalf(\"NewFabricCAClient returned error: %v\", err)\n\t}\n\tuser := mocks.NewMockUser(\"test\")\n\t// Register with nil request\n\t_, err = fabricCAClient.Register(user, nil)\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error with nil request\")\n\t}\n\tif err.Error() != \"registration request required\" {\n\t\tt.Fatalf(\"Expected error registration request required. Got: %s\", err.Error())\n\t}\n\n\t//Register with nil user\n\t_, err = fabricCAClient.Register(nil, &ca.RegistrationRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error with nil user\")\n\t}\n\tif !strings.Contains(err.Error(), \"failed to create request for signing identity\") {\n\t\tt.Fatalf(\"Expected error failed to create request for signing identity. Got: %s\", err.Error())\n\t}\n\t// Register with nil user cert and key\n\t_, err = fabricCAClient.Register(user, &ca.RegistrationRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error without user enrolment information\")\n\t}\n\tif !strings.Contains(err.Error(), \"failed to create request for signing identity\") {\n\t\tt.Fatalf(\"Expected error failed to create request for signing identity. 
Got: %s\", err.Error())\n\t}\n\n\tuser.SetEnrollmentCertificate(readCert(t))\n\tkey, err := cryptosuite.GetDefault().KeyGen(cryptosuite.GetECDSAP256KeyGenOpts(true))\n\tif err != nil {\n\t\tt.Fatalf(\"KeyGen return error %v\", err)\n\t}\n\tuser.SetPrivateKey(key)\n\t// Register without registration name parameter\n\t_, err = fabricCAClient.Register(user, &ca.RegistrationRequest{})\n\tif !strings.Contains(err.Error(), \"failed to register user\") {\n\t\tt.Fatalf(\"Expected error failed to register user. Got: %s\", err.Error())\n\t}\n\n\t// Register with valid request\n\tvar attributes []ca.Attribute\n\tattributes = append(attributes, ca.Attribute{Key: \"test1\", Value: \"test2\"})\n\tattributes = append(attributes, ca.Attribute{Key: \"test2\", Value: \"test3\"})\n\tsecret, err := fabricCAClient.Register(user, &ca.RegistrationRequest{Name: \"test\",\n\t\tAffiliation: \"test\", Attributes: attributes})\n\tif err != nil {\n\t\tt.Fatalf(\"fabricCAClient Register return error %v\", err)\n\t}\n\tif secret != \"mockSecretValue\" {\n\t\tt.Fatalf(\"fabricCAClient Register return wrong value %s\", secret)\n\t}\n}", "func (s *Service) mustRegister(t *testing.T) {\n\tif err := s.Register(testService{}); err != nil {\n\t\tt.Fatalf(\"Registering test service failed: %v\", err)\n\t}\n}", "func (mr *MockCAClientMockRecorder) Register(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockCAClient)(nil).Register), arg0)\n}", "func (handler *Handlers)Registration(w http.ResponseWriter,req *http.Request) {\n\tlog.Println(\"new user registering to the system\")\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif req.Body == nil {\n\t\tfmt.Fprintln(w, \"nil body passed\")\n\t\treturn\n\t}\n\n\tperson := newUser() //initialize the person\n\terr := json.NewDecoder(req.Body).Decode(&person) //Decode person from json\n\tif err != nil {\n\t\tfmt.Fprintln(w, 
err.Error())\n\t\treturn\n\t}\n\n\tvalidationError := validation.ValidateUser(person) //validate inputs of user and display errors if any\n\tif validationError != nil {\n\t\tvalidation.DisplayError(w, validationError)\n\t\treturn\n\t}\n person.Password= hashing.HashPassword(person.Password) //encrypt/hash password\n\n\terr = handler.Repository.AddUser(person) //store registration data into database userRepository\n\tif err != nil {\n\t\tfmt.Fprint(w, err.Error())\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"new user registered successfully\")\n\tlog.Println(\"new user registered successfully\")\n}", "func (v *VirtualHost) RegistrationAllowed() (bool, bool) {\n\tif v == nil {\n\t\treturn false, false\n\t}\n\treturn v.AllowRegistration, v.AllowGuests\n}", "func (s *service) Registration(ctx context.Context, user *model.User) error {\n\tif !strings.Contains(user.Email, \"@\") {\n\t\treturn fmt.Errorf(\"%s is bad email\", user.Email)\n\t}\n\tuser.CreateTime = time.Now()\n\n\terr := s.userRepo.Encrypt(user)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to encrypt with error: %s\", err.Error())\n\t}\n\n\ttx, err := s.transaction.Begin(&ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot start the transaction with error : %s\", err.Error())\n\t}\n\tdefer tx.Rollback()\n\n\terr = s.userPersist.InsertUser(ctx, user)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to save new user %s\", err.Error())\n\t}\n\n\terr = s.profilePersist.InsertProfile(ctx, user)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create a profile %s\", err.Error())\n\t}\n\n\treturn tx.Commit()\n}", "func (_e *MockProxy_Expecter) Register() *MockProxy_Register_Call {\n\treturn &MockProxy_Register_Call{Call: _e.mock.On(\"Register\")}\n}", "func Registration(w http.ResponseWriter, r *http.Request) {\n\n\tbody, _ := ioutil.ReadAll(r.Body)\n\n\tuser := models.User{}\n\terr := json.Unmarshal(body, &user)\n\tif err != nil {\n\t\tresponses.ERROR(w, 
http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\tuser.Prepare()\n\terr = user.Validate(\"login\")\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\ttoken, err := auth.SignUp(user.Email, user.Password)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\tresponses.JSON(w, http.StatusOK, token)\n}", "func (f *FakeconsulRegistry) CheckRegisterNotCalled() bool {\n\treturn len(f.CheckRegisterCalls) == 0\n}", "func (m *MockServiceAuth) Registration(arg0 models.UserInputReg) (models.UserSession, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Registration\", arg0)\n\tret0, _ := ret[0].(models.UserSession)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func isRegistered(t *testing.T, tr *rt.TableRow) {\n\ta := tr.OtherTestData[testAdapterDataKey].(*adapter)\n\n\tresp := probeHandler(t, a, tURLPath)\n\tassert.NotEqual(t, http.StatusNotFound, resp.Code, \"Expected handler hit\")\n}", "func (e *Executor) Registered(executor.ExecutorDriver, *mesosproto.ExecutorInfo, *mesosproto.FrameworkInfo, *mesosproto.SlaveInfo) {\n\te.Called()\n}", "func (gw *Identity_Gateway) Registered() bool {\n\tif gw != nil {\n\t\treturn len(gw.GetLogicalId()) > 0 && len(gw.GetNetworkId()) > 0\n\t}\n\treturn false\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Popup is a wrapper around gtk_popover_popup().
func (v *Popover) Popup() { C.gtk_popover_popup(v.native()) }
[ "func (v *Popover) Popdown() {\n\tC.gtk_popover_popdown(v.native())\n}", "func Popup(ih Ihandle, x, y int) int {\n\t//int IupPopup (Ihandle* ih, int x, int y);\n\treturn int(C.IupPopup(ih.ptr(), C.int(x), C.int(y)))\n}", "func Popup(name string) *PopupWidget {\n\treturn &PopupWidget{\n\t\tname: Context.FontAtlas.RegisterString(name),\n\t\tflags: 0,\n\t\tlayout: nil,\n\t}\n}", "func NewPopupMenu(parent Widget, b Base, mo ...MenuOption) *PopupMenu {\n\tpm := &PopupMenu{\n\t\tPanel: NewPanel(nil, b),\n\t}\n\tInitWidget(parent, pm)\n\tpad := pm.MyTheme().Pad\n\tbtH := b.Rect.H() - pad\n\tbtW := b.Rect.W() - pad*2\n\tpm.Rect = R(pm.Rect.Min.X, pm.Rect.Max.Y-pm.Rect.H()*float64(len(mo))-pad,\n\t\tpm.Rect.Max.X, pm.Rect.Max.Y)\n\ty := pm.Rect.H() - pad\n\tfor _, o := range mo {\n\t\tbt := NewPushButton(pm, Base{\n\t\t\tRect: R(pad, y-btH, pad+btW, y),\n\t\t\tText: o.Text,\n\t\t\tImage: o.Image,\n\t\t\tDisabled: o.Disabled,\n\t\t})\n\t\ty -= btH + pad\n\t\to := o\n\t\tbt.OnPress = func() {\n\t\t\tfmt.Printf(\"popup menu onpress before popdown o=%+v\\n\", o)\n\t\t\tbt.Surface.PopDownTo(pm)\n\t\t\tfmt.Printf(\"popup menu onpress after popdown o=%+v\\n\", o)\n\t\t\tif o.Handler != nil {\n\t\t\t\tfmt.Printf(\"popup menu onpress before handler\\n\")\n\t\t\t\tclose := o.Handler(pm)\n\t\t\t\tfmt.Printf(\"popup menu onpress after handler =%v\\n\", close)\n\t\t\t\tif close {\n\t\t\t\t\tpm.Surface.PopDownTo(nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tow := optWidget{\n\t\t\topt: o,\n\t\t\tw: bt,\n\t\t}\n\t\tpm.opts = append(pm.opts, ow)\n\t}\n\tpm.Surface.PopUp(pm)\n\treturn pm\n}", "func NewPopup(p tview.Primitive) *Popup {\n\t_, _, width, height := p.GetRect()\n\tpopup := &Popup{\n\t\tflex: tview.NewFlex().\n\t\t\tAddItem(nil, 0, 1, false).\n\t\t\tAddItem(tview.NewFlex().\n\t\t\t\tSetDirection(tview.FlexRow).\n\t\t\t\tAddItem(nil, 0, 1, false).\n\t\t\t\tAddItem(p, height, 1, false).\n\t\t\t\tAddItem(nil, 0, 1, false), width, 1, false).\n\t\t\tAddItem(nil, 0, 1, 
false),\n\t}\n\tpopup.content = p\n\treturn popup\n}", "func Popup(prompt string, options ...string) (selection string, err error) {\n\t//selection, err = defaultDmenu().Popup(prompt, options...)\n\tdmenu := defaultDmenu()\n\tselection, err = dmenu.Popup(prompt, options...)\n\treturn\n}", "func (u *UpdateServiceNotification) SetPopup(value bool) {\n\tif value {\n\t\tu.Flags.Set(0)\n\t\tu.Popup = true\n\t} else {\n\t\tu.Flags.Unset(0)\n\t\tu.Popup = false\n\t}\n}", "func (p *Surface) GetPopup(parent *Surface, positioner *Positioner) (*Popup, error) {\n\tret := NewPopup(p.Context())\n\treturn ret, p.Context().SendRequest(p, 2, wl.Proxy(ret), parent, positioner)\n}", "func MakePopover(ptr unsafe.Pointer) *NSPopover {\n\tif ptr == nil {\n\t\treturn nil\n\t}\n\treturn &NSPopover{\n\t\tNSResponder: *MakeResponder(ptr),\n\t}\n}", "func (s *State) OpenPopup(bounds image.Rectangle, d Component) Popup {\n\tif s.root != nil {\n\t\ts.focused = d\n\t\ts.update = true\n\t\treturn s.root.OpenPopup(bounds.Add(s.bounds.Min), d)\n\t}\n\treturn nil\n}", "func PopupModal(name string) *PopupModalWidget {\n\treturn &PopupModalWidget{\n\t\tname: Context.FontAtlas.RegisterString(name),\n\t\topen: nil,\n\t\tflags: WindowFlagsNoResize,\n\t\tlayout: nil,\n\t}\n}", "func OpenPopup(name string) {\n\timgui.OpenPopup(name)\n}", "func (v *MenuButton) SetPopover(popover *Popover) {\n\tC.gtk_menu_button_set_popover(v.native(), popover.toWidget())\n}", "func (v *MenuButton) GetPopover() *Popover {\n\tc := C.gtk_menu_button_get_popover(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn wrapPopover(glib.Take(unsafe.Pointer(c)))\n}", "func (v *MenuButton) SetPopup(menu IMenu) {\n\tC.gtk_menu_button_set_popup(v.native(), menu.toWidget())\n}", "func (p *PopUpMenu) Show() {\n\tp.overlay.Show()\n\tp.Menu.Show()\n}", "func (v *ScaleButton) GetPopup() (*Widget, error) {\n\tc := C.gtk_scale_button_get_popup(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := 
glib.Take(unsafe.Pointer(c))\n\treturn wrapWidget(obj), nil\n}", "func (v *MenuButton) SetUsePopover(setting bool) {\n\tC.gtk_menu_button_set_use_popover(v.native(), gbool(setting))\n}", "func (v *Menu) PopupAtPointer(triggerEvent *gdk.Event) {\n\te := (*C.GdkEvent)(triggerEvent.Native())\n\tC.gtk_menu_popup_at_pointer(v.native(), e)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Popdown is a wrapper around gtk_popover_popdown().
func (v *Popover) Popdown() { C.gtk_popover_popdown(v.native()) }
[ "func (v *Popover) Popup() {\n\tC.gtk_popover_popup(v.native())\n}", "func NewPopupMenu(parent Widget, b Base, mo ...MenuOption) *PopupMenu {\n\tpm := &PopupMenu{\n\t\tPanel: NewPanel(nil, b),\n\t}\n\tInitWidget(parent, pm)\n\tpad := pm.MyTheme().Pad\n\tbtH := b.Rect.H() - pad\n\tbtW := b.Rect.W() - pad*2\n\tpm.Rect = R(pm.Rect.Min.X, pm.Rect.Max.Y-pm.Rect.H()*float64(len(mo))-pad,\n\t\tpm.Rect.Max.X, pm.Rect.Max.Y)\n\ty := pm.Rect.H() - pad\n\tfor _, o := range mo {\n\t\tbt := NewPushButton(pm, Base{\n\t\t\tRect: R(pad, y-btH, pad+btW, y),\n\t\t\tText: o.Text,\n\t\t\tImage: o.Image,\n\t\t\tDisabled: o.Disabled,\n\t\t})\n\t\ty -= btH + pad\n\t\to := o\n\t\tbt.OnPress = func() {\n\t\t\tfmt.Printf(\"popup menu onpress before popdown o=%+v\\n\", o)\n\t\t\tbt.Surface.PopDownTo(pm)\n\t\t\tfmt.Printf(\"popup menu onpress after popdown o=%+v\\n\", o)\n\t\t\tif o.Handler != nil {\n\t\t\t\tfmt.Printf(\"popup menu onpress before handler\\n\")\n\t\t\t\tclose := o.Handler(pm)\n\t\t\t\tfmt.Printf(\"popup menu onpress after handler =%v\\n\", close)\n\t\t\t\tif close {\n\t\t\t\t\tpm.Surface.PopDownTo(nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tow := optWidget{\n\t\t\topt: o,\n\t\t\tw: bt,\n\t\t}\n\t\tpm.opts = append(pm.opts, ow)\n\t}\n\tpm.Surface.PopUp(pm)\n\treturn pm\n}", "func (v *MenuButton) GetPopover() *Popover {\n\tc := C.gtk_menu_button_get_popover(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn wrapPopover(glib.Take(unsafe.Pointer(c)))\n}", "func Popup(prompt string, options ...string) (selection string, err error) {\n\t//selection, err = defaultDmenu().Popup(prompt, options...)\n\tdmenu := defaultDmenu()\n\tselection, err = dmenu.Popup(prompt, options...)\n\treturn\n}", "func Popup(ih Ihandle, x, y int) int {\n\t//int IupPopup (Ihandle* ih, int x, int y);\n\treturn int(C.IupPopup(ih.ptr(), C.int(x), C.int(y)))\n}", "func (v *MenuButton) SetPopover(popover *Popover) {\n\tC.gtk_menu_button_set_popover(v.native(), popover.toWidget())\n}", "func MakePopover(ptr 
unsafe.Pointer) *NSPopover {\n\tif ptr == nil {\n\t\treturn nil\n\t}\n\treturn &NSPopover{\n\t\tNSResponder: *MakeResponder(ptr),\n\t}\n}", "func (v *MenuButton) SetUsePopover(setting bool) {\n\tC.gtk_menu_button_set_use_popover(v.native(), gbool(setting))\n}", "func (v *Statusbar) Pop(contextID uint) {\n\tC.gtk_statusbar_pop(v.native(), C.guint(contextID))\n}", "func Popup(name string) *PopupWidget {\n\treturn &PopupWidget{\n\t\tname: Context.FontAtlas.RegisterString(name),\n\t\tflags: 0,\n\t\tlayout: nil,\n\t}\n}", "func (v *MenuButton) GetUsePopover() bool {\n\tc := C.gtk_menu_button_get_use_popover(v.native())\n\treturn gobool(c)\n}", "func PopStyle() {\n\timgui.PopStyleVar()\n}", "func (edit Editor) GotoPageDown() {\n\tedit.Call(\"gotoPageDown\")\n}", "func (v *MenuButton) GetPopup() *Menu {\n\tc := C.gtk_menu_button_get_popup(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapMenu(obj)\n}", "func (v *Menu) PopupAtPointer(triggerEvent *gdk.Event) {\n\te := (*C.GdkEvent)(triggerEvent.Native())\n\tC.gtk_menu_popup_at_pointer(v.native(), e)\n}", "func NewPopTable(h SQLHandle) *PopTable {\n\treturn &PopTable{h}\n}", "func (edit Editor) NavigateDown(times int) {\n\tedit.Call(\"navigateDown\", times)\n}", "func NewPopup(p tview.Primitive) *Popup {\n\t_, _, width, height := p.GetRect()\n\tpopup := &Popup{\n\t\tflex: tview.NewFlex().\n\t\t\tAddItem(nil, 0, 1, false).\n\t\t\tAddItem(tview.NewFlex().\n\t\t\t\tSetDirection(tview.FlexRow).\n\t\t\t\tAddItem(nil, 0, 1, false).\n\t\t\t\tAddItem(p, height, 1, false).\n\t\t\t\tAddItem(nil, 0, 1, false), width, 1, false).\n\t\t\tAddItem(nil, 0, 1, false),\n\t}\n\tpopup.content = p\n\treturn popup\n}", "func (h *InfoPane) HistoryDown() {\n\th.DownHistory(h.History[h.PromptType])\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
/ GtkFileChooser AddChoice is a wrapper around gtk_file_chooser_add_choice().
func (v *FileChooser) AddChoice(id, label string, options, optionLabels []string) { cId := C.CString(id) defer C.free(unsafe.Pointer(cId)) cLabel := C.CString(label) defer C.free(unsafe.Pointer(cLabel)) if options == nil || optionLabels == nil { C.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), nil, nil) return } cOptions := C.make_strings(C.int(len(options) + 1)) for i, option := range options { cstr := C.CString(option) defer C.free(unsafe.Pointer(cstr)) C.set_string(cOptions, C.int(i), (*C.gchar)(cstr)) } C.set_string(cOptions, C.int(len(options)), nil) cOptionLabels := C.make_strings(C.int(len(optionLabels) + 1)) for i, optionLabel := range optionLabels { cstr := C.CString(optionLabel) defer C.free(unsafe.Pointer(cstr)) C.set_string(cOptionLabels, C.int(i), (*C.gchar)(cstr)) } C.set_string(cOptionLabels, C.int(len(optionLabels)), nil) C.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), cOptions, cOptionLabels) }
[ "func (v *FileChooser) SetChoice(id, option string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tcOption := C.CString(option)\n\tdefer C.free(unsafe.Pointer(cOption))\n\tC.gtk_file_chooser_set_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cOption))\n}", "func (v *FileChooser) GetChoice(id string) string {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tc := C.gtk_file_chooser_get_choice(v.native(), (*C.gchar)(cId))\n\treturn C.GoString(c)\n}", "func ChoiceDialog(avp *Viewport2D, opts DlgOpts, choices []string, recv ki.Ki, fun ki.RecvFunc) {\n\tdlg := NewStdDialog(opts, false, false) // no buttons\n\tdlg.Modal = true\n\tif recv != nil && fun != nil {\n\t\tdlg.DialogSig.Connect(recv, fun)\n\t}\n\n\tframe := dlg.Frame()\n\tbb, _ := dlg.ButtonBox(frame)\n\tfor i, ch := range choices {\n\t\tchnm := strcase.ToKebab(ch)\n\t\tb := bb.AddNewChild(KiT_Button, chnm).(*Button)\n\t\tb.SetProp(\"__cdSigVal\", int64(i))\n\t\tb.SetText(ch)\n\t\tif chnm == \"cancel\" {\n\t\t\tb.ButtonSig.Connect(dlg.This, func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(ButtonClicked) {\n\t\t\t\t\ttb := send.Embed(KiT_Button).(*Button)\n\t\t\t\t\tdlg := recv.Embed(KiT_Dialog).(*Dialog)\n\t\t\t\t\tdlg.SigVal = tb.KnownProp(\"__cdSigVal\").(int64)\n\t\t\t\t\tdlg.Cancel()\n\t\t\t\t}\n\t\t\t})\n\t\t} else {\n\t\t\tb.ButtonSig.Connect(dlg.This, func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(ButtonClicked) {\n\t\t\t\t\ttb := send.Embed(KiT_Button).(*Button)\n\t\t\t\t\tdlg := recv.Embed(KiT_Dialog).(*Dialog)\n\t\t\t\t\tdlg.SigVal = tb.KnownProp(\"__cdSigVal\").(int64)\n\t\t\t\t\tdlg.Accept()\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\n\tdlg.UpdateEndNoSig(true) // going to be shown\n\tdlg.Open(0, 0, avp, nil)\n}", "func (v *FileChooser) RemoveChoice(id string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tC.gtk_file_chooser_remove_choice(v.native(), (*C.gchar)(cId))\n}", "func (cycle *Cycle) 
AddChoice(choice CycleChoice) *CycleItem {\n\treturn newCycleItem(cycle, choice)\n}", "func (f *FlagSetExt) Choice(name, value, usage string, options ...string) *string {\n\tp := new(string)\n\tf.ChoiceVarP(p, name, \"\", value, usage, options...)\n\treturn p\n}", "func runFileChooser(win *gtk.Window) (string, error) {\n\n\tvar fn string\n\n\topenFile, err := gtk.FileChooserDialogNewWith2Buttons(\"Open file\", win, gtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\"Cancel\", gtk.RESPONSE_CANCEL,\n\t\t\"Ok\", gtk.RESPONSE_OK)\n\tdefer openFile.Destroy()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\topenFile.SetDefaultSize(50, 50)\n\n\tres := openFile.Run()\n\n\tif res == int(gtk.RESPONSE_OK) {\n\t\tfn = openFile.FileChooser.GetFilename()\n\t}\n\n\treturn fn, nil\n}", "func (fv *FileView) FileSelectAction(idx int) {\n\tif idx < 0 {\n\t\treturn\n\t}\n\tfv.SaveSortPrefs()\n\tfi := fv.Files[idx]\n\tfv.SelectedIdx = idx\n\tfv.SelFile = fi.Name\n\tsf := fv.SelField()\n\tsf.SetText(fv.SelFile)\n\tfv.WidgetSig.Emit(fv.This, int64(gi.WidgetSelected), fv.SelectedFile())\n}", "func Choice(s *string, choices []string, title, id, class string, valid Validator) (jquery.JQuery, error) {\n\tj := jq(\"<select>\").AddClass(ClassPrefix + \"-choice\").AddClass(class)\n\tj.SetAttr(\"title\", title).SetAttr(\"id\", id)\n\tif *s == \"\" {\n\t\t*s = choices[0]\n\t}\n\tindex := -1\n\tfor i, c := range choices {\n\t\tif c == *s {\n\t\t\tindex = i\n\t\t}\n\t\tj.Append(jq(\"<option>\").SetAttr(\"value\", c).SetText(c))\n\t}\n\tif index == -1 {\n\t\treturn jq(), fmt.Errorf(\"Default of '%s' is not among valid choices\", *s)\n\t}\n\tj.SetData(\"prev\", index)\n\tj.SetProp(\"selectedIndex\", index)\n\tj.Call(jquery.CHANGE, func(event jquery.Event) {\n\t\tnewS := event.Target.Get(\"value\").String()\n\t\tnewIndex := event.Target.Get(\"selectedIndex\").Int()\n\t\tif valid != nil && !valid.Validate(newS) {\n\t\t\tnewIndex = int(j.Data(\"prev\").(float64))\n\t\t\tj.SetProp(\"selectedIndex\", 
newIndex)\n\t\t}\n\t\t*s = choices[int(newIndex)]\n\t\tj.SetData(\"prev\", newIndex)\n\t})\n\treturn j, nil\n}", "func create_combo_box(strings []string) *gtk.ComboBoxText {\n\tcombo_box := gtk.NewComboBoxText()\n\tfor _, s := range strings {\n\t\tcombo_box.AppendText(s)\n\t}\n\tcombo_box.SetActive(0)\n\treturn combo_box\n}", "func NewChoice() Choice {\n\treturn new(ChoiceImpl)\n}", "func selectFileGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Load()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}", "func (s *BasevhdlListener) EnterChoice(ctx *ChoiceContext) {}", "func Choice(name, value, usage string, options ...string) *string {\n\treturn (&FlagSetExt{pflag.CommandLine}).ChoiceP(name, \"\", value, usage, options...)\n}", "func (s *BasevhdlListener) EnterChoices(ctx *ChoicesContext) {}", "func NewChoice(allowedValues ...string) Choice {\n\treturn Choice{AllowedValues: allowedValues}\n}", "func (c *Combobox) Append(text string) {\n\tctext := C.CString(text)\n\tC.uiComboboxAppend(c.c, ctext)\n\tfreestr(ctext)\n}", "func NewAddItemAccepted() *AddItemAccepted {\n\n\treturn &AddItemAccepted{}\n}", "func NewChoice(output *string, choices ...string) Choice {\n\treturn Choice{choices: choices, output: output}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RemoveChoice is a wrapper around gtk_file_chooser_remove_choice().
func (v *FileChooser) RemoveChoice(id string) { cId := C.CString(id) defer C.free(unsafe.Pointer(cId)) C.gtk_file_chooser_remove_choice(v.native(), (*C.gchar)(cId)) }
[ "func DeleteChoice(choice *quizzesModel.Choice) error {\n\terr := dbInstance.GetDBConnection().Unscoped().Delete(choice).Error\n\tchoicesDiagnostics.WriteChoiceErr(err, \"Delete\", choice)\n\treturn err\n}", "func (q *question) Remove(voter string) {\n\tfor i := range q.Choices {\n\t\tfor j, v := range q.Choices[i].Voters {\n\t\t\tif v == voter {\n\t\t\t\tq.Choices[i].Voters = append(q.Choices[i].Voters[:j], q.Choices[i].Voters[j+1:]...)\n\n\t\t\t\t// Remove text question choice entirely if none adhere to it.\n\t\t\t\tif !q.Radio && len(q.Choices[i].Voters) == 0 {\n\t\t\t\t\tq.Choices = append(q.Choices[:i], q.Choices[i+1:]...)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (v *FileChooser) GetChoice(id string) string {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tc := C.gtk_file_chooser_get_choice(v.native(), (*C.gchar)(cId))\n\treturn C.GoString(c)\n}", "func (v *FileChooser) SetChoice(id, option string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tcOption := C.CString(option)\n\tdefer C.free(unsafe.Pointer(cOption))\n\tC.gtk_file_chooser_set_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cOption))\n}", "func (v *FileChooser) AddChoice(id, label string, options, optionLabels []string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\n\tcLabel := C.CString(label)\n\tdefer C.free(unsafe.Pointer(cLabel))\n\n\tif options == nil || optionLabels == nil {\n\t\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), nil, nil)\n\t\treturn\n\t}\n\n\tcOptions := C.make_strings(C.int(len(options) + 1))\n\tfor i, option := range options {\n\t\tcstr := C.CString(option)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptions, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptions, C.int(len(options)), nil)\n\n\tcOptionLabels := C.make_strings(C.int(len(optionLabels) + 1))\n\tfor i, optionLabel := range optionLabels {\n\t\tcstr := C.CString(optionLabel)\n\t\tdefer 
C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptionLabels, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptionLabels, C.int(len(optionLabels)), nil)\n\n\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), cOptions, cOptionLabels)\n}", "func (s *Selection) Remove(v int) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tfor k, i := range s.selection {\n\t\tif i == v {\n\t\t\ttmp := s.selection[:k]\n\t\t\ttmp = append(tmp, s.selection[k+1:]...)\n\t\t\ts.selection = tmp\n\t\t\treturn\n\t\t}\n\t}\n}", "func removeSelectedAccountFromOSSettings(ctx context.Context, tconn *chrome.TestConn) error {\n\ttesting.ContextLog(ctx, \"Removing account\")\n\n\tui := uiauto.New(tconn).WithTimeout(DefaultUITimeout)\n\tremoveAccountButton := RemoveActionButton()\n\tif err := uiauto.Combine(\"Click Remove account\",\n\t\tui.WaitUntilExists(removeAccountButton),\n\t\tui.LeftClick(removeAccountButton),\n\t)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to to click Remove account\")\n\t}\n\n\treturn nil\n}", "func (m *Menu) RemoveMenuOption(index int) {\n\tcopy(m.Options[index:], m.Options[index+1:])\n\tm.Options = m.Options[:len(m.Options)-1]\n}", "func RemoveActionButton() *nodewith.Finder {\n\treturn nodewith.Name(\"Remove this account\").Role(role.MenuItem)\n}", "func (b *binding) RemoveFile(ctx context.Context, path string) error {\n\t_, err := b.Shell(\"rm\", \"-f\", path).Call(ctx)\n\treturn err\n}", "func (v *IconView) UnselectPath(path *TreePath) {\n\tC.gtk_icon_view_unselect_path(v.native(), path.native())\n}", "func (fb *FlowBox) UnselectChild(child *FlowBoxChild) {\n\tC.gtk_flow_box_unselect_child(fb.native(), child.native())\n}", "func UnMenuButton(cb func()) {\n\tjs.Global.Get(\"document\").Call(\"removeEventListener\", \"menubutton\", cb, false)\n}", "func ChoiceDialog(avp *Viewport2D, opts DlgOpts, choices []string, recv ki.Ki, fun ki.RecvFunc) {\n\tdlg := NewStdDialog(opts, false, false) // no buttons\n\tdlg.Modal = 
true\n\tif recv != nil && fun != nil {\n\t\tdlg.DialogSig.Connect(recv, fun)\n\t}\n\n\tframe := dlg.Frame()\n\tbb, _ := dlg.ButtonBox(frame)\n\tfor i, ch := range choices {\n\t\tchnm := strcase.ToKebab(ch)\n\t\tb := bb.AddNewChild(KiT_Button, chnm).(*Button)\n\t\tb.SetProp(\"__cdSigVal\", int64(i))\n\t\tb.SetText(ch)\n\t\tif chnm == \"cancel\" {\n\t\t\tb.ButtonSig.Connect(dlg.This, func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(ButtonClicked) {\n\t\t\t\t\ttb := send.Embed(KiT_Button).(*Button)\n\t\t\t\t\tdlg := recv.Embed(KiT_Dialog).(*Dialog)\n\t\t\t\t\tdlg.SigVal = tb.KnownProp(\"__cdSigVal\").(int64)\n\t\t\t\t\tdlg.Cancel()\n\t\t\t\t}\n\t\t\t})\n\t\t} else {\n\t\t\tb.ButtonSig.Connect(dlg.This, func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(ButtonClicked) {\n\t\t\t\t\ttb := send.Embed(KiT_Button).(*Button)\n\t\t\t\t\tdlg := recv.Embed(KiT_Dialog).(*Dialog)\n\t\t\t\t\tdlg.SigVal = tb.KnownProp(\"__cdSigVal\").(int64)\n\t\t\t\t\tdlg.Accept()\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\n\tdlg.UpdateEndNoSig(true) // going to be shown\n\tdlg.Open(0, 0, avp, nil)\n}", "func performSuggestionRemovalDialogAction(tconn *chrome.TestConn, dialogButtonName string) uiauto.Action {\n\tui := uiauto.New(tconn)\n\treturn uiauto.Combine(\"press removal dialog button\",\n\t\tui.LeftClick(nodewith.Role(role.Button).Name(dialogButtonName).Ancestor(removalDialogFinder)),\n\t\tui.WaitUntilGone(removalDialogFinder))\n}", "func radioMenuItemFinalizer(m *RadioMenuItem) {\n\truntime.SetFinalizer(m, func(m *RadioMenuItem) { gobject.Unref(m) })\n}", "func (s *BasevhdlListener) ExitChoice(ctx *ChoiceContext) {}", "func (c *Client) RemoveFavoriteSticker(ctx context.Context, sticker InputFileClass) error {\n\tvar ok Ok\n\n\trequest := &RemoveFavoriteStickerRequest{\n\t\tSticker: sticker,\n\t}\n\tif err := c.rpc.Invoke(ctx, request, &ok); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (v *Menu) Remove(position int) 
{\n\tC.g_menu_remove(v.native(), C.gint(position))\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetChoice is a wrapper around gtk_file_chooser_set_choice().
func (v *FileChooser) SetChoice(id, option string) { cId := C.CString(id) defer C.free(unsafe.Pointer(cId)) cOption := C.CString(option) defer C.free(unsafe.Pointer(cOption)) C.gtk_file_chooser_set_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cOption)) }
[ "func (v *FileChooser) AddChoice(id, label string, options, optionLabels []string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\n\tcLabel := C.CString(label)\n\tdefer C.free(unsafe.Pointer(cLabel))\n\n\tif options == nil || optionLabels == nil {\n\t\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), nil, nil)\n\t\treturn\n\t}\n\n\tcOptions := C.make_strings(C.int(len(options) + 1))\n\tfor i, option := range options {\n\t\tcstr := C.CString(option)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptions, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptions, C.int(len(options)), nil)\n\n\tcOptionLabels := C.make_strings(C.int(len(optionLabels) + 1))\n\tfor i, optionLabel := range optionLabels {\n\t\tcstr := C.CString(optionLabel)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptionLabels, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptionLabels, C.int(len(optionLabels)), nil)\n\n\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), cOptions, cOptionLabels)\n}", "func (v *FileChooser) GetChoice(id string) string {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tc := C.gtk_file_chooser_get_choice(v.native(), (*C.gchar)(cId))\n\treturn C.GoString(c)\n}", "func ChoiceDialog(avp *Viewport2D, opts DlgOpts, choices []string, recv ki.Ki, fun ki.RecvFunc) {\n\tdlg := NewStdDialog(opts, false, false) // no buttons\n\tdlg.Modal = true\n\tif recv != nil && fun != nil {\n\t\tdlg.DialogSig.Connect(recv, fun)\n\t}\n\n\tframe := dlg.Frame()\n\tbb, _ := dlg.ButtonBox(frame)\n\tfor i, ch := range choices {\n\t\tchnm := strcase.ToKebab(ch)\n\t\tb := bb.AddNewChild(KiT_Button, chnm).(*Button)\n\t\tb.SetProp(\"__cdSigVal\", int64(i))\n\t\tb.SetText(ch)\n\t\tif chnm == \"cancel\" {\n\t\t\tb.ButtonSig.Connect(dlg.This, func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(ButtonClicked) {\n\t\t\t\t\ttb := 
send.Embed(KiT_Button).(*Button)\n\t\t\t\t\tdlg := recv.Embed(KiT_Dialog).(*Dialog)\n\t\t\t\t\tdlg.SigVal = tb.KnownProp(\"__cdSigVal\").(int64)\n\t\t\t\t\tdlg.Cancel()\n\t\t\t\t}\n\t\t\t})\n\t\t} else {\n\t\t\tb.ButtonSig.Connect(dlg.This, func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(ButtonClicked) {\n\t\t\t\t\ttb := send.Embed(KiT_Button).(*Button)\n\t\t\t\t\tdlg := recv.Embed(KiT_Dialog).(*Dialog)\n\t\t\t\t\tdlg.SigVal = tb.KnownProp(\"__cdSigVal\").(int64)\n\t\t\t\t\tdlg.Accept()\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\n\tdlg.UpdateEndNoSig(true) // going to be shown\n\tdlg.Open(0, 0, avp, nil)\n}", "func Choice(s *string, choices []string, title, id, class string, valid Validator) (jquery.JQuery, error) {\n\tj := jq(\"<select>\").AddClass(ClassPrefix + \"-choice\").AddClass(class)\n\tj.SetAttr(\"title\", title).SetAttr(\"id\", id)\n\tif *s == \"\" {\n\t\t*s = choices[0]\n\t}\n\tindex := -1\n\tfor i, c := range choices {\n\t\tif c == *s {\n\t\t\tindex = i\n\t\t}\n\t\tj.Append(jq(\"<option>\").SetAttr(\"value\", c).SetText(c))\n\t}\n\tif index == -1 {\n\t\treturn jq(), fmt.Errorf(\"Default of '%s' is not among valid choices\", *s)\n\t}\n\tj.SetData(\"prev\", index)\n\tj.SetProp(\"selectedIndex\", index)\n\tj.Call(jquery.CHANGE, func(event jquery.Event) {\n\t\tnewS := event.Target.Get(\"value\").String()\n\t\tnewIndex := event.Target.Get(\"selectedIndex\").Int()\n\t\tif valid != nil && !valid.Validate(newS) {\n\t\t\tnewIndex = int(j.Data(\"prev\").(float64))\n\t\t\tj.SetProp(\"selectedIndex\", newIndex)\n\t\t}\n\t\t*s = choices[int(newIndex)]\n\t\tj.SetData(\"prev\", newIndex)\n\t})\n\treturn j, nil\n}", "func (fv *FileView) SetSelFileAction(sel string) {\n\tfv.SelFile = sel\n\tsv := fv.FilesView()\n\tsv.SelectFieldVal(\"Name\", fv.SelFile)\n\tfv.SelectedIdx = sv.SelectedIdx\n\tsf := fv.SelField()\n\tsf.SetText(fv.SelFile)\n\tfv.WidgetSig.Emit(fv.This, int64(gi.WidgetSelected), fv.SelectedFile())\n}", "func (cv *Choice) Set(value 
string) error {\n\tif indexof.String(cv.AllowedValues, value) == indexof.NotFound {\n\t\treturn fmt.Errorf(\n\t\t\t\"invalid flag value: %s, must be one of %s\",\n\t\t\tvalue,\n\t\t\tstrings.Join(cv.AllowedValues, \", \"),\n\t\t)\n\t}\n\n\tcv.Choice = &value\n\treturn nil\n}", "func (f *FlagSetExt) Choice(name, value, usage string, options ...string) *string {\n\tp := new(string)\n\tf.ChoiceVarP(p, name, \"\", value, usage, options...)\n\treturn p\n}", "func (v *FileChooser) RemoveChoice(id string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tC.gtk_file_chooser_remove_choice(v.native(), (*C.gchar)(cId))\n}", "func Set(newChoices []string) {\n\tchoices = newChoices\n\tchoicelen = len(newChoices)\n}", "func Choice(name, value, usage string, options ...string) *string {\n\treturn (&FlagSetExt{pflag.CommandLine}).ChoiceP(name, \"\", value, usage, options...)\n}", "func Choice(m string, exacts []string) string {\n\tfmt.Println(colors.Blue(prefix + \" \" + m + \": \"))\n\tret := make(chan string, 1)\n\tterminate := make(chan struct{})\n\tgo cho.Run(exacts, ret, terminate)\n\tselected := \"\"\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase selected = <-ret:\n\t\t\tbreak LOOP\n\t\tcase <-terminate:\n\t\t\tbreak LOOP\n\t\t}\n\t}\n\tif selected != \"\" {\n\t\tfmt.Println(selected)\n\t}\n\treturn selected\n}", "func (p *PollAnswerVoters) SetChosen(value bool) {\n\tif value {\n\t\tp.Flags.Set(0)\n\t\tp.Chosen = true\n\t} else {\n\t\tp.Flags.Unset(0)\n\t\tp.Chosen = false\n\t}\n}", "func RenderSetChoice(resp http.ResponseWriter, req *http.Request, ext string, mode primitive.Mode, fileSeeker io.ReadSeeker) {\n\n\top := []OptStruct{\n\t\t{20, mode},\n\t\t{30, mode},\n\t\t{40, mode},\n\t\t{50, mode},\n\t}\n\topFileList, err := GenImgList(ext, fileSeeker, op...)\n\tif err != nil {\n http.Error(resp, err.Error(), http.StatusInternalServerError)\n return\n\t}\n\thtmlist := `<html>\n <body>\n {{range .}}\n <a href=\"/modify/{{.Name}}?mode={{.Mode}}&n={{.Numshapes}}\">\n 
<img style =\"width 30%\" src=\"/pics/{{.Name}}\">\n {{end}}\n </body>\n </html>\n `\n\ttempl := template.Must(template.New(\"\").Parse(htmlist))\n\n\ttype Opts struct {\n\t\tName string\n\t\tMode primitive.Mode\n\t\tNumshapes int\n\t}\n\tvar opts []Opts\n\tfor index, val := range opFileList {\n\t\topts = append(opts, Opts{Name: filepath.Base(val), Mode: op[index].mode, Numshapes: op[index].num})\n\t}\n\n\t// err = templ.Execute(resp, opts)\n\t// if err != nil {\n\t// \tpanic(err)\n\t// }\n checkError(templ.Execute(resp,opts))\n}", "func (d *domainClient) SetInterceptFileChooserDialog(ctx context.Context, args *SetInterceptFileChooserDialogArgs) (err error) {\n\tif args != nil {\n\t\terr = rpcc.Invoke(ctx, \"Page.setInterceptFileChooserDialog\", args, nil, d.conn)\n\t} else {\n\t\terr = rpcc.Invoke(ctx, \"Page.setInterceptFileChooserDialog\", nil, nil, d.conn)\n\t}\n\tif err != nil {\n\t\terr = &internal.OpError{Domain: \"Page\", Op: \"SetInterceptFileChooserDialog\", Err: err}\n\t}\n\treturn\n}", "func UpdateChoice(choice *quizzesModel.Choice) error {\n\terr := dbInstance.GetDBConnection().Save(choice).Error\n\tchoicesDiagnostics.WriteChoiceErr(err, \"Update\", choice)\n\treturn err\n}", "func NewChoice() Choice {\n\treturn new(ChoiceImpl)\n}", "func (c *Combobox) SetSelected(index int) {\n\tC.uiComboboxSetSelected(c.c, C.int(index))\n}", "func (c *CmdConfigure) askChoice(label string, choices []string) (int, string) {\n\tselection, index, err := c.askSelection(label, choices)\n\tif err != nil {\n\t\tc.log.FATAL.Fatal(err)\n\t}\n\n\treturn index, selection\n}", "func NewChoice(allowedValues ...string) Choice {\n\treturn Choice{AllowedValues: allowedValues}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetChoice is a wrapper around gtk_file_chooser_get_choice().
func (v *FileChooser) GetChoice(id string) string { cId := C.CString(id) defer C.free(unsafe.Pointer(cId)) c := C.gtk_file_chooser_get_choice(v.native(), (*C.gchar)(cId)) return C.GoString(c) }
[ "func (s *SingleSelectList) GetChoice() string { return s.choice }", "func (v *FileChooser) SetChoice(id, option string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tcOption := C.CString(option)\n\tdefer C.free(unsafe.Pointer(cOption))\n\tC.gtk_file_chooser_set_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cOption))\n}", "func (f *FlagSetExt) Choice(name, value, usage string, options ...string) *string {\n\tp := new(string)\n\tf.ChoiceVarP(p, name, \"\", value, usage, options...)\n\treturn p\n}", "func Choice(m string, exacts []string) string {\n\tfmt.Println(colors.Blue(prefix + \" \" + m + \": \"))\n\tret := make(chan string, 1)\n\tterminate := make(chan struct{})\n\tgo cho.Run(exacts, ret, terminate)\n\tselected := \"\"\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase selected = <-ret:\n\t\t\tbreak LOOP\n\t\tcase <-terminate:\n\t\t\tbreak LOOP\n\t\t}\n\t}\n\tif selected != \"\" {\n\t\tfmt.Println(selected)\n\t}\n\treturn selected\n}", "func ChoiceDialog(avp *Viewport2D, opts DlgOpts, choices []string, recv ki.Ki, fun ki.RecvFunc) {\n\tdlg := NewStdDialog(opts, false, false) // no buttons\n\tdlg.Modal = true\n\tif recv != nil && fun != nil {\n\t\tdlg.DialogSig.Connect(recv, fun)\n\t}\n\n\tframe := dlg.Frame()\n\tbb, _ := dlg.ButtonBox(frame)\n\tfor i, ch := range choices {\n\t\tchnm := strcase.ToKebab(ch)\n\t\tb := bb.AddNewChild(KiT_Button, chnm).(*Button)\n\t\tb.SetProp(\"__cdSigVal\", int64(i))\n\t\tb.SetText(ch)\n\t\tif chnm == \"cancel\" {\n\t\t\tb.ButtonSig.Connect(dlg.This, func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(ButtonClicked) {\n\t\t\t\t\ttb := send.Embed(KiT_Button).(*Button)\n\t\t\t\t\tdlg := recv.Embed(KiT_Dialog).(*Dialog)\n\t\t\t\t\tdlg.SigVal = tb.KnownProp(\"__cdSigVal\").(int64)\n\t\t\t\t\tdlg.Cancel()\n\t\t\t\t}\n\t\t\t})\n\t\t} else {\n\t\t\tb.ButtonSig.Connect(dlg.This, func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\t\tif sig == int64(ButtonClicked) {\n\t\t\t\t\ttb := 
send.Embed(KiT_Button).(*Button)\n\t\t\t\t\tdlg := recv.Embed(KiT_Dialog).(*Dialog)\n\t\t\t\t\tdlg.SigVal = tb.KnownProp(\"__cdSigVal\").(int64)\n\t\t\t\t\tdlg.Accept()\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\n\tdlg.UpdateEndNoSig(true) // going to be shown\n\tdlg.Open(0, 0, avp, nil)\n}", "func (c *Config) readChoice(prompt string, choices []string, defaultValue *string) (string, error) {\n\tswitch {\n\tcase c.noTTY:\n\t\tfullPrompt := prompt + \" (\" + strings.Join(choices, \"/\")\n\t\tif defaultValue != nil {\n\t\t\tfullPrompt += \", default \" + *defaultValue\n\t\t}\n\t\tfullPrompt += \")? \"\n\t\tabbreviations := chezmoi.UniqueAbbreviations(choices)\n\t\tfor {\n\t\t\tvalue, err := c.readLineRaw(fullPrompt)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif value == \"\" && defaultValue != nil {\n\t\t\t\treturn *defaultValue, nil\n\t\t\t}\n\t\t\tif value, ok := abbreviations[value]; ok {\n\t\t\t\treturn value, nil\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tinitModel := chezmoibubbles.NewChoiceInputModel(prompt, choices, defaultValue)\n\t\tfinalModel, err := runCancelableModel(initModel)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn finalModel.Value(), nil\n\t}\n}", "func (v *FileChooser) AddChoice(id, label string, options, optionLabels []string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\n\tcLabel := C.CString(label)\n\tdefer C.free(unsafe.Pointer(cLabel))\n\n\tif options == nil || optionLabels == nil {\n\t\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), nil, nil)\n\t\treturn\n\t}\n\n\tcOptions := C.make_strings(C.int(len(options) + 1))\n\tfor i, option := range options {\n\t\tcstr := C.CString(option)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptions, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptions, C.int(len(options)), nil)\n\n\tcOptionLabels := C.make_strings(C.int(len(optionLabels) + 1))\n\tfor i, optionLabel := range optionLabels {\n\t\tcstr := 
C.CString(optionLabel)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptionLabels, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptionLabels, C.int(len(optionLabels)), nil)\n\n\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), cOptions, cOptionLabels)\n}", "func Choice(name, value, usage string, options ...string) *string {\n\treturn (&FlagSetExt{pflag.CommandLine}).ChoiceP(name, \"\", value, usage, options...)\n}", "func getChoice(request events.APIGatewayProxyRequest, span opentracing.Span) (choice string) {\n\t_, winOk := request.QueryStringParameters[\"win\"]\n\tif winOk {\n\t\tspan.SetTag(\"WinFlag\", true)\n\t\treturn \"win\"\n\t} else {\n\t\tspan.SetTag(\"WinFlag\", false)\n\t}\n\tchoiceQP, qpOk := request.QueryStringParameters[\"choice\"]\n\tif !qpOk {\n\t\tspan.LogKV(\"event\", \"No choice query parameter provided.\")\n\t} else {\n\t\tspan.SetTag(\"choiceQueryParameter\", choiceQP)\n\t}\n\t_, validChoice := choiceToNum[choiceQP]\n\tif !validChoice {\n\t\trandomChoice := numToChoice[getRandomNumber()]\n\t\tinvalid := fmt.Sprintf(\"Invalid choice query parameter \\\"%s\\\". 
Using %s selected at random.\",\n\t\t\tchoiceQP, randomChoice)\n\t\tspan.LogKV(\"event\", invalid)\n\t\tspan.SetTag(\"randomChoice\", randomChoice)\n\t\tchoice = randomChoice\n\t} else {\n\t\tchoice = choiceQP\n\t}\n\treturn choice\n}", "func selectFileGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Load()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}", "func runFileChooser(win *gtk.Window) (string, error) {\n\n\tvar fn string\n\n\topenFile, err := gtk.FileChooserDialogNewWith2Buttons(\"Open file\", win, gtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\"Cancel\", gtk.RESPONSE_CANCEL,\n\t\t\"Ok\", gtk.RESPONSE_OK)\n\tdefer openFile.Destroy()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\topenFile.SetDefaultSize(50, 50)\n\n\tres := openFile.Run()\n\n\tif res == int(gtk.RESPONSE_OK) {\n\t\tfn = openFile.FileChooser.GetFilename()\n\t}\n\n\treturn fn, nil\n}", "func (c *chooser) Choose() int {\n\tres := c.Indices[0]\n\tc.Indices = c.Indices[1:]\n\treturn res\n}", "func (v *FileChooser) RemoveChoice(id string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tC.gtk_file_chooser_remove_choice(v.native(), (*C.gchar)(cId))\n}", "func pickFile(dir, message string) string {\n\tfileName := \"\"\n\terr := survey.AskOne(\n\t\t&survey.Select{\n\t\t\tMessage: message,\n\t\t\tOptions: readDir(dir),\n\t\t},\n\t\t&fileName,\n\t\tsurvey.WithValidator(survey.Required),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn fileName\n}", "func (c *Config) promptChoice(prompt string, choices []string, args ...string) (string, error) {\n\tvar defaultValue *string\n\tswitch len(args) {\n\tcase 0:\n\t\t// Do nothing.\n\tcase 1:\n\t\tif !slices.Contains(choices, args[0]) {\n\t\t\treturn \"\", fmt.Errorf(\"%s: invalid default value\", args[0])\n\t\t}\n\t\tdefaultValue = &args[0]\n\tdefault:\n\t\treturn \"\", 
fmt.Errorf(\"want 2 or 3 arguments, got %d\", len(args)+2)\n\t}\n\tif c.interactiveTemplateFuncs.promptDefaults && defaultValue != nil {\n\t\treturn *defaultValue, nil\n\t}\n\treturn c.readChoice(prompt, choices, defaultValue)\n}", "func (c *CmdConfigure) askChoice(label string, choices []string) (int, string) {\n\tselection, index, err := c.askSelection(label, choices)\n\tif err != nil {\n\t\tc.log.FATAL.Fatal(err)\n\t}\n\n\treturn index, selection\n}", "func NewChoice() Choice {\n\treturn new(ChoiceImpl)\n}", "func Choice(s *string, choices []string, title, id, class string, valid Validator) (jquery.JQuery, error) {\n\tj := jq(\"<select>\").AddClass(ClassPrefix + \"-choice\").AddClass(class)\n\tj.SetAttr(\"title\", title).SetAttr(\"id\", id)\n\tif *s == \"\" {\n\t\t*s = choices[0]\n\t}\n\tindex := -1\n\tfor i, c := range choices {\n\t\tif c == *s {\n\t\t\tindex = i\n\t\t}\n\t\tj.Append(jq(\"<option>\").SetAttr(\"value\", c).SetText(c))\n\t}\n\tif index == -1 {\n\t\treturn jq(), fmt.Errorf(\"Default of '%s' is not among valid choices\", *s)\n\t}\n\tj.SetData(\"prev\", index)\n\tj.SetProp(\"selectedIndex\", index)\n\tj.Call(jquery.CHANGE, func(event jquery.Event) {\n\t\tnewS := event.Target.Get(\"value\").String()\n\t\tnewIndex := event.Target.Get(\"selectedIndex\").Int()\n\t\tif valid != nil && !valid.Validate(newS) {\n\t\t\tnewIndex = int(j.Data(\"prev\").(float64))\n\t\t\tj.SetProp(\"selectedIndex\", newIndex)\n\t\t}\n\t\t*s = choices[int(newIndex)]\n\t\tj.SetData(\"prev\", newIndex)\n\t})\n\treturn j, nil\n}", "func (cli *CLI) Choose() (string, error) {\n\tcolorstring.Fprintf(cli.errStream, chooseText)\n\n\tnum, err := cli.AskNumber(4, 1)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// If user selects 3, should ask user GPL V2 or V3\n\tif num == 3 {\n\t\tvar buf bytes.Buffer\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"Which version do you want?\\n\")\n\t\tbuf.WriteString(\" 1) V2\\n\")\n\t\tbuf.WriteString(\" 2) 
V3\\n\")\n\t\tfmt.Fprintf(cli.errStream, buf.String())\n\n\t\tnum, err = cli.AskNumber(2, 1)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tnum += 4\n\t}\n\n\tvar key string\n\tswitch num {\n\tcase 1:\n\t\tkey = \"mit\"\n\tcase 2:\n\t\tkey = \"apache-2.0\"\n\tcase 4:\n\t\tkey = \"\"\n\tcase 5:\n\t\tkey = \"gpl-2.0\"\n\tcase 6:\n\t\tkey = \"gpl-3.0\"\n\tdefault:\n\t\t// Should not reach here\n\t\tpanic(\"Invalid number\")\n\t}\n\n\treturn key, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
/ GtkScrolledWindow GetMaxContentWidth is a wrapper around gtk_scrolled_window_get_max_content_width().
func (v *ScrolledWindow) GetMaxContentWidth() int { c := C.gtk_scrolled_window_get_max_content_width(v.native()) return int(c) }
[ "func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}", "func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}", "func (v *ScrolledWindow) SetMaxContentHeight(width int) {\n\tC.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))\n}", "func (v *Entry) GetMaxWidthChars() int {\n\tc := C.gtk_entry_get_max_width_chars(v.native())\n\treturn int(c)\n}", "func (c *content) width() int {\n\tm := c.mw\n\tif m > c.w {\n\t\treturn m\n\t}\n\n\treturn c.w\n}", "func (v *Label) GetMaxWidthChars() int {\n\tc := C.gtk_label_get_max_width_chars(v.native())\n\treturn int(c)\n}", "func getContentWidth(pdf *gofpdf.Fpdf) float64 {\n\tmarginL, _, marginR, _ := pdf.GetMargins()\n\tpageW, _ := pdf.GetPageSize()\n\twidth := pageW - marginL - marginR\n\treturn width\n}", "func (p *pageT) WidthMax(s string) {\n\tp.Style = css.NewStylesResponsive(p.Style)\n\tp.Style.Desktop.StyleBox.WidthMax = s\n\tp.Style.Mobile.StyleBox.WidthMax = \"calc(100% - 1.2rem)\" // 0.6rem margin-left and -right in mobile view\n}", "func (w Widths) MaxWidth() (maxWidth, wideDepth int) {\n\tfor depth, width := range w {\n\t\tif width > maxWidth {\n\t\t\tmaxWidth = width\n\t\t\twideDepth = depth\n\t\t}\n\t}\n\treturn\n}", "func (c *content) maxLinewidth() int {\n\tw := 0\n\n\tfor _, r := range c.s {\n\t\tl := utf8.RuneCountInString(r)\n\t\tif l > w {\n\t\t\tw = l\n\t\t}\n\t}\n\n\treturn w\n}", "func (p pager) maxScrollX() int {\n\tdocWidth, _ := p.size()\n\tviewWidth, _ := termbox.Size()\n\treturn docWidth - viewWidth\n}", "func (w *Window) GetContentSize() (width, height int) {\n\tcWidth, cHeight := C.int(0), C.int(0)\n\tC.uiWindowContentSize(w.w, &cWidth, &cHeight)\n\treturn int(cWidth), int(cHeight)\n}", "func (gr *groupT) WidthMax(s string) {\n\tgr.Style = css.NewStylesResponsive(gr.Style)\n\tgr.Style.Desktop.StyleBox.WidthMax = 
s\n\tgr.Style.Mobile.StyleBox.WidthMax = \"none\" // => 100% of page - page has margins; replaced desktop max-width\n}", "func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}", "func (w *WidgetImplement) FixedWidth() int {\n\treturn w.fixedW\n}", "func maxWidth(no_lines int, widthFromLineNo widthFunc) int {\n\tvar max int\n\tfor i := 0; i < no_lines; i++ {\n\t\tval := widthFromLineNo(i)\n\t\tif val > max {\n\t\t\tmax = val\n\t\t}\n\t}\n\treturn max\n}", "func (fb *FlowBox) GetMaxChildrenPerLine() uint {\n\tc := C.gtk_flow_box_get_max_children_per_line(fb.native())\n\treturn uint(c)\n}", "func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}", "func (me XsdGoPkgHasElems_MaxWidth) MaxWidthDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetMaxContentWidth is a wrapper around gtk_scrolled_window_set_max_content_width().
func (v *ScrolledWindow) SetMaxContentWidth(width int) { C.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width)) }
[ "func (v *ScrolledWindow) SetMaxContentHeight(width int) {\n\tC.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))\n}", "func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}", "func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}", "func (w *Window) SetContentSize(width, height int) {\n\tC.uiWindowSetContentSize(w.w, C.int(width), C.int(height))\n}", "func (p *pageT) WidthMax(s string) {\n\tp.Style = css.NewStylesResponsive(p.Style)\n\tp.Style.Desktop.StyleBox.WidthMax = s\n\tp.Style.Mobile.StyleBox.WidthMax = \"calc(100% - 1.2rem)\" // 0.6rem margin-left and -right in mobile view\n}", "func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}", "func (gr *groupT) WidthMax(s string) {\n\tgr.Style = css.NewStylesResponsive(gr.Style)\n\tgr.Style.Desktop.StyleBox.WidthMax = s\n\tgr.Style.Mobile.StyleBox.WidthMax = \"none\" // => 100% of page - page has margins; replaced desktop max-width\n}", "func (pb *Bar) SetMaxWidth(maxWidth int) *Bar {\n\tpb.mu.Lock()\n\tpb.maxWidth = maxWidth\n\tpb.mu.Unlock()\n\treturn pb\n}", "func (w *ScrollWidget) SetMax(max int) {\n\tw.max = max\n\tw.clampCurrent()\n}", "func (c *content) setWidth(w int) {\n\tc.w = w\n}", "func (v *Entry) SetMaxWidthChars(nChars int) {\n\tC.gtk_entry_set_max_width_chars(v.native(), C.gint(nChars))\n}", "func (wg *WidgetImplement) SetFixedWidth(w int) {\n\twg.fixedW = w\n}", "func (v *Label) SetMaxWidthChars(nChars int) {\n\tC.gtk_label_set_max_width_chars(v.native(), C.gint(nChars))\n}", "func (w *Window) GetContentSize() (width, height int) {\n\tcWidth, cHeight := C.int(0), C.int(0)\n\tC.uiWindowContentSize(w.w, &cWidth, &cHeight)\n\treturn int(cWidth), int(cHeight)\n}", "func (wg *WidgetImplement) SetFixedSize(w, h int) 
{\n\twg.fixedW = w\n\twg.fixedH = h\n}", "func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}", "func (win *Window) SetDefaultSize(width, height int) {\n\twin.Candy().Guify(\"gtk_window_set_default_size\", win, width, height)\n}", "func (p *Toplevel) SetMaxSize(width int32, height int32) error {\n\treturn p.Context().SendRequest(p, 7, width, height)\n}", "func (c *HostClient) SetMaxConns(newMaxConns int) {\n\tc.connsLock.Lock()\n\tc.MaxConns = newMaxConns\n\tc.connsLock.Unlock()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetMaxContentHeight is a wrapper around gtk_scrolled_window_get_max_content_height().
func (v *ScrolledWindow) GetMaxContentHeight() int { c := C.gtk_scrolled_window_get_max_content_height(v.native()) return int(c) }
[ "func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}", "func (v *ScrolledWindow) SetMaxContentHeight(width int) {\n\tC.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))\n}", "func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}", "func (w *Window) GetContentSize() (width, height int) {\n\tcWidth, cHeight := C.int(0), C.int(0)\n\tC.uiWindowContentSize(w.w, &cWidth, &cHeight)\n\treturn int(cWidth), int(cHeight)\n}", "func (recv *Node) MaxHeight() uint32 {\n\tretC := C.g_node_max_height((*C.GNode)(recv.native))\n\tretGo := (uint32)(retC)\n\n\treturn retGo\n}", "func (g *GitStatusWidget) GetHeight() int {\n\treturn g.renderer.GetHeight()\n}", "func (v *Pixbuf) GetHeight() int {\n\treturn int(C.gdk_pixbuf_get_height(v.Native()))\n}", "func (c *Config) MaxHeight() int {\n\tc.Mutex.RLock()\n\tdefer c.Mutex.RUnlock()\n\treturn c.Raw.MaxHeight\n}", "func (p pager) maxScrollY() int {\n\t_, docHeight := p.size()\n\t_, viewHeight := termbox.Size()\n\treturn docHeight - viewHeight\n}", "func (Empty) MaxHeight(width, height int) int {\n\treturn 1\n}", "func (o ApplicationSettingsOutput) WikiPageMaxContentBytes() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *ApplicationSettings) pulumi.IntOutput { return v.WikiPageMaxContentBytes }).(pulumi.IntOutput)\n}", "func Height() int {\n\treturn js.Global.Get(\"window\").Get(\"innerHeight\").Int()\n}", "func (win *Window) Height() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.y)\n}", "func (c *content) height() int {\n\treturn len(c.c)\n}", "func (me XsdGoPkgHasElems_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}", "func (me XsdGoPkgHasElem_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}", "func (self 
*TraitPixbufAnimation) GetHeight() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_animation_get_height(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}", "func (w *Window) Height() int {\n\treturn int(C.ANativeWindow_getHeight(w.cptr()))\n}", "func (e Event) GetResizeHeight() int {\n\treturn int(C.caca_get_event_resize_height(e.Ev))\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetMaxContentHeight is a wrapper around gtk_scrolled_window_set_max_content_height().
func (v *ScrolledWindow) SetMaxContentHeight(width int) { C.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width)) }
[ "func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}", "func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}", "func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}", "func (w *Window) SetContentSize(width, height int) {\n\tC.uiWindowSetContentSize(w.w, C.int(width), C.int(height))\n}", "func (w *ScrollWidget) SetMax(max int) {\n\tw.max = max\n\tw.clampCurrent()\n}", "func (o ApplicationSettingsOutput) WikiPageMaxContentBytes() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *ApplicationSettings) pulumi.IntOutput { return v.WikiPageMaxContentBytes }).(pulumi.IntOutput)\n}", "func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}", "func (w *Window) GetContentSize() (width, height int) {\n\tcWidth, cHeight := C.int(0), C.int(0)\n\tC.uiWindowContentSize(w.w, &cWidth, &cHeight)\n\treturn int(cWidth), int(cHeight)\n}", "func (p pager) maxScrollY() int {\n\t_, docHeight := p.size()\n\t_, viewHeight := termbox.Size()\n\treturn docHeight - viewHeight\n}", "func (st *Settings) SetMaxWindowSize(size uint32) {\n\tst.windowSize = size\n}", "func (w *Window) SetMaximized(maximize bool) {\n\tif maximize == w.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tw.origX, w.origY = w.Pos()\n\t\tw.origWidth, w.origHeight = w.Size()\n\t\tw.maximized = true\n\t\tw.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tw.SetSize(width, height)\n\t} else {\n\t\tw.maximized = false\n\t\tw.SetPos(w.origX, w.origY)\n\t\tw.SetSize(w.origWidth, w.origHeight)\n\t}\n\tw.ResizeChildren()\n\tw.PlaceChildren()\n}", "func (w *WidgetBase) SetHeight(height int) {\n\tw.size.Y = height\n\tif w.size.Y != 0 {\n\t\tw.sizePolicyY = Minimum\n\t} else 
{\n\t\tw.sizePolicyY = Expanding\n\t}\n}", "func (win *Window) Maximize() {\n\twin.Candy().Guify(\"gtk_window_maximize\", win)\n}", "func (sf *TWindow) SetMaximized(maximize bool) {\n\tif maximize == sf.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tx, y := sf.pos.Get()\n\t\tsf.posOrig.X().Set(x)\n\t\tsf.posOrig.Y().Set(y)\n\t\tsf.origWidth, sf.origHeight = sf.Size()\n\t\tsf.maximized = true\n\t\tsf.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tsf.SetSize(width, height)\n\t} else {\n\t\tsf.maximized = false\n\t\tsf.SetPos(sf.posOrig.GetX(), sf.posOrig.GetY())\n\t\tsf.SetSize(sf.origWidth, sf.origHeight)\n\t}\n\tsf.ResizeChildren()\n\tsf.PlaceChildren()\n}", "func (Empty) MaxHeight(width, height int) int {\n\treturn 1\n}", "func (m *MailTips) SetMaxMessageSize(value *int32)() {\n err := m.GetBackingStore().Set(\"maxMessageSize\", value)\n if err != nil {\n panic(err)\n }\n}", "func (me XsdGoPkgHasElems_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func (w *WidgetImplement) SetClampHeight(clamp bool) {\n\tw.clamp[1] = clamp\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetPropagateNaturalWidth is a wrapper around gtk_scrolled_window_get_propagate_natural_width().
func (v *ScrolledWindow) GetPropagateNaturalWidth() bool { c := C.gtk_scrolled_window_get_propagate_natural_width(v.native()) return gobool(c) }
[ "func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}", "func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}", "func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}", "func (w *WidgetImplement) FixedWidth() int {\n\treturn w.fixedW\n}", "func GxOuterWidth(value float64) *SimpleElement { return newSEFloat(\"gx:outerWidth\", value) }", "func (c RelativeConstraint) GetWidth() float32 {\n\treturn c.op(c.parent().GetWidth(), c.constant)\n}", "func (w *Window) Width() int {\n\treturn int(C.ANativeWindow_getWidth(w.cptr()))\n}", "func (window Window) OuterWidth() int {\n\treturn window.Get(\"outerWidth\").Int()\n}", "func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}", "func (b *Bound) Width() float64 {\n\treturn b.ne.X() - b.sw.X()\n}", "func (w *sliderElement) MinIntrinsicWidth(base.Length) base.Length {\n\twidth, _ := w.handle.GetPreferredWidth()\n\tif limit := base.FromPixelsX(width); limit < 160*DIP {\n\t\treturn 160 * DIP\n\t}\n\treturn base.FromPixelsX(width)\n}", "func (win *Window) Width() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.x)\n}", "func (w *LWindow) MWidth() int32 {\n\treturn w.mWidth\n}", "func (w *Element) MinIntrinsicWidth(base.Length) base.Length {\n\treturn w.Size.Width\n}", "func (w *Window) Width() int {\n\treturn w.width\n}", "func (l *NetworkLayer) Width() int {\n\treturn len(l.Neurons)\n}", "func UlWindowGetWidth(window ULWindow) uint32 {\n\tcwindow, _ := *(*C.ULWindow)(unsafe.Pointer(&window)), cgoAllocsUnknown\n\t__ret := C.ulWindowGetWidth(cwindow)\n\t__v := (uint32)(__ret)\n\treturn __v\n}", "func (b *Bound) GeoWidth(haversine 
...bool) float64 {\n\tc := b.Center()\n\n\tA := &Point{b.sw[0], c[1]}\n\tB := &Point{b.ne[0], c[1]}\n\n\treturn A.GeoDistanceFrom(B, yesHaversine(haversine))\n}", "func (n *Node) GetWidth() float64 {\n\tif n == nil || n.Width == nil {\n\t\treturn 0.0\n\t}\n\treturn *n.Width\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetPropagateNaturalWidth is a wrapper around gtk_scrolled_window_set_propagate_natural_width().
func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) { C.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate)) }
[ "func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}", "func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}", "func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}", "func (wg *WidgetImplement) SetFixedWidth(w int) {\n\twg.fixedW = w\n}", "func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}", "func (w *WidgetImplement) FixedWidth() int {\n\treturn w.fixedW\n}", "func PreserveScroll(adj *gtk.Adjustment) {\n\trelvalue := (adj.GetValue() - adj.GetLower()) / (adj.GetUpper() - adj.GetLower())\n\tif math.IsNaN(relvalue) { // all zero, nothing to restore\n\t\treturn\n\t}\n\tglib.IdleAdd(func() { adj.SetValue(relvalue * (adj.GetUpper() - adj.GetLower())) })\n}", "func (t *http2Client) adjustWindow(s *Stream, n uint32) {\n\tif w := s.fc.maybeAdjust(n); w > 0 {\n\t\tt.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})\n\t}\n}", "func (bl *VBoxLayout) SetAutoWidth(state bool) {\n\n\tbl.autoWidth = state\n\tbl.Recalc(bl.pan)\n}", "func (sf *TWindow) SetFixed() {\n\tsf.chSizeFixed <- sizeFixed\n\ttime.Sleep(time.Millisecond * 10)\n}", "func GxOuterWidth(value float64) *SimpleElement { return newSEFloat(\"gx:outerWidth\", value) }", "func (win *Window) ReshowWithInitialSize() {\n\twin.Candy().Guify(\"gtk_window_reshow_with_initial_size\", win)\n}", "func (w *WidgetBase) SetWidth(width int) {\n\tw.size.X = width\n\tif w.size.X != 0 {\n\t\tw.sizePolicyX = Minimum\n\t} else {\n\t\tw.sizePolicyX = Expanding\n\t}\n}", "func (sf *TWindow) SetUnfixed() {\n\tsf.chSizeFixed <- sizeUnfixed\n\ttime.Sleep(time.Millisecond * 10)\n}", "func (v *Nvim) SetWindowWidth(window Window, width int) 
error {\n\treturn v.call(\"nvim_win_set_width\", nil, window, width)\n}", "func WindowBorderWidthDP(windowBorderWidthDP int) Option {\n\treturn func(o *Options) { o.WindowBorderWidthDP = windowBorderWidthDP }\n}", "func (b *Batch) SetWindowWidth(window Window, width int) {\n\tb.call(\"nvim_win_set_width\", nil, window, width)\n}", "func (v *Paned) SetWideHandle(wide bool) {\n\tC.gtk_paned_set_wide_handle(v.native(), gbool(wide))\n}", "func FixedWindow(client RedisClient, size time.Duration, limit uint) *Counter {\n\treturn &Counter{client: client, script: fwscr, size: int(size / time.Millisecond), limit: int64(limit)}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetPropagateNaturalHeight is a wrapper around gtk_scrolled_window_get_propagate_natural_height().
func (v *ScrolledWindow) GetPropagateNaturalHeight() bool { c := C.gtk_scrolled_window_get_propagate_natural_height(v.native()) return gobool(c) }
[ "func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}", "func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}", "func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}", "func (w *WidgetImplement) FixedHeight() int {\n\treturn w.fixedH\n}", "func (c RelativeConstraint) GetHeight() float32 {\n\treturn c.op(c.parent().GetHeight(), c.constant)\n}", "func (w *Window) Height() int {\n\treturn int(C.ANativeWindow_getHeight(w.cptr()))\n}", "func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}", "func (p *Protocol) GetEpochHeight(epochNum uint64) uint64 {\n\tif epochNum == 0 {\n\t\treturn 0\n\t}\n\tdardanellesEpoch := p.GetEpochNum(p.dardanellesHeight)\n\tif !p.dardanellesOn || epochNum <= dardanellesEpoch {\n\t\treturn (epochNum-1)*p.numDelegates*p.numSubEpochs + 1\n\t}\n\tdardanellesEpochHeight := p.GetEpochHeight(dardanellesEpoch)\n\treturn dardanellesEpochHeight + (epochNum-dardanellesEpoch)*p.numDelegates*p.numSubEpochsDardanelles\n}", "func (b *Bound) Height() float64 {\n\treturn b.ne.Y() - b.sw.Y()\n}", "func UlWindowGetHeight(window ULWindow) uint32 {\n\tcwindow, _ := *(*C.ULWindow)(unsafe.Pointer(&window)), cgoAllocsUnknown\n\t__ret := C.ulWindowGetHeight(cwindow)\n\t__v := (uint32)(__ret)\n\treturn __v\n}", "func PreserveScroll(adj *gtk.Adjustment) {\n\trelvalue := (adj.GetValue() - adj.GetLower()) / (adj.GetUpper() - adj.GetLower())\n\tif math.IsNaN(relvalue) { // all zero, nothing to restore\n\t\treturn\n\t}\n\tglib.IdleAdd(func() { adj.SetValue(relvalue * (adj.GetUpper() - adj.GetLower())) })\n}", "func updateHeight(n *node) {\n\tn.H = math.Max(height(child(n, 0)), 
height(child(n, 1))+1)\n}", "func (v *ScrolledWindow) GetOverlayScrolling() bool {\n\treturn gobool(C.gtk_scrolled_window_get_overlay_scrolling(v.native()))\n}", "func (v *TextView) GetBorderWindowSize(tp TextWindowType) int {\n\treturn int(C.gtk_text_view_get_border_window_size(v.native(), C.GtkTextWindowType(tp)))\n}", "func (self *TraitPixbufAnimation) GetHeight() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_animation_get_height(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}", "func (g *GitStatusWidget) GetHeight() int {\n\treturn g.renderer.GetHeight()\n}", "func (win *Window) Height() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.y)\n}", "func (recv *Tree) Height() int32 {\n\tretC := C.g_tree_height((*C.GTree)(recv.native))\n\tretGo := (int32)(retC)\n\n\treturn retGo\n}", "func (t *Text) LinesHeight() int {\n\tpad := t.setter.opts.Padding\n\tif t.size.Y <= 0 {\n\t\treturn 0\n\t}\n\tif t.size.Y-2*pad <= 0 {\n\t\treturn t.size.Y\n\t}\n\ty := pad\n\tfor _, l := range t.lines {\n\t\th := l.h.Round()\n\t\tif y+h > t.size.Y-pad {\n\t\t\tbreak\n\t\t}\n\t\ty += h\n\t}\n\tif h := trailingNewlineHeight(t); h > 0 && y+h <= t.size.Y-pad {\n\t\ty += h\n\t}\n\treturn y + pad\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetPropagateNaturalHeight is a wrapper around gtk_scrolled_window_set_propagate_natural_height().
func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) { C.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate)) }
[ "func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}", "func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}", "func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}", "func PreserveScroll(adj *gtk.Adjustment) {\n\trelvalue := (adj.GetValue() - adj.GetLower()) / (adj.GetUpper() - adj.GetLower())\n\tif math.IsNaN(relvalue) { // all zero, nothing to restore\n\t\treturn\n\t}\n\tglib.IdleAdd(func() { adj.SetValue(relvalue * (adj.GetUpper() - adj.GetLower())) })\n}", "func (w *WidgetImplement) SetFixedHeight(h int) {\n\tw.fixedH = h\n}", "func updateHeight(n *node) {\n\tn.H = math.Max(height(child(n, 0)), height(child(n, 1))+1)\n}", "func (bl *VBoxLayout) SetAutoHeight(state bool) {\n\n\tbl.autoHeight = state\n\tbl.Recalc(bl.pan)\n}", "func (v *ScrolledWindow) SetMaxContentHeight(width int) {\n\tC.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))\n}", "func (h *BufPane) ScrollAdjust() {\n\tv := h.GetView()\n\tend := h.SLocFromLoc(h.Buf.End())\n\tif h.Diff(v.StartLine, end) < h.BufView().Height-1 {\n\t\tv.StartLine = h.Scroll(end, -h.BufView().Height+1)\n\t}\n\th.SetView(v)\n}", "func (w *WidgetImplement) FixedHeight() int {\n\treturn w.fixedH\n}", "func (sf *TWindow) SetUnfixed() {\n\tsf.chSizeFixed <- sizeUnfixed\n\ttime.Sleep(time.Millisecond * 10)\n}", "func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}", "func (v *ScrolledWindow) SetOverlayScrolling(scrolling bool) {\n\tC.gtk_scrolled_window_set_overlay_scrolling(v.native(), gbool(scrolling))\n}", "func (w *Window) updateDimensions() {\n\tif w.windowLayout == nil 
{\n\t\treturn\n\t}\n\n\tw.window.SetFixedHeight(w.windowLayout.SizeHint().Height())\n}", "func (sf *TWindow) SetFixed() {\n\tsf.chSizeFixed <- sizeFixed\n\ttime.Sleep(time.Millisecond * 10)\n}", "func (t *http2Client) adjustWindow(s *Stream, n uint32) {\n\tif w := s.fc.maybeAdjust(n); w > 0 {\n\t\tt.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})\n\t}\n}", "func (w *WidgetBase) SetHeight(height int) {\n\tw.size.Y = height\n\tif w.size.Y != 0 {\n\t\tw.sizePolicyY = Minimum\n\t} else {\n\t\tw.sizePolicyY = Expanding\n\t}\n}", "func (v *Nvim) SetWindowHeightNamespace(window Window, nsID int) error {\n\treturn v.call(\"nvim_win_set_hl_ns\", nil, window, nsID)\n}", "func WindowHeightDP(windowHeightDP int) Option {\n\treturn func(o *Options) { o.WindowHeightDP = windowHeightDP }\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewProvider is redirect hook for fabric/fsblkstorage NewProvider()
func NewProvider(conf *blkstorage.Conf, indexConfig *blkstorage.IndexConfig, _ *ledger.Config, metricsProvider metrics.Provider) (extledgerapi.BlockStoreProvider, error) { return blkstorage.NewProvider(conf, indexConfig, metricsProvider) }
[ "func NewProvider(conf *fsblkstorage.Conf, indexConfig *blkstorage.IndexConfig, ledgerconfig *ledger.Config, metricsProvider metrics.Provider) (blkstorage.BlockStoreProvider, error) {\n\treturn cdbblkstorage.NewProvider(indexConfig, ledgerconfig)\n}", "func NewProvider(net network.StorageMarketNetwork,\n\tds datastore.Batching,\n\tfs filestore.FileStore,\n\tdagStore stores.DAGStoreWrapper,\n\tindexer provider.Interface,\n\tpieceStore piecestore.PieceStore,\n\tdataTransfer datatransfer.Manager,\n\tspn storagemarket.StorageProviderNode,\n\tminerAddress address.Address,\n\tstoredAsk StoredAsk,\n\tmeshCreator MeshCreator,\n\toptions ...StorageProviderOption,\n) (storagemarket.StorageProvider, error) {\n\th := &Provider{\n\t\tnet: net,\n\t\tmeshCreator: meshCreator,\n\t\tspn: spn,\n\t\tfs: fs,\n\t\tpieceStore: pieceStore,\n\t\tconns: connmanager.NewConnManager(),\n\t\tstoredAsk: storedAsk,\n\t\tactor: minerAddress,\n\t\tdataTransfer: dataTransfer,\n\t\tpubSub: pubsub.New(providerDispatcher),\n\t\treadyMgr: shared.NewReadyManager(),\n\t\tdagStore: dagStore,\n\t\tstores: stores.NewReadWriteBlockstores(),\n\t\tawaitTransferRestartTimeout: defaultAwaitRestartTimeout,\n\t\tindexProvider: indexer,\n\t\tmetadataForDeal: defaultMetadataFunc,\n\t}\n\tstorageMigrations, err := migrations.ProviderMigrations.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th.deals, h.migrateDeals, err = newProviderStateMachine(\n\t\tds,\n\t\t&providerDealEnvironment{h},\n\t\th.dispatch,\n\t\tstorageMigrations,\n\t\tversioning.VersionKey(\"2\"),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th.Configure(options...)\n\n\t// register a data transfer event handler -- this will send events to the state machines based on DT events\n\th.unsubDataTransfer = dataTransfer.SubscribeToEvents(dtutils.ProviderDataTransferSubscriber(h.deals))\n\n\tpph := &providerPushDeals{h}\n\terr = dataTransfer.RegisterVoucherType(requestvalidation.StorageDataTransferVoucherType, 
requestvalidation.NewUnifiedRequestValidator(pph, nil))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = dataTransfer.RegisterTransportConfigurer(requestvalidation.StorageDataTransferVoucherType, dtutils.TransportConfigurer(&providerStoreGetter{h}))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn h, nil\n}", "func providerFactory(_ io.Reader) (cloudprovider.Interface, error) {\n\tlog := klogr.NewWithOptions(klogr.WithFormat(klogr.FormatKlog))\n\tc, err := loadConfig(envconfig.OsLookuper())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapiUrl := katapult.DefaultURL\n\tif c.APIHost != \"\" {\n\t\tlog.Info(\"default API base URL overrided\",\n\t\t\t\"url\", c.APIHost)\n\t\tapiUrl, err = url.Parse(c.APIHost)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse provided api url: %w\", err)\n\t\t}\n\t}\n\n\trm, err := katapult.New(\n\t\tkatapult.WithAPIKey(c.APIKey),\n\t\tkatapult.WithBaseURL(apiUrl),\n\t\tkatapult.WithUserAgent(\"kce-ccm\"), // TODO: Add version.\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := core.New(rm)\n\n\treturn &provider{\n\t\tlog: log,\n\t\tkatapult: client,\n\t\tconfig: *c,\n\t\tloadBalancer: &loadBalancerManager{\n\t\t\tlog: log,\n\t\t\tconfig: *c,\n\t\t\tloadBalancerController: client.LoadBalancers,\n\t\t\tloadBalancerRuleController: client.LoadBalancerRules,\n\t\t},\n\t}, nil\n}", "func NewProvider(ctx context.Context, args map[string]string) (blockstorage.Provider, error) {\n\tibmCli, err := newClient(ctx, args)\n\treturn &ibmCloud{cli: ibmCli}, err\n}", "func NewProvider(indexConfig *blkstorage.IndexConfig, ledgerconfig *ledger.Config) (api.BlockStoreProvider, error) {\n\tlogger.Debugf(\"constructing CouchDB block storage provider\")\n\tcouchDBConfig := ledgerconfig.StateDBConfig.CouchDB\n\tcouchInstance, err := couchdb.CreateCouchInstance(couchDBConfig, &disabled.Provider{})\n\tif err != nil {\n\t\treturn nil, errors.WithMessage(err, \"obtaining CouchDB instance failed\")\n\t}\n\n\treturn 
&CDBBlockstoreProvider{\n\t\toptions: []option{\n\t\t\twithBlockByNumCacheSize(cfg.GetBlockStoreBlockByNumCacheSize()),\n\t\t\twithBlockByHashCacheSize(cfg.GetBlockStoreBlockByHashCacheSize()),\n\t\t},\n\t\tcouchInstance: couchInstance,\n\t\tindexConfig: indexConfig,\n\t}, nil\n}", "func NewProvider() (rootfs.Provider, error) {\n\treturn &provider{}, nil\n}", "func newProvider(c dns.Conf) dns.Provider {\n\tswitch c.Provider() {\n\tcase \"r53\":\n\t\treturn &r53.Route53DNS{\n\t\t\tConf: c,\n\t\t}\n\tcase \"gc\":\n\t\treturn &gc.GCloudDNS{\n\t\t\tConf: c,\n\t\t}\n\t}\n\treturn nil\n}", "func Provide.FindProviders(RecordStore.Store, Blocks.Key) (<-Net.ID, error) {}", "func providerFactory(meta *providercache.CachedProvider) providers.Factory {\n\treturn func() (providers.Interface, error) {\n\t\texecFile, err := meta.ExecutableFile()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconfig := &plugin.ClientConfig{\n\t\t\tHandshakeConfig: tfplugin.Handshake,\n\t\t\tLogger: logging.NewProviderLogger(\"\"),\n\t\t\tAllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},\n\t\t\tManaged: true,\n\t\t\tCmd: exec.Command(execFile),\n\t\t\tAutoMTLS: enableProviderAutoMTLS,\n\t\t\tVersionedPlugins: tfplugin.VersionedPlugins,\n\t\t\tSyncStdout: logging.PluginOutputMonitor(fmt.Sprintf(\"%s:stdout\", meta.Provider)),\n\t\t\tSyncStderr: logging.PluginOutputMonitor(fmt.Sprintf(\"%s:stderr\", meta.Provider)),\n\t\t}\n\n\t\tclient := plugin.NewClient(config)\n\t\trpcClient, err := client.Client()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\traw, err := rpcClient.Dispense(tfplugin.ProviderPluginName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// store the client so that the plugin can kill the child process\n\t\tprotoVer := client.NegotiatedVersion()\n\t\tswitch protoVer {\n\t\tcase 5:\n\t\t\tp := raw.(*tfplugin.GRPCProvider)\n\t\t\tp.PluginClient = client\n\t\t\treturn p, nil\n\t\tcase 6:\n\t\t\tp := 
raw.(*tfplugin6.GRPCProvider)\n\t\t\tp.PluginClient = client\n\t\t\treturn p, nil\n\t\tdefault:\n\t\t\tpanic(\"unsupported protocol version\")\n\t\t}\n\t}\n}", "func NewProvider(c Config) (checkpoint.Provider, func() error, error) {\n\tvar err error\n\tcachePath, mountPath := path.Join(defaultCCFSRoot, \"cache\"), path.Join(defaultCCFSRoot, \"mountpoint\")\n\tif c.CacheDirectory != \"\" {\n\t\tcachePath = c.CacheDirectory\n\t} else {\n\t\tc.CacheDirectory = cachePath\n\t}\n\tif c.Exec == \"\" {\n\t\tc.Exec = \"ccfs\"\n\t}\n\tif err = unix.Unmount(mountPath, unix.MNT_DETACH); err != nil && err != syscall.EINVAL && err != syscall.ENOENT {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to umount ccfs\")\n\t}\n\tif err = os.MkdirAll(cachePath, 0644); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif err = os.MkdirAll(mountPath, 0644); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar done func() error\n\tif done, err = mountCCFS(mountPath, c); err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to mount ccfs\")\n\t}\n\tp := &provider{\n\t\tmountpoint: mountPath,\n\t\trefs: map[string]int{},\n\t\tlastRefs: map[string]int{},\n\t\tconfig: c,\n\t}\n\tgo p.scan()\n\treturn p, done, nil\n}", "func (s *Spacetime) addNewProvider(k *Kube) error {\n\t// if provider does not exist in spacetime add it\n\tif _, ok := s.Providers[k.Provider]; !ok {\n\t\ts.Providers[k.Provider] = Provider{map[string]Region{\n\t\t\tk.Region: Region{map[string]Kube{\n\t\t\t\tk.Name: *k,\n\t\t\t},\n\t\t\t},\n\t\t},\n\t\t}\n\t\treturn nil\n\t}\n\t//else err\n\treturn errors.New(\"Provider already exists..\")\n}", "func NewLegacyProvider(name string) Provider {\n\treturn Provider{\n\t\tType: name,\n\t\tNamespace: \"-\",\n\t\tHostname: \"registry.terraform.io\",\n\t}\n}", "func Provide.Provide(RecordStore.Store, Blocks.Key, Net.Host) error {}", "func NewProvider() *ProviderConfig {\n\tproviderConfig := &ProviderConfig{\n\t\tAlibaba: make(map[string]*models.AlibabaCloudSpec),\n\t\tAnexia: 
make(map[string]*models.AnexiaCloudSpec),\n\t\tAws: make(map[string]*models.AWSCloudSpec),\n\t\tAzure: make(map[string]*models.AzureCloudSpec),\n\t\tDigitalocean: make(map[string]*models.DigitaloceanCloudSpec),\n\t\tFake: make(map[string]*models.FakeCloudSpec),\n\t\tGcp: make(map[string]*models.GCPCloudSpec),\n\t\tHetzner: make(map[string]*models.HetznerCloudSpec),\n\t\tKubevirt: make(map[string]*models.KubevirtCloudSpec),\n\t\tOpenstack: make(map[string]*models.OpenstackCloudSpec),\n\t\tPacket: make(map[string]*models.PacketCloudSpec),\n\t\tVsphere: make(map[string]*models.VSphereCloudSpec),\n\t}\n\n\tproviderConfig.Alibaba[\"Alibaba\"] = newAlibabaCloudSpec()\n\tproviderConfig.Anexia[\"Anexia\"] = newAnexiaCloudSpec()\n\tproviderConfig.Aws[\"Aws\"] = newAWSCloudSpec()\n\tproviderConfig.Azure[\"Azure\"] = newAzureCloudSpec()\n\tproviderConfig.Digitalocean[\"Digitalocean\"] = newDigitaloceanCloudSpec()\n\tproviderConfig.Fake[\"Fake\"] = newFakeCloudSpec()\n\tproviderConfig.Gcp[\"Gcp\"] = newGCPCloudSpec()\n\tproviderConfig.Hetzner[\"Hetzner\"] = newHetznerCloudSpec()\n\tproviderConfig.Kubevirt[\"Kubevirt\"] = newKubevirtCloudSpec()\n\tproviderConfig.Openstack[\"Openstack\"] = newOpenstackCloudSpec()\n\tproviderConfig.Packet[\"Packet\"] = newPacketCloudSpec()\n\tproviderConfig.Vsphere[\"Vsphere\"] = newVSphereCloudSpec()\n\n\treturn providerConfig\n}", "func NewProvider() *extblockpublisher.Provider {\n\treturn extblockpublisher.NewProvider()\n}", "func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {\n\tmediaType := \"application/json\"\n\tinfo, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unsupported media type %q\", mediaType)\n\t}\n\n\tgv, ok := apiVersions[provider.APIVersion]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid apiVersion: %q\", provider.APIVersion)\n\t}\n\n\tclock := clock.RealClock{}\n\n\treturn 
&pluginProvider{\n\t\tclock: clock,\n\t\tmatchImages: provider.MatchImages,\n\t\tcache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),\n\t\tdefaultCacheDuration: provider.DefaultCacheDuration.Duration,\n\t\tlastCachePurge: clock.Now(),\n\t\tplugin: &execPlugin{\n\t\t\tname: provider.Name,\n\t\t\tapiVersion: provider.APIVersion,\n\t\t\tencoder: codecs.EncoderForVersion(info.Serializer, gv),\n\t\t\tpluginBinDir: pluginBinDir,\n\t\t\targs: provider.Args,\n\t\t\tenvVars: provider.Env,\n\t\t\tenviron: os.Environ,\n\t\t},\n\t}, nil\n}", "func SetProvider(config *cred.Config) {\n\tstorage.Registry().Registry[ProviderScheme] = func(string) (storage.Service, error) {\n\t\treturn NewService(config), nil\n\t}\n}", "func NewProvider(conf *pvtdatastorage.PrivateDataConfig, ledgerconfig *ledger.Config) (common.Provider, error) {\n\tlogger.Debugf(\"constructing CouchDB private data storage provider\")\n\tcouchDBConfig := ledgerconfig.StateDBConfig.CouchDB\n\n\treturn newProviderWithDBDef(couchDBConfig, conf)\n}", "func unmanagedProviderFactory(provider addrs.Provider, reattach *plugin.ReattachConfig) providers.Factory {\n\treturn func() (providers.Interface, error) {\n\t\tconfig := &plugin.ClientConfig{\n\t\t\tHandshakeConfig: tfplugin.Handshake,\n\t\t\tLogger: logging.NewProviderLogger(\"unmanaged.\"),\n\t\t\tAllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},\n\t\t\tManaged: false,\n\t\t\tReattach: reattach,\n\t\t\tSyncStdout: logging.PluginOutputMonitor(fmt.Sprintf(\"%s:stdout\", provider)),\n\t\t\tSyncStderr: logging.PluginOutputMonitor(fmt.Sprintf(\"%s:stderr\", provider)),\n\t\t}\n\n\t\tif reattach.ProtocolVersion == 0 {\n\t\t\t// As of the 0.15 release, sdk.v2 doesn't include the protocol\n\t\t\t// version in the ReattachConfig (only recently added to\n\t\t\t// go-plugin), so client.NegotiatedVersion() always returns 0. 
We\n\t\t\t// assume that an unmanaged provider reporting protocol version 0 is\n\t\t\t// actually using proto v5 for backwards compatibility.\n\t\t\tif defaultPlugins, ok := tfplugin.VersionedPlugins[5]; ok {\n\t\t\t\tconfig.Plugins = defaultPlugins\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"no supported plugins for protocol 0\")\n\t\t\t}\n\t\t} else if plugins, ok := tfplugin.VersionedPlugins[reattach.ProtocolVersion]; !ok {\n\t\t\treturn nil, fmt.Errorf(\"no supported plugins for protocol %d\", reattach.ProtocolVersion)\n\t\t} else {\n\t\t\tconfig.Plugins = plugins\n\t\t}\n\n\t\tclient := plugin.NewClient(config)\n\t\trpcClient, err := client.Client()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\traw, err := rpcClient.Dispense(tfplugin.ProviderPluginName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// store the client so that the plugin can kill the child process\n\t\tprotoVer := client.NegotiatedVersion()\n\t\tswitch protoVer {\n\t\tcase 0, 5:\n\t\t\t// As of the 0.15 release, sdk.v2 doesn't include the protocol\n\t\t\t// version in the ReattachConfig (only recently added to\n\t\t\t// go-plugin), so client.NegotiatedVersion() always returns 0. We\n\t\t\t// assume that an unmanaged provider reporting protocol version 0 is\n\t\t\t// actually using proto v5 for backwards compatibility.\n\t\t\tp := raw.(*tfplugin.GRPCProvider)\n\t\t\tp.PluginClient = client\n\t\t\treturn p, nil\n\t\tcase 6:\n\t\t\tp := raw.(*tfplugin6.GRPCProvider)\n\t\t\tp.PluginClient = client\n\t\t\treturn p, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unsupported protocol version %d\", protoVer)\n\t\t}\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewConf is redirect hook for fabric/fsblkstorage NewConf()
func NewConf(blockStorageDir string, maxBlockfileSize int) *blkstorage.Conf { return blkstorage.NewConf(blockStorageDir, maxBlockfileSize) }
[ "func NewConf(blockStorageDir string, maxBlockfileSize int) *fsblkstorage.Conf {\n\treturn fsblkstorage.NewConf(blockStorageDir, maxBlockfileSize)\n}", "func NewConf(filesystemPath string, maxBlockfileSize int) *Conf {\n\tif !strings.HasSuffix(filesystemPath, \"/\") {\n\t\tfilesystemPath = filesystemPath + \"/\"\n\t}\n\tif maxBlockfileSize <= 0 {\n\t\tmaxBlockfileSize = defaultMaxBlockfileSize\n\t}\n\treturn &Conf{filesystemPath + \"blocks\", filesystemPath + \"db\", maxBlockfileSize}\n}", "func newConfig(appName string, pathToKeybase string, log Log, ignoreSnooze bool) (*config, error) {\n\tcfg := newDefaultConfig(appName, pathToKeybase, log, ignoreSnooze)\n\terr := cfg.load()\n\treturn &cfg, err\n}", "func newConfig(envParams envParams) error {\n\t// Initialize server config.\n\tsrvCfg := newServerConfigV14()\n\n\t// If env is set for a fresh start, save them to config file.\n\tif globalIsEnvCreds {\n\t\tsrvCfg.SetCredential(envParams.creds)\n\t}\n\n\tif globalIsEnvBrowser {\n\t\tsrvCfg.SetBrowser(envParams.browser)\n\t}\n\n\t// Create config path.\n\tif err := createConfigDir(); err != nil {\n\t\treturn err\n\t}\n\n\t// hold the mutex lock before a new config is assigned.\n\t// Save the new config globally.\n\t// unlock the mutex.\n\tserverConfigMu.Lock()\n\tserverConfig = srvCfg\n\tserverConfigMu.Unlock()\n\n\t// Save config into file.\n\treturn serverConfig.Save()\n}", "func newFrmConf() *InstConfig {\n\treturn &InstConfig{\n\t\t\"127.0.0.1:8080\",\n\t\t\"DefaultService\",\n\t\t2097152,\n\t\t5,\n\t\tnil,\n\t\tnil,\n\t\tfalse,\n\t}\n}", "func testConf(create bool) *Config {\n\tc := NewConfig(afero.NewMemMapFs())\n\tv := c.native\n\n\tv.AutomaticEnv()\n\tv.SetConfigName(CONFIG_FILENAME)\n\tv.SetConfigFile(TEST_CONF_FILE)\n\n\tif create {\n\t\tv.WriteConfigAs(TEST_CONF_FILE)\n\t}\n\n\treturn c\n}", "func newClientConfig(fname, id, name, serverKey, serverUrl string) (err error) {\n\tconfig := 
Config{\n\t\tid,\n\t\tname,\n\t\t\"client\",\n\t\t\"\",\n\t\tserverKey,\n\t\tserverUrl,\n\t\tDEFAULT_PROCESS_USER,\n\t\tDEFAULT_PROCESS_LOCK,\n\t\tDEFAULT_PROCESS_LOG,\n\t\tDEFAULT_BASE_DIR,\n\t\tDEFAULT_DATA_DIR,\n\t\tDEFAULT_HTTP_LISTEN,\n\t\tfname,\n\t}\n\n\treturn SaveConfig(config)\n}", "func newServerConfig(fname, id, name, passWord, serverKey string) (err error) {\n\tconfig := Config{\n\t\tid,\n\t\tname,\n\t\t\"server\",\n\t\tpassWord,\n\t\tserverKey,\n\t\tDEFAULT_SERVER_URL,\n\t\tDEFAULT_PROCESS_USER,\n\t\tDEFAULT_PROCESS_LOCK,\n\t\tDEFAULT_PROCESS_LOG,\n\t\tDEFAULT_BASE_DIR,\n\t\tDEFAULT_DATA_DIR,\n\t\tDEFAULT_HTTP_LISTEN,\n\t\tfname,\n\t}\n\n\treturn SaveConfig(config)\n}", "func newConfig() config {\n\tcfg := config{\n\t\tCtx: context.Background(),\n\t\tProvider: common.DefaultConfigProvider(),\n\t\tDatabaseId: common.String(os.Getenv(\"OCI_DBS_OCID\")),\n\t\tVaultId: common.String(os.Getenv(\"OCI_VAULT_OCID\")),\n\t\tUsername: common.String(os.Getenv(\"OCI_DB_USER\")),\n\t\tConnectionString: common.String(os.Getenv(\"OCI_CONN_STRING\")),\n\t\tPassword: common.String(\n\t\t\tbase64.StdEncoding.EncodeToString(\n\t\t\t\t[]byte(os.Getenv(\"OCI_DB_PASS\")))),\n\t}\n\tcfg.DBToolsClient = cfg.getDatabaseToolsClient()\n\tcfg.VaultsClient = cfg.getVaultsClient()\n\tcfg.KmsVaultsClient = cfg.getKmsVaultsClient()\n\tcfg.DatabaseClient = cfg.getDatabaseClient()\n\tcfg.DbSystemClient = cfg.getMySqlDbSystemClient()\n\n\t// Things that need to be initialized AFTER the SDK clients\n\tcfg.VaultCompartmentId = cfg.getVaultCompartmentId()\n\tcfg.VaultKeyId = cfg.getVaultKeyId()\n\tcfg.WalletBase64 = cfg.getADBsWalletAsBase64String()\n\tcfg.TargetCompartmentId = cfg.getTargetCompartmentId()\n\tcfg.EndpointServiceId = cfg.getEndpointServiceId() // depends on TargetCompartmentId\n\tcfg.SubnetId = cfg.getDatabaseSubnetId()\n\n\tcfg.ConnectionString = cfg.getConnectionString() // respects OCI_CONN_STRING if defined\n\n\treturn cfg\n}", "func newcfgfile(fp string) 
*cfgfile.ConfigFile {\n\treturn &cfgfile.ConfigFile{\n\t\tSrvName: \"agentd\",\n\t\tFilename: fp,\n\t}\n}", "func newBlockfileMgr(id string, conf *Conf, indexConfig *blkstorage.IndexConfig, indexStore *leveldbhelper.DBHandle) *blockfileMgr {\n\tlogger.Debugf(\"newBlockfileMgr() initializing file-based block storage for ledger: %s \", id)\n\tvar rwMutexs []*sync.RWMutex\n\n\t//Determine the root directory for the blockfile storage, if it does not exist create it\n\trootDir := conf.getLedgerBlockDir(id)\n\t_, err := util.CreateDirIfMissing(rootDir)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error: %s\", err))\n\t}\n\t// Instantiate the manager, i.e. blockFileMgr structure\n\tmgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: indexStore, rwMutexs: rwMutexs}\n\n\t// cp = checkpointInfo, retrieve from the database the file suffix or number of where blocks were stored.\n\t// It also retrieves the current size of that file and the last block number that was written to that file.\n\t// At init checkpointInfo:latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0]\n\tcpInfo, err := mgr.loadCurrentInfo()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not get block file info for current block file from db: %s\", err))\n\t}\n\tif cpInfo == nil {\n\t\tlogger.Info(`Getting block information from block storage`)\n\t\tif cpInfo, err = constructCheckpointInfoFromBlockFiles(rootDir); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Could not build checkpoint info from block files: %s\", err))\n\t\t}\n\t\tlogger.Debugf(\"Info constructed by scanning the blocks dir = %s\", spew.Sdump(cpInfo))\n\t} else {\n\t\tlogger.Debug(`Synching block information from block storage (if needed)`)\n\t\tsyncCPInfoFromFS(rootDir, cpInfo)\n\t}\n\terr = mgr.saveCurrentInfo(cpInfo, true)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not save next block file info to db: %s\", err))\n\t}\n\n\tmgr.oldestFileChunkSuffixNum = syncOldestFileNum(rootDir)\n\t//If start up is a restart of an 
existing storage,new the rwMutex for the files\n\tif conf.dumpConf.Enabled {\n\t\tfor i := 0; i <= cpInfo.latestFileChunkSuffixNum; i++ {\n\t\t\trwMutex := new(sync.RWMutex)\n\t\t\tmgr.rwMutexs = append(mgr.rwMutexs, rwMutex)\n\t\t}\n\t}\n\tmgr.dumpMutex = new(sync.Mutex)\n\n\t//Open a writer to the file identified by the number and truncate it to only contain the latest block\n\t// that was completely saved (file system, index, cpinfo, etc)\n\tcurrentFileWriter, err := newBlockfileWriter(deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum))\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not open writer to current file: %s\", err))\n\t}\n\t//Truncate the file to remove excess past last block\n\terr = currentFileWriter.truncateFile(cpInfo.latestFileChunksize)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not truncate current file to known size in db: %s\", err))\n\t}\n\n\t// Create a new KeyValue store database handler for the blocks index in the keyvalue database\n\tmgr.index = newBlockIndex(indexConfig, indexStore)\n\n\t// Update the manager with the checkpoint info and the file writer\n\tmgr.cpInfo = cpInfo\n\tmgr.currentFileWriter = currentFileWriter\n\t// Create a checkpoint condition (event) variable, for the goroutine waiting for\n\t// or announcing the occurrence of an event.\n\tmgr.cpInfoCond = sync.NewCond(&sync.Mutex{})\n\n\t// init BlockchainInfo for external API's\n\tbcInfo := &common.BlockchainInfo{\n\t\tHeight: 0,\n\t\tCurrentBlockHash: nil,\n\t\tPreviousBlockHash: nil}\n\n\tif !cpInfo.isChainEmpty {\n\t\t//If start up is a restart of an existing storage, sync the index from block storage and update BlockchainInfo for external API's\n\t\tmgr.syncIndex()\n\t\tlastBlockHeader, err := mgr.retrieveBlockHeaderByNumber(cpInfo.lastBlockNumber)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Could not retrieve header of the last block form file: %s\", err))\n\t\t}\n\t\tlastBlockHash := lastBlockHeader.Hash()\n\t\tpreviousBlockHash := 
lastBlockHeader.PreviousHash\n\t\tbcInfo = &common.BlockchainInfo{\n\t\t\tHeight: cpInfo.lastBlockNumber + 1,\n\t\t\tCurrentBlockHash: lastBlockHash,\n\t\t\tPreviousBlockHash: previousBlockHash}\n\t}\n\tmgr.bcInfo.Store(bcInfo)\n\treturn mgr\n}", "func defaultConf() Config {\n\tresult := Config{\n\t\tGeneral: general{\n\t\t\t// Configuration version number. If a field is added or changed\n\t\t\t// in this default, the version must be changed to tell the app\n\t\t\t// to rebuild the users config files.\n\t\t\tVersion: \"0.0.1\",\n\t\t\tPoolSize: \"50 GB\",\n\t\t\tNetworkLimit: \"50 GB\",\n\t\t\tStatsReporting: \"OFF\",\n\t\t},\n\t\tDatabase: database{\n\t\t\t// This is the path to the backend database.\n\t\t\t// By default it is placed in the same dir as the config.\n\t\t\tPath: filepath.Join(filepath.Dir(path), \"keys.db\"),\n\t\t},\n\t\tSources: sources{\n\t\t\tConfig: filepath.Join(filepath.Dir(path), \"keysets.yaml\"),\n\t\t\t// Upstream Keyset repositories will be cloned to this location.\n\t\t\tRepositories: filepath.Join(filepath.Dir(path), \"repositories\"),\n\t\t\tStorage: filepath.Join(filepath.Dir(path), \"storage\"),\n\t\t},\n\t\tStats: stats{\n\t\t\tUsername: \"\",\n\t\t\tEmail: \"\",\n\t\t},\n\t}\n\treturn result\n}", "func newSrvConfig(objAPI ObjectLayer) error {\n\t// Initialize server config.\n\tsrvCfg := newServerConfig()\n\n\t// hold the mutex lock before a new config is assigned.\n\tglobalServerConfigMu.Lock()\n\tglobalServerConfig = srvCfg\n\tglobalServerConfigMu.Unlock()\n\n\t// Save config into file.\n\treturn saveServerConfig(GlobalContext, objAPI, globalServerConfig)\n}", "func newStorageConfig(data string) (cfg storageConfig, err error) {\n\t// nil data gets turned into a default storage config,\n\t// using the default local root\n\tif data == \"\" {\n\t\tcfg.Resource = backup.DefaultLocalRoot\n\t\treturn\n\t}\n\n\t// if a file protocol was specified,\n\t// give it priority and return the local storage config for it,\n\t// using the 
specified path.\n\tif strings.HasPrefix(data, \"file://\") {\n\t\tcfg.Resource = strings.TrimPrefix(data, \"file://\")\n\t\treturn\n\t}\n\n\t// try to interpret it as an FTP server config\n\tftpStorageConfig, err := backup.NewFTPServerConfig(data)\n\tif err == nil {\n\t\tcfg.Resource = ftpStorageConfig\n\t\tcfg.StorageType = ftpStorageType\n\t\treturn\n\t}\n\terr = nil\n\n\t// check if the given data points to a valid path, as a last resort\n\tif exists, _ := localFileExists(data, true); exists {\n\t\tcfg.Resource = data\n\t\treturn\n\t}\n\n\t// invalid data given, cannot create a config based on it\n\terr = errors.Newf(\"%v is an invalid storage config resource string\", data)\n\treturn\n}", "func loadConf(oldConf *conf.Conf) (*conf.Conf, error) {\n\tif oldConf != nil {\n\t\treturn conf.ReloadConf(oldConf)\n\t}\n\treturn conf.Load(*id, *confDir, *stateDir)\n}", "func initConfiguration() {\n\tk = confident.New()\n\tk.WithConfiguration(&Conf)\n\tk.Name = \"config\"\n\tk.Type = \"json\"\n\tk.Path = configDirPath()\n\tk.Path = configDirPathEnsureExists()\n\tk.Permission = os.FileMode(0644)\n\tlogging.LogDebugf(\"config/initConfiguration() - Conf before read: %#v\", Conf)\n\tk.Read()\n\tlogging.LogDebugf(\"config/initConfiguration() - Conf after read: %#v\", Conf)\n\tif *dpRestURL != \"\" || *dpSomaURL != \"\" {\n\t\tif *dpConfigName != \"\" {\n\t\t\tvalidateDpConfigName()\n\t\t\tConf.DataPowerAppliances[*dpConfigName] = DataPowerAppliance{Domain: *dpDomain, Proxy: *proxy, RestUrl: *dpRestURL, SomaUrl: *dpSomaURL, Username: *dpUsername, Password: *dpPassword}\n\t\t\tCurrentApplianceName = *dpConfigName\n\t\t} else {\n\t\t\tConf.DataPowerAppliances[PreviousApplianceName] = DataPowerAppliance{Domain: *dpDomain, Proxy: *proxy, RestUrl: *dpRestURL, SomaUrl: *dpSomaURL, Username: *dpUsername, Password: *dpPassword}\n\t\t\tCurrentApplianceName = PreviousApplianceName\n\t\t}\n\t\tk.Persist()\n\t\tlogging.LogDebugf(\"config/initConfiguration() - Conf after persist: %#v\", 
Conf)\n\t}\n\tCurrentAppliance = DataPowerAppliance{Domain: *dpDomain, Proxy: *proxy, RestUrl: *dpRestURL, SomaUrl: *dpSomaURL, Username: *dpUsername, Password: *dpPassword}\n}", "func loadConf(oldConf *conf.Conf) (*conf.Conf, error) {\n\tif oldConf != nil {\n\t\treturn conf.ReloadConf(oldConf)\n\t}\n\treturn conf.Load(*id, *confDir, *cacheDir, *stateDir)\n}", "func createNewKfApp(baseConfig string, version string, oldKfCfg *kfconfig.KfConfig) (*kfconfig.KfConfig, string, error) {\n\tappDir, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, \"\", &kfapis.KfError{\n\t\t\tCode: int(kfapis.INVALID_ARGUMENT),\n\t\t\tMessage: fmt.Sprintf(\"could not get current directory %v\", err),\n\t\t}\n\t}\n\n\t// Load the new KfCfg from the base config\n\tnewKfCfg, err := configconverters.LoadConfigFromURI(baseConfig)\n\tif err != nil {\n\t\treturn nil, \"\", &kfapis.KfError{\n\t\t\tCode: int(kfapis.INTERNAL_ERROR),\n\t\t\tMessage: fmt.Sprintf(\"Could not load %v. Error: %v\", baseConfig, err),\n\t\t}\n\t}\n\n\t// Merge the previous KfCfg's customized values into the new KfCfg\n\tMergeKfCfg(oldKfCfg, newKfCfg)\n\n\t// Compute hash from the new KfCfg and use it to create the new app directory\n\th, err := computeHash(newKfCfg)\n\tif err != nil {\n\t\treturn nil, \"\", &kfapis.KfError{\n\t\t\tCode: int(kfapis.INTERNAL_ERROR),\n\t\t\tMessage: fmt.Sprintf(\"Could not compute sha256 hash. 
Error: %v\", err),\n\t\t}\n\t}\n\n\tnewAppDir := filepath.Join(appDir, h)\n\tnewKfCfg.Spec.AppDir = newAppDir\n\tnewKfCfg.Spec.Version = version\n\toutputFilePath := filepath.Join(newAppDir, kftypesv3.KfConfigFile)\n\n\t// Make sure the directory is created.\n\tif _, err := os.Stat(newAppDir); os.IsNotExist(err) {\n\t\tlog.Infof(\"Creating directory %v\", newAppDir)\n\t\terr = os.MkdirAll(newAppDir, os.ModePerm)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"couldn't create directory %v Error %v\", newAppDir, err)\n\t\t\treturn nil, \"\", err\n\t\t}\n\t} else {\n\t\tlog.Infof(\"App directory %v already exists\", newAppDir)\n\t}\n\n\terr = configconverters.WriteConfigToFile(*newKfCfg)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\treturn newKfCfg, outputFilePath, nil\n}", "func newConfig() Config {\n\treturn Config{\n\t\tDefaultContainerConfig: newDefaultContainerConfig(),\n\t\tContainersConfig: map[string]ContainerConfig{},\n\t\tExclude: []string{},\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Post is post message to slack
func (s *Slack) Post(msg Message) (err error) { b, err := json.Marshal(msg) if err != nil { return err } buf := bytes.NewBuffer(b) req, err := http.NewRequest("POST", s.URL, buf) if err != nil { return errors.Wrap(err, "Can't make new request") } req.Header.Set("User-Agent", "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5") req.Header.Set("Content-Type", "application/json") req.Header.Set("Accept", "application/json") if s.Verbose { if curl, err := http2curl.GetCurlCommand(req); err == nil { fmt.Fprintf(os.Stderr, "[CURL]: %v", curl) } } client := http.Client{Timeout: s.Timeout} res, err := client.Do(req) if err != nil { return errors.Wrap(err, "Can't post request") } defer func() { if err := res.Body.Close(); err != nil { fmt.Fprintf(os.Stderr, "[WARN]: %v", errors.Wrap(err, "Can't close response body")) } }() if res.StatusCode != 200 { return errors.New("Slack response status is not 2xx") } return nil }
[ "func (s SlackReporter) Post(ctx context.Context, msg string) (string, error) {\n\t_, ts, err := s.api.PostMessageContext(ctx, s.channel, slack.MsgOptionText(msg, false))\n\treturn ts, err\n}", "func (t *FakeSlackChat) postMessage(msg Message) error {\n\treturn nil\n}", "func (b *Bot) post(message map[string]interface{}, reply *domain.WorkReply, data *domain.Context, sub *subscription) error {\n\tmessage[\"text\"] = mainMessageFormatted()\n\tmessage[\"as_user\"] = true\n\tvar err error\n\t_, err = sub.s.Do(\"POST\", \"chat.postMessage\", message)\n\treturn err\n}", "func SlackPost(username string, icon string, text string, hookurl string) (err error) {\n\tapiURL, err := url.ParseRequestURI(hookurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tquery := url.Values{}\n\tapiURL.RawQuery = query.Encode()\n\tdata, _ := json.Marshal(map[string]string{\n\t\t\"text\": text,\n\t\t\"icon_emoji\": icon,\n\t\t\"username\": username,\n\t})\n\tclient := &http.Client{}\n\tr, err := http.NewRequest(\"POST\", hookurl, strings.NewReader(string(data)))\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Header.Set(\"Content-Type\", \"application/json\")\n\t_, err = client.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (hc *HipChat2) Post(message string) bool {\n\tif hc.client == nil {\n\t\thc.client = hc.newClient()\n\t\tif hc.client == nil {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tmsg := &hipchat.NotificationRequest{\n\t\tColor: \"purple\",\n\t\tMessage: message,\n\t\tNotify: true,\n\t\tMessageFormat: \"text\",\n\t}\n\n\tif _, err := hc.client.Room.Notification(hc.RoomID, msg); err != nil {\n\t\tlog.Errorf(\"Failed post message...: %s\", msg.Message)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func setupPostToSlack(webhook string) func(string) {\n\treturn func(msg string) {\n\t\tresp, err := http.PostForm(webhook, url.Values{\"payload\": {msg}})\n\t\tcheck(err)\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err := 
ioutil.ReadAll(resp.Body)\n\t\t\tcheck(err)\n\t\t\tfmt.Printf(\"error posting to slack: [%s] %s\\n\", resp.Status, body)\n\t\t}\n\t}\n}", "func SendMessageToSlack(message string) {\n\tfmt.Println(\"Sending message to slack...\")\n\n\thttp.Post(url, \"\")\n\n\treturn nil\n}", "func sendToSlack(title, message, color string) error {\n\tattachment := `{\n\t\t\"attachments\": [\n\t\t\t{\n\t\t\t\t\"fallback\": \"` + title + `: ` + message + `\",\n\t\t\t\t\"color\": \"` + color + `\",\n\t\t\t\t\"fields\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\t\t\"value\": \"` + message + `\",\n\t\t\t\t\t\t\"short\": false\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t]\n\t}`\n\n\tresponse, err := http.Post(\n\t\tConfig.WebhookURL,\n\t\t\"POST\",\n\t\tbytes.NewBufferString(attachment),\n\t)\n\n\tif response.StatusCode != http.StatusOK {\n\t\tbuffer := new(bytes.Buffer)\n\t\tbuffer.ReadFrom(response.Body)\n\t\terr = errors.New(buffer.String())\n\t}\n\n\treturn err\n}", "func PostToSlack(webhookURL string, payload slack.Payload) []error {\n\n\treturn slack.Send(webhookURL, \"\", payload)\n}", "func (s *SlackService) SimplePost(\n\tchannelName string,\n\tstoryText string,\n\ticon messages.Icon,\n\tasUser bool,\n iconEmoji ...string,\n) {\n\tuser := s.Config.GetString(\"slack.username\")\n\n\tif s.Bot != nil {\n\t\tuser = s.Bot.slackInfo.User.Name\n\t}\n\n emoji := \"\"\n\n if len(iconEmoji) > 0 {\n emoji = iconEmoji[0]\n }\n\n\ts.Slack.PostMessage(\n\t\tchannelName,\n\t\tstoryText,\n\t\tslack.PostMessageParameters{\n\t\t\tUsername: user,\n\t\t\tIconURL: string(icon),\n IconEmoji: string(emoji),\n\t\t\tAsUser: asUser,\n Parse: \"none\",\n\t\t},\n\t)\n}", "func MsgSlack(ctx *Context, msg string) error {\n\tcfg := ctx.Config\n\twebhookURL := \"https://hooks.slack.com/services/\" + cfg.Slacks[*ctx.ArgProfileName].Key\n\n\tlog.Printf(\"webhook: %s\", webhookURL)\n\n\tslackBody, _ := json.Marshal(slackRequestBody{Text: msg})\n\treq, err := http.NewRequest(http.MethodPost, 
webhookURL, bytes.NewBuffer(slackBody))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{Timeout: 10 * time.Second}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\tif buf.String() != \"ok\" {\n\t\treturn errors.New(\"Non-ok response returned from Slack\")\n\t}\n\treturn nil\n}", "func postWebhook(webhookUrl string, webhookType string, p []byte) {\n\n\tvar payloadName string\n\tswitch webhookType {\n\tcase \"slack\":\n\t\tpayloadName = \"payload\"\n\tcase \"discord\":\n\t\tpayloadName = \"payload_json\"\n\t}\n\tresp, _ := http.PostForm(\n\t\twebhookUrl,\n\t\turl.Values{payloadName: {string(p)}},\n\t)\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tfmt.Println(string(body))\n}", "func (c Client) Post(method string, params url.Values) (*jsontree.JsonTree, error) {\n\tparams[\"token\"] = []string{c.Token}\n\tresp, err := http.PostForm(fmt.Sprintf(\"https://slack.com/api/%s\", method), params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\ttree := jsontree.New()\n\terr = tree.UnmarshalJSON(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tok, err := tree.Get(\"ok\").Boolean()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !ok {\n\t\tmessage, _ := tree.Get(\"error\").String()\n\t\treturn nil, fmt.Errorf(\"Error: %s\", message)\n\t}\n\n\treturn tree, nil\n}", "func (mm mattermostMessage) sendMessage() {\n\tpost := &model.Post{}\n\tpost.ChannelId = mm.Event.Broadcast.ChannelId\n\t// Create file if message is too large\n\tif len(mm.Response) >= 3990 {\n\t\tres, resp := client.UploadFileAsRequestBody([]byte(mm.Response), mm.Event.Broadcast.ChannelId, mm.Request)\n\t\tif resp.Error != nil {\n\t\t\tlogging.Logger.Error(\"Error occured while uploading file. 
Error: \", resp.Error)\n\t\t}\n\t\tpost.FileIds = []string{string(res.FileInfos[0].Id)}\n\t} else if len(mm.Response) == 0 {\n\t\tlogging.Logger.Info(\"Invalid request. Dumping the response\")\n\t\treturn\n\t} else {\n\t\tpost.Message = \"```\\n\" + mm.Response + \"\\n```\"\n\t}\n\n\t// Create a post in the Channel\n\tif _, resp := client.CreatePost(post); resp.Error != nil {\n\t\tlogging.Logger.Error(\"Failed to send message. Error: \", resp.Error)\n\t}\n}", "func PostInitSlackMessage(webhook string) {\n\tmsg := &slack.WebhookMessage{\n\t\tUsername: Username,\n\t\tIconEmoji: IconEmoji,\n\t\tText: \"DocNoc has started scanning\",\n\t}\n\tif err := slack.PostWebhook(webhook, msg); err != nil {\n\t\tfmt.Println(\"🔥: Can't post init message to slack. Operating in headless state\", err)\n\t}\n}", "func PostMessageOnTopic(c *gin.Context) {\n\tname := c.Param(\"name\")\n\n\tvar msg types.Message\n\terr := c.BindWith(&msg, &types.MessageBinding{})\n\tif err != nil {\n\t\tc.JSON(http.StatusNotAcceptable, gin.H{\"error\": err.Error()})\n\t} else {\n\t\tmsg.Topic = &name\n\t\tmsg.Timestamp = time.Now()\n\t\tdispatcher.DispatchMessage(&msg)\n\t\tc.JSON(http.StatusOK, gin.H{\"result\": \"Message received\"})\n\t}\n}", "func (b *Bot) PostMessage(text, channel string) {\n\tb.rtm.SendMessage(b.rtm.NewOutgoingMessage(text, channel))\n}", "func PostToNewAccounts(msg string) error {\n\tctx := context.Background()\n\tclient := &http.Client{}\n\tu := url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"hooks.slack.com\",\n\t\tPath: \"services/TJ42GDSA0/BL63K1C57/T2byQxw0oXiRqUOGCEbwP5TG\",\n\t}\n\n\tbuf := &bytes.Buffer{}\n\tslackMsg := slackMessage{\n\t\tText: msg,\n\t}\n\tif err := json.NewEncoder(buf).Encode(&slackMsg); err != nil {\n\t\treturn errors.Wrap(err, \"encoding slack message\")\n\t}\n\n\toperation := func() error {\n\t\treq, err := http.NewRequest(http.MethodPost, u.String(), buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Set(\"Content-type\", 
\"application/json\")\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer check.Err(resp.Body.Close)\n\n\t\tif resp.StatusCode == http.StatusBadGateway {\n\t\t\treturn fmt.Errorf(\"server: temporary error\")\n\t\t} else if resp.StatusCode >= 300 {\n\t\t\tb, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn backoff.Permanent(fmt.Errorf(\"server: %v\", string(b)))\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err := backoff.RetryNotify(operation,\n\t\tbackoff.WithContext(backoff.WithMaxRetries(backoff.NewExponentialBackOff(), maxRetries), ctx),\n\t\tfunc(err error, t time.Duration) {\n\t\t\tlog.Sugar.Errorw(\"error posting to new-accounts in slack, retrying\",\n\t\t\t\t\"err\", err.Error(),\n\t\t\t)\n\t\t}); err != nil {\n\t\treturn errors.Wrap(err, \"posting to new-accounts in slack\")\n\t}\n\n\treturn nil\n}", "func (s SlackReporter) PostThread(ctx context.Context, timeStamp, msg string) error {\n\t_, _, err := s.api.PostMessageContext(ctx, s.channel, slack.MsgOptionText(msg, false), slack.MsgOptionTS(timeStamp))\n\treturn err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve schedules by plan ID
func NewGetPlanSchedules(planID uuid.UUID, logger *zap.Logger, db pgxload.PgxLoader) *GetPlanSchedules { return &GetPlanSchedules{ planID: planID, db: db, logger: logger.Named("GetPlanSchedules"), } }
[ "func (r *ScheduleRepo) Get(id usecase.ScheduleID) (*schedule.Schedule, usecase.Error) {\n\n\t// Retrieve from DB\n\tquery := fmt.Sprintf(\"%s WHERE id = $1\", scheduleSelectClause())\n\trow := r.db.QueryRow(query, id)\n\tsd, err := parseScheduleRow(row)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, usecase.NewError(usecase.ErrRecordNotFound, \"no task found with id = %v\", id)\n\t\t}\n\t\treturn nil, usecase.NewError(usecase.ErrUnknown, \"error parsing schedule id %d: %v\", id, err)\n\t}\n\n\t// Get recurring tasks from DB\n\trts, err := r.getRecurringTasks([]usecase.ScheduleID{id})\n\tif err != nil {\n\t\treturn nil, usecase.NewError(usecase.ErrUnknown, \"error retrieving recurring tasks for schedule id %v\", id)\n\t}\n\tfor _, rt := range rts[id] {\n\t\tsd.Schedule.AddTask(rt)\n\t}\n\n\treturn sd.Schedule, nil\n}", "func (store *Postgres) GetSchedulesForProject(accountID, projectID int64) ([]Schedule, error) {\n\tconst getSchedulesForProjectStatement = `SELECT schedules.id, account_id, project_id, schedules.title, cron_expression, repository, schedules.description, ref\n\t\t\t\t\t\t FROM schedules\n\t\t\t\t\t\t INNER JOIN projects ON projects.id = schedules.project_id \n\t\t\t\t\t\t WHERE account_id = $1 AND project_id = $2`\n\n\trows, err := store.DB.Query(getSchedulesForProjectStatement, accountID, projectID)\n\tif err != nil {\n\t\treturn []Schedule{}, err\n\t}\n\n\treturn sqlRowsToSchedules(rows)\n}", "func (s *ScheduleRestClient) Schedule(id string) (models.Schedule, error) {\n\treturn s.requestSchedule(s.url + \"/\" + id)\n}", "func (c Client) Get(id string, params *stripe.SubscriptionScheduleParams) (*stripe.SubscriptionSchedule, error) {\n\tpath := stripe.FormatURLPath(\"/v1/subscription_schedules/%s\", id)\n\tsched := &stripe.SubscriptionSchedule{}\n\terr := c.B.Call(http.MethodGet, path, c.Key, params, sched)\n\treturn sched, err\n}", "func ShowSchedules(conf *config.Config) ([]Schedule, error) {\n\tdb, err := 
scheduleDB(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer db.Close()\n\tvar found []Schedule\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tvar result Schedule\n\t\tb := tx.Bucket([]byte(conf.Scheduler.SchedulerDBBucket))\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\terr = json.Unmarshal(v, &result)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif result.Status == SCHEDULE_ENABLED {\n\t\t\t\tfound = append(found, result)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn found, nil\n}", "func GetSchedule(connection *common.Connection, id string) (*Schedule, *common.ErrorHUE, error) {\n\tschedule := &Schedule{}\n\tpath := fmt.Sprintf(\"/api/\" + connection.Username + \"/schedules/\" + id)\n\tbodyResponse, errHUE, err := internal.Request(connection, \"GET\", http.StatusOK, path, nil)\n\tif errHUE != nil {\n\t\tlog.Errorf(\"HUE Error: %s\", errHUE.Error.Description)\n\t\treturn schedule, errHUE, err\n\t}\n\tif err != nil {\n\t\tlog.Errorf(\"Error: %s\", err.Error())\n\t\treturn schedule, errHUE, err\n\t}\n\terr = json.Unmarshal(bodyResponse, &schedule)\n\tif err != nil {\n\t\tlog.Errorf(\"Error with unmarshalling GetSchedule: %s\", err.Error())\n\t\treturn schedule, nil, err\n\t}\n\treturn schedule, nil, nil\n}", "func Get(id string, params *stripe.SubscriptionScheduleParams) (*stripe.SubscriptionSchedule, error) {\n\treturn getC().Get(id, params)\n}", "func (db *Database) GetSchedule(startLocationName, destinationName, date string) ([]Trip, map[int][]TripOffering, error) {\n trips := []Trip{}\n offerings := make(map[int][]TripOffering)\n row, err := db.Query(fmt.Sprintf(\"SELECT * FROM Trip WHERE StartLocationName=%s\", startLocationName))\n if err != nil {\n return trips, offerings, err\n }\n // Get the trips with the given start location name\n trips = RowToTrips(row)\n row.Close()\n // Get the trip offerings for each trip\n for _, t := range trips {\n row, err := 
db.Query(fmt.Sprintf(\"SELECT * FROM TripOffering WHERE TripNumber=%d\", t.TripNumber))\n if err != nil {\n return trips, offerings, err\n }\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n if _, ok := offerings[tripNumber]; !ok {\n offerings[tripNumber] = []TripOffering{}\n }\n offerings[tripNumber] = append(offerings[tripNumber], TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n row.Close()\n }\n return trips, offerings, nil\n}", "func (c *Client) Schedules(jobID string) ([]Schedule, error) {\n\tresp, err := c.http.Get(\"/v1/jobs/\" + jobID + \"/schedules\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\tvar schedules []Schedule\n\t\terr = json.NewDecoder(resp.Body).Decode(&schedules)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn schedules, nil\n\tcase 404:\n\t\treturn nil, fmt.Errorf(`job \"%s\" does not exist`, jobID)\n\tdefault:\n\t\tvar apiError *Error\n\t\tif err := json.NewDecoder(resp.Body).Decode(&apiError); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tapiError.Code = resp.StatusCode\n\t\treturn nil, apiError\n\t}\n}", "func (c *DetaClient) GetSchedule(req *GetScheduleRequest) (*GetScheduleResponse, error) {\n\ti := &requestInput{\n\t\tPath: fmt.Sprintf(\"/schedules/%s\", req.ProgramID),\n\t\tMethod: \"GET\",\n\t\tNeedsAuth: true,\n\t}\n\n\to, err := c.request(i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif o.Status == 404 {\n\t\treturn nil, nil\n\t}\n\n\tif o.Status != 200 {\n\t\tmsg := o.Error.Message\n\t\tif msg == \"\" {\n\t\t\tmsg = o.Error.Errors[0]\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to get 
schedule: %v\", msg)\n\t}\n\n\tvar resp GetScheduleResponse\n\terr = json.Unmarshal(o.Body, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (svc *ServiceDefinition) GetPlanById(planId string) (*ServicePlan, error) {\n\tcatalogEntry, err := svc.CatalogEntry()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, plan := range catalogEntry.Plans {\n\t\tif plan.ID == planId {\n\t\t\treturn &plan, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"Plan ID %q could not be found\", planId)\n}", "func (r *ScheduleRepo) GetAllScheduled() (map[usecase.ScheduleID]*schedule.Schedule, usecase.Error) {\n\treturn r.getAllWhere(\"paused = FALSE AND removed_time = $1\", time.Time{})\n}", "func (m *DirectoryRequestBuilder) RoleAssignmentSchedulesById(id string)(*ifcfeaaa38c74c27248ac242dc390a943df9fda837089f362cf5a0b616515e16e.UnifiedRoleAssignmentScheduleItemRequestBuilder) {\n urlTplParams := make(map[string]string)\n for idx, item := range m.pathParameters {\n urlTplParams[idx] = item\n }\n if id != \"\" {\n urlTplParams[\"unifiedRoleAssignmentSchedule%2Did\"] = id\n }\n return ifcfeaaa38c74c27248ac242dc390a943df9fda837089f362cf5a0b616515e16e.NewUnifiedRoleAssignmentScheduleItemRequestBuilderInternal(urlTplParams, m.requestAdapter);\n}", "func (s *Service) FindPlan(id string) (*Plan, bool) {\n\tfor _, p := range s.Plans {\n\t\tif p.ID == id {\n\t\t\treturn p, true\n\t\t}\n\t}\n\treturn nil, false\n}", "func GetADVSchedules(id string, addr string, localIP string) error {\r\n\tlocalAddr, err := net.ResolveIPAddr(\"ip\", localIP)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tLocalBindAddr := &net.TCPAddr{IP: localAddr.IP}\r\n\ttransport := &http.Transport{\r\n\t\tDial: (&net.Dialer{\r\n\t\t\tLocalAddr: LocalBindAddr,\r\n\t\t\tTimeout: 5 * time.Second,\r\n\t\t\tKeepAlive: 30 * time.Second,\r\n\t\t}).Dial,\r\n\t}\r\n\tclient := &http.Client{\r\n\t\tTransport: transport,\r\n\t}\r\n\r\n\turl := \"http://\" + addr + \"/adm/adv-schedules/\" + id + 
\"?format=cic\"\r\n\r\n\treq, err := http.NewRequest(\"GET\", url, nil)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tresp, err := client.Do(req)\r\n\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tif resp.StatusCode != 200 {\r\n\t\treturn fmt.Errorf(\"ADM Receved %v\", resp.Status)\r\n\t}\r\n\r\n\tfor {\r\n\t\tbuf := make([]byte, 32*1024)\r\n\t\t_, err := resp.Body.Read(buf)\r\n\r\n\t\tif err != nil && err != io.EOF {\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\tresp.Body.Close()\r\n\ttransport.CloseIdleConnections()\r\n\r\n\treturn nil\r\n}", "func GetSchedule(ctx *sgo.Context) error {\n\tscheduleData, err := modelGetSchedule()\n\tif err != nil {\n\t\treturn ctx.JSON(500, 0, err.Error(), nil)\n\t}\n\tif len(scheduleData) == 0 {\n\t\treturn ctx.JSON(200, 0, \"no result found\", nil)\n\t}\n\treturn ctx.JSON(200, 1, \"success\", scheduleData)\n}", "func GetPlans(BattleID string) []*Plan {\n\tvar plans = make([]*Plan, 0)\n\tplanRows, plansErr := db.Query(\"SELECT id, name, points, active, skipped, votestart_time, voteend_time, votes FROM plans WHERE battle_id = $1 ORDER BY created_date\", BattleID)\n\tif plansErr == nil {\n\t\tdefer planRows.Close()\n\t\tfor planRows.Next() {\n\t\t\tvar v string\n\t\t\tvar p = &Plan{PlanID: \"\",\n\t\t\t\tPlanName: \"\",\n\t\t\t\tVotes: make([]*Vote, 0),\n\t\t\t\tPoints: \"\",\n\t\t\t\tPlanActive: false,\n\t\t\t\tPlanSkipped: false,\n\t\t\t\tVoteStartTime: time.Now(),\n\t\t\t\tVoteEndTime: time.Now(),\n\t\t\t}\n\t\t\tif err := planRows.Scan(&p.PlanID, &p.PlanName, &p.Points, &p.PlanActive, &p.PlanSkipped, &p.VoteStartTime, &p.VoteEndTime, &v); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\terr = json.Unmarshal([]byte(v), &p.Votes)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\n\t\t\t\tfor i := range p.Votes {\n\t\t\t\t\tvote := p.Votes[i]\n\t\t\t\t\tif p.PlanActive {\n\t\t\t\t\t\tvote.VoteValue = 
\"\"\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tplans = append(plans, p)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn plans\n}", "func (pgmodel *PgDB) SelectCurrentScheduler() ([]model.ScheduleTask, error) {\n\tnow, _ := time.Parse(\"2006-01-02 15:04:00\", time.Now().UTC().Format(\"2006-01-02 15:04:00\"))\n\n\tscheduleRepository := model.NewScheduleRepository()\n\tscheduleModel := scheduleRepository.GetTaskModel()\n\n\terr := pgmodel.db.Model(&scheduleModel).\n\t\tColumnExpr(\"schedule_task.*\").\n\t\tColumnExpr(\"delivery.title AS delivery__title\").\n\t\tColumnExpr(\"delivery.text AS delivery__text\").\n\t\tColumnExpr(\"delivery.user_ids AS delivery__user_ids\").\n\t\tColumnExpr(\"delivery.id AS delivery__id\").\n\t\tColumnExpr(\"delivery.filter AS delivery__filter\").\n\t\tJoin(\"INNER JOIN talkbank_bots.delivery AS delivery ON delivery.id = schedule_task.action_id\").\n\t\tWhere(\"schedule_task.is_active = ?\", true).\n\t\tWhereGroup(func(q *orm.Query) (*orm.Query, error) {\n\t\t\treturn q.\n\t\t\t\tWhereOrGroup(func(subQ1 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\treturn subQ1.\n\t\t\t\t\t\tWhere(\"schedule_task.type = ?\", \"onetime\").\n\t\t\t\t\t\tWhere(\"schedule_task.from_datetime >= ?\", now).\n\t\t\t\t\t\tWhereGroup(func(subQ *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subQ.\n\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS NULL\").\n\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime >= schedule_task.from_datetime\"), nil\n\t\t\t\t\t\t}), nil\n\t\t\t\t}).\n\t\t\t\tWhereOrGroup(func(subQ2 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\treturn subQ2.\n\t\t\t\t\t\tWhere(\"schedule_task.type = ?\", \"recurrently\").\n\t\t\t\t\t\tWhereGroup(func(subGroup *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subGroup.Where(\"schedule_task.from_datetime <= ?\", now).\n\t\t\t\t\t\t\t\tWhereGroup(func(subQ *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\treturn subQ.\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS 
NULL\").\n\t\t\t\t\t\t\t\t\t\tWhereOrGroup(func(subQ1 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\t\t\treturn subQ1.\n\t\t\t\t\t\t\t\t\t\t\t\tWhere(\"schedule_task.to_datetime >= ?\", now).\n\t\t\t\t\t\t\t\t\t\t\t\tWhere(\"schedule_task.to_datetime > schedule_task.from_datetime\"), nil\n\t\t\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t}).\n\t\t\t\t\t\tWhereOrGroup(func(subGroup2 *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\treturn subGroup2.\n\t\t\t\t\t\t\t\tWhere(\"schedule_task.from_datetime >= ?\", now).\n\t\t\t\t\t\t\t\tWhere(\"schedule_task.from_datetime <= schedule_task.next_run\").\n\t\t\t\t\t\t\t\tWhereGroup(func(toGroup *orm.Query) (*orm.Query, error) {\n\t\t\t\t\t\t\t\t\treturn toGroup.\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime IS NULL\").\n\t\t\t\t\t\t\t\t\t\tWhereOr(\"schedule_task.to_datetime >= schedule_task.next_run\"), nil\n\t\t\t\t\t\t\t\t}), nil\n\t\t\t\t\t\t}), nil\n\t\t\t\t}), nil\n\t\t}).\n\t\tOrder(\"schedule_task.id ASC\").\n\t\tSelect()\n\n\tif err != nil {\n\t\tfmt.Println(\"Error to get data from scheduler_task\", err)\n\t\treturn nil, err\n\t}\n\n\treturn scheduleModel, nil\n}", "func cmdGetPolicySchedules(ccmd *cobra.Command, args []string) {\n\taplSvc := apl.NewClient()\n\n\toutput := runGetCommand(args, aplSvc.PolicySchedules.Get)\n\n\tif output != nil {\n\t\tfields := []string{\"ID\", \"Name\", \"ResourceType\", \"Status\", \"CreatedTime\"}\n\t\tprintTableResultsCustom(output.(apl.PolicySchedule), fields)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get takes name of the cloudwatchEventTarget, and returns the corresponding cloudwatchEventTarget object, and an error if there is any.
func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{}) if obj == nil { return nil, err } return obj.(*v1alpha1.CloudwatchEventTarget), err }
[ "func GetEventTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventTargetState, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tvar resource EventTarget\n\terr := ctx.ReadResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (e *EventAPI) Get(name string) (*EventType, error) {\n\teventType := &EventType{}\n\terr := e.client.httpGET(e.backOffConf.create(), e.eventURL(name), eventType, \"unable to request event types\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn eventType, nil\n}", "func (s cloudEventsTargetNamespaceLister) Get(name string) (*v1alpha1.CloudEventsTarget, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"cloudeventstarget\"), name)\n\t}\n\treturn obj.(*v1alpha1.CloudEventsTarget), nil\n}", "func (m *DeviceManagementTroubleshootingEvent) GetEventName()(*string) {\n val, err := m.GetBackingStore().Get(\"eventName\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func GetEvent(name string) (Event, bool) {\n\treturn std.GetEvent(name)\n}", "func (c *OutputEventContext) Get(eventName string) (*protocol.Event, error) {\n\te, ok := c.Context[eventName]\n\tif !ok {\n\t\terr := fmt.Errorf(\"cannot find the event name in OutputEventContext : %s\", eventName)\n\t\treturn nil, err\n\t}\n\treturn e, nil\n}", "func (collection Listeners) Get(name string) Listener {\n\tfor _, listener := range collection {\n\t\tif listener.Name() == name {\n\t\t\treturn listener\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *FakeCloudwatchEventTargets) Delete(name string, options *v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteAction(cloudwatcheventtargetsResource, c.ns, name), 
&v1alpha1.CloudwatchEventTarget{})\n\n\treturn err\n}", "func NewEventTarget(ctx *pulumi.Context,\n\tname string, args *EventTargetArgs, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tif args == nil || args.Arn == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Arn'\")\n\t}\n\tif args == nil || args.Rule == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rule'\")\n\t}\n\tif args == nil {\n\t\targs = &EventTargetArgs{}\n\t}\n\tvar resource EventTarget\n\terr := ctx.RegisterResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (_this *Event) Target() *EventTarget {\n\tvar ret *EventTarget\n\tvalue := _this.Value_JS.Get(\"target\")\n\tif value.Type() != js.TypeNull && value.Type() != js.TypeUndefined {\n\t\tret = EventTargetFromJS(value)\n\t}\n\treturn ret\n}", "func (e *evt) Name() string {\n\treturn e.name\n}", "func (r *ProjectsTraceSinksService) Get(name string) *ProjectsTraceSinksGetCall {\n\tc := &ProjectsTraceSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (w *Watcher) GetTarget(targetName string) (*Target, error) {\n\tmutableMutex.Lock()\n\tdefer mutableMutex.Unlock()\n\tif w.TargetMap == nil {\n\t\tw.TargetMap = make(map[string]*Target)\n\t}\n\ttarget, ok := w.TargetMap[targetName]\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"not exist domain\")\n\t}\n\treturn target, nil\n}", "func (c *FakeCloudwatchEventTargets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(cloudwatcheventtargetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (s 
googleCloudStorageTargetNamespaceLister) Get(name string) (*v1alpha1.GoogleCloudStorageTarget, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"googlecloudstoragetarget\"), name)\n\t}\n\treturn obj.(*v1alpha1.GoogleCloudStorageTarget), nil\n}", "func (m *AppliedAuthenticationEventListener) GetEventType()(*AuthenticationEventType) {\n val, err := m.GetBackingStore().Get(\"eventType\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*AuthenticationEventType)\n }\n return nil\n}", "func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (m *UserSimulationEventInfo) GetEventName()(*string) {\n return m.eventName\n}", "func (oc orderedCallbacks) get(name string) *namedCallback {\n\tcallback, _ := oc.getWithPosition(name)\n\treturn callback\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List takes label and field selectors, and returns the list of CloudwatchEventTargets that match those selectors.
func (c *FakeCloudwatchEventTargets) List(opts v1.ListOptions) (result *v1alpha1.CloudwatchEventTargetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(cloudwatcheventtargetsResource, cloudwatcheventtargetsKind, c.ns, opts), &v1alpha1.CloudwatchEventTargetList{}) if obj == nil { return nil, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } list := &v1alpha1.CloudwatchEventTargetList{ListMeta: obj.(*v1alpha1.CloudwatchEventTargetList).ListMeta} for _, item := range obj.(*v1alpha1.CloudwatchEventTargetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } } return list, err }
[ "func (s *cloudEventsTargetLister) List(selector labels.Selector) (ret []*v1alpha1.CloudEventsTarget, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.CloudEventsTarget))\n\t})\n\treturn ret, err\n}", "func (s cloudEventsTargetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CloudEventsTarget, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.CloudEventsTarget))\n\t})\n\treturn ret, err\n}", "func (c *FakeAWSSNSTargets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AWSSNSTargetList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(awssnstargetsResource, awssnstargetsKind, c.ns, opts), &v1alpha1.AWSSNSTargetList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.AWSSNSTargetList{ListMeta: obj.(*v1alpha1.AWSSNSTargetList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.AWSSNSTargetList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}", "func (s *Service) ListTargets(ctx context.Context) (list []platform.ScraperTarget, err error) {\n\tlist = make([]platform.ScraperTarget, 0)\n\ts.scraperTargetKV.Range(func(_, v interface{}) bool {\n\t\tb, ok := v.(platform.ScraperTarget)\n\t\tif !ok {\n\t\t\terr = &platform.Error{\n\t\t\t\tCode: platform.EInvalid,\n\t\t\t\tMsg: fmt.Sprintf(\"type %T is not a scraper target\", v),\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\tlist = append(list, b)\n\t\treturn true\n\t})\n\treturn list, err\n}", "func (w *Watcher) GetTargetNameList() ([]string) {\n\tmutableMutex.Lock()\n\tdefer mutableMutex.Unlock()\n\tif w.TargetMap == nil {\n\t\tw.TargetMap = make(map[string]*Target)\n\t}\n\ttargetNameList := make([]string, 
0, len(w.TargetMap))\n\tfor tn := range w.TargetMap {\n\t\ttargetNameList = append(targetNameList, tn)\n\t}\n\treturn targetNameList\n}", "func (s *targetLister) List(selector labels.Selector) (ret []*v1alpha1.Target, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.Target))\n\t})\n\treturn ret, err\n}", "func (t *targets) List() ([]models.SimulationTarget, error) {\n\titems := make([]models.SimulationTarget, 0)\n\tprefix := []byte(\"target-\")\n\terr := t.store.list(prefix, func(k []byte, v []byte) error {\n\t\tvar target models.SimulationTarget\n\t\terr := json.Unmarshal(v, &target)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to deserialize target %s: %w\", k, err)\n\t\t}\n\n\t\titems = append(items, target)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn items, nil\n}", "func (c *FakeScheduledEchos) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ScheduledEchoList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(scheduledechosResource, scheduledechosKind, c.ns, opts), &v1alpha1.ScheduledEchoList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.ScheduledEchoList{ListMeta: obj.(*v1alpha1.ScheduledEchoList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.ScheduledEchoList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}", "func (s *googleCloudStorageTargetLister) List(selector labels.Selector) (ret []*v1alpha1.GoogleCloudStorageTarget, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.GoogleCloudStorageTarget))\n\t})\n\treturn ret, err\n}", "func (ts TargetSet) ListTargets() []string {\n\taddrs := make([]string, 0)\n\tfor _, 
target := range ts {\n\t\taddrs = append(addrs, target.AddrString())\n\t}\n\treturn addrs\n}", "func (t *targets) List() []string {\n\tif t.lister == nil {\n\t\tt.l.Error(\"List(): Lister t.lister is nil\")\n\t\treturn []string{}\n\t}\n\n\tlist := t.lister.List()\n\n\t// Filter by regexp\n\tif t.re != nil {\n\t\tvar filter []string\n\t\tfor _, i := range list {\n\t\t\tif t.re.MatchString(i) {\n\t\t\t\tfilter = append(filter, i)\n\t\t\t}\n\t\t}\n\t\tlist = filter\n\t}\n\n\t// Filter by lameduck\n\tif t.ldLister != nil {\n\t\tlameDucksList := t.ldLister.List()\n\n\t\tlameDuckMap := make(map[string]bool)\n\t\tfor _, i := range lameDucksList {\n\t\t\tlameDuckMap[i] = true\n\t\t}\n\t\tvar filter []string\n\t\tfor _, i := range list {\n\t\t\tif !lameDuckMap[i] {\n\t\t\t\tfilter = append(filter, i)\n\t\t\t}\n\t\t}\n\t\tlist = filter\n\t}\n\n\treturn list\n}", "func (s *cloudEventsTargetLister) CloudEventsTargets(namespace string) CloudEventsTargetNamespaceLister {\n\treturn cloudEventsTargetNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func List(c *eclcloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {\n\turl := listURL(c)\n\n\tif opts != nil {\n\t\tquery, err := opts.ToTargetGroupListQuery()\n\t\tif err != nil {\n\t\t\treturn pagination.Pager{Err: err}\n\t\t}\n\n\t\turl += query\n\t}\n\n\treturn pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {\n\t\treturn TargetGroupPage{pagination.LinkedPageBase{PageResult: r}}\n\t})\n}", "func (c *FakeAzureEventHubsSources) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AzureEventHubsSourceList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(azureeventhubssourcesResource, azureeventhubssourcesKind, c.ns, opts), &v1alpha1.AzureEventHubsSourceList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := 
&v1alpha1.AzureEventHubsSourceList{ListMeta: obj.(*v1alpha1.AzureEventHubsSourceList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.AzureEventHubsSourceList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}", "func (s *cloudAuditLogsSourceLister) List(selector labels.Selector) (ret []*v1.CloudAuditLogsSource, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.CloudAuditLogsSource))\n\t})\n\treturn ret, err\n}", "func (c *RPCClient) ListTargets() ([]api.Target, error) {\n\tout := &ListTargetsOut{}\n\terr := c.call(\"ListTargets\", ListTargetsIn{}, out)\n\treturn out.Targets, err\n}", "func (h *Handler) ListByLabel(labels string) ([]*unstructured.Unstructured, error) {\n\tlistOptions := h.Options.ListOptions.DeepCopy()\n\tlistOptions.LabelSelector = labels\n\n\tif err := h.getGVRAndNamespaceScope(); err != nil {\n\t\treturn nil, err\n\t}\n\tif h.isNamespaced {\n\t\treturn extractList(h.dynamicClient.Resource(h.gvr).Namespace(h.namespace).List(h.ctx, *listOptions))\n\t}\n\treturn extractList(h.dynamicClient.Resource(h.gvr).List(h.ctx, *listOptions))\n}", "func (t *Target) List(verbose bool, nameWidth int) {\n\tif !verbose {\n\t\tif strings.HasPrefix(t.Name, \"_\") {\n\t\t\t// skip targets in non verbose mode as hidden.\n\t\t\treturn\n\t\t}\n\t\tpadWidth := nameWidth - len(t.Name)\n\t\tpaddedName := color.Yellow(t.Name)\n\t\tif padWidth > 0 {\n\t\t\tpaddedName += strings.Repeat(\" \", padWidth)\n\t\t}\n\t\tout := fmt.Sprintf(\"%s %s\\n\", paddedName, strings.TrimSpace(t.Description))\n\t\t_, err := t.W.Write([]byte(out))\n\t\tif err != nil {\n\t\t\tlog.Println(color.Red(err.Error()))\n\t\t}\n\t\treturn\n\t}\n\n\t// target name\n\tout := fmt.Sprintf(\"%s: \\n\", color.Yellow(t.Name))\n\n\t// target description\n\tif t.Description != \"\" {\n\t\tout += fmt.Sprintf(\" - description: %s\\n\", 
strings.TrimSpace(t.Description))\n\t}\n\n\t// target before\n\tif len(t.Before) > 0 {\n\t\tbeforeList := \" - before: \" + strings.Join(t.Before, \", \")\n\t\tout += fmt.Sprintln(beforeList)\n\t}\n\n\t// target after\n\tif len(t.After) > 0 {\n\t\tafterList := \" - after: \" + strings.Join(t.After, \", \")\n\t\tout += fmt.Sprintln(afterList)\n\t}\n\n\t// target command\n\tout += fmt.Sprintf(\" - cmd:\\n \")\n\tout += fmt.Sprintln(strings.Replace(t.Cmd, \"\\n\", \"\\n \", -1))\n\t_, err := t.W.Write([]byte(out))\n\tif err != nil {\n\t\tlog.Println(color.Red(err.Error()))\n\t}\n}", "func (c *FakeRedisTriggers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RedisTriggerList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(redistriggersResource, redistriggersKind, c.ns, opts), &v1beta1.RedisTriggerList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1beta1.RedisTriggerList{ListMeta: obj.(*v1beta1.RedisTriggerList).ListMeta}\n\tfor _, item := range obj.(*v1beta1.RedisTriggerList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Watch returns a watch.Interface that watches the requested cloudwatchEventTargets.
func (c *FakeCloudwatchEventTargets) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(cloudwatcheventtargetsResource, c.ns, opts)) }
[ "func (c *FakeAWSSNSTargets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(awssnstargetsResource, c.ns, opts))\n\n}", "func (c *catzzzLoggers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"catzzzloggers\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}", "func (obs *Observer) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\treturn obs.client.Namespace(obs.namespace).Watch(opts)\n}", "func (c *federatedNotificationReceivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tResource(\"federatednotificationreceivers\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}", "func watch(k *kite.Client, eventType string, eventId string, interval time.Duration) error {\n\teventArgs := kloud.EventArgs([]kloud.EventArg{\n\t\tkloud.EventArg{\n\t\t\tType: eventType,\n\t\t\tEventId: eventId,\n\t\t},\n\t})\n\n\tfor {\n\t\tresp, err := k.TellWithTimeout(\"event\", defaultTellTimeout, eventArgs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar events []kloud.EventResponse\n\t\tif err := resp.Unmarshal(&events); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(events) == 0 {\n\t\t\treturn errors.New(\"incoming event response is not an array\")\n\t\t}\n\n\t\tif events[0].Error != nil {\n\t\t\treturn events[0].Error\n\t\t}\n\n\t\tDefaultUi.Info(fmt.Sprintf(\"%s ==> %s [Status: %s Percentage: 
%d]\",\n\t\t\tfmt.Sprint(time.Now())[:19],\n\t\t\tevents[0].Event.Message,\n\t\t\tevents[0].Event.Status,\n\t\t\tevents[0].Event.Percentage,\n\t\t))\n\n\t\tif events[0].Event.Error != \"\" {\n\t\t\terr := errors.New(events[0].Event.Error)\n\t\t\tDefaultUi.Error(err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tif events[0].Event.Percentage == 100 {\n\t\t\treturn nil\n\t\t}\n\n\t\ttime.Sleep(interval)\n\t}\n}", "func (c *googleCloudStorageSources) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"googlecloudstoragesources\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}", "func (c *FakeListeners) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(listenersResource, c.ns, opts))\n\n}", "func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {\n\tif t.fakingOptions.failAll != nil {\n\t\terr := t.fakingOptions.failAll.RunFakeInvocations()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn t.delegatee.Watch(gvr, name)\n}", "func (c *Fake) WatchEvents(request *events.Request) (*events.EventChannel, error) {\n\treturn new(events.EventChannel), nil\n}", "func (w *Watcher) Watch(\n\tctx context.Context,\n\teventCh chan<- Event,\n) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tdefer close(eventCh)\n\n\terrCh := make(chan error, 3)\n\ttaggedEventCh := make(chan Event)\n\n\tgo func() {\n\t\tif err := w.watchTagged(ctx, taggedEventCh); err != nil {\n\t\t\terrCh <- err\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif err := w.watchTags(ctx, taggedEventCh, eventCh); err != nil {\n\t\t\terrCh <- err\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif 
err := w.watchUnsorted(ctx, eventCh); err != nil {\n\t\t\terrCh <- err\n\t\t}\n\t}()\n\n\treturn <-errCh\n}", "func (c *kongs) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"kongs\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}", "func (c *kafkaTopics) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"kafkatopics\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}", "func (c *interacts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"interacts\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}", "func (s *ConfigService) Watch(d time.Duration)", "func Watch(ctx context.Context, i v1.PodInterface, podFilter *regexp.Regexp,\n\tcontainerFilter *regexp.Regexp, containerExcludeFilter *regexp.Regexp,\n\tcontainerState ContainerState, labelSelector labels.Selector) (chan *Target, chan *Target, error) {\n\n\tlogger := requestctx.Logger(ctx).WithName(\"pod-watch\").V(4)\n\n\tlogger.Info(\"create\")\n\twatcher, err := i.Watch(ctx, metav1.ListOptions{Watch: true, LabelSelector: labelSelector.String()})\n\tif err != nil {\n\t\tfmt.Printf(\"err.Error() = %+v\\n\", err.Error())\n\t\treturn nil, nil, errors.Wrap(err, \"failed to set up watch\")\n\t}\n\n\tadded := make(chan *Target)\n\tremoved := make(chan *Target)\n\n\tgo func() {\n\t\tlogger.Info(\"await events\")\n\t\tdefer func() {\n\t\t\tlogger.Info(\"event processing ends\")\n\t\t}()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.ResultChan():\n\t\t\t\tlogger.Info(\"received event\")\n\n\t\t\t\tif e.Object == nil 
{\n\t\t\t\t\tlogger.Info(\"event error, no object\")\n\t\t\t\t\t// Closed because of error\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tpod, ok := e.Object.(*corev1.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Info(\"event error, object not a pod\")\n\t\t\t\t\t// Not a Pod\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif !podFilter.MatchString(pod.Name) {\n\t\t\t\t\tlogger.Info(\"filtered\", \"pod\", pod.Name, \"filter\", podFilter.String())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch e.Type {\n\t\t\t\tcase watch.Added, watch.Modified:\n\t\t\t\t\tlogger.Info(\"pod added/modified\", \"name\", pod.Name)\n\n\t\t\t\t\tvar statuses []corev1.ContainerStatus\n\t\t\t\t\tstatuses = append(statuses, pod.Status.InitContainerStatuses...)\n\t\t\t\t\tstatuses = append(statuses, pod.Status.ContainerStatuses...)\n\n\t\t\t\t\tfor _, c := range statuses {\n\t\t\t\t\t\tif !containerFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"filtered\", \"container\", c.Name, \"filter\", containerFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif containerExcludeFilter != nil && containerExcludeFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"excluded\", \"container\", c.Name, \"exclude-filter\", containerExcludeFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif c.State.Running != nil || c.State.Terminated != nil { // There are logs to read\n\t\t\t\t\t\t\tlogger.Info(\"report added\", \"container\", c.Name, \"pod\", pod.Name, \"namespace\", pod.Namespace)\n\t\t\t\t\t\t\tadded <- &Target{\n\t\t\t\t\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\t\t\t\t\tPod: pod.Name,\n\t\t\t\t\t\t\t\tContainer: c.Name,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase watch.Deleted:\n\t\t\t\t\tlogger.Info(\"pod deleted\", \"name\", pod.Name)\n\n\t\t\t\t\tvar containers []corev1.Container\n\t\t\t\t\tcontainers = append(containers, pod.Spec.Containers...)\n\t\t\t\t\tcontainers = append(containers, pod.Spec.InitContainers...)\n\n\t\t\t\t\tfor _, c := range 
containers {\n\t\t\t\t\t\tif !containerFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"filtered\", \"container\", c.Name, \"filter\", containerFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif containerExcludeFilter != nil && containerExcludeFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"excluded\", \"container\", c.Name, \"exclude-filter\", containerExcludeFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlogger.Info(\"report removed\", \"container\", c.Name, \"pod\", pod.Name, \"namespace\", pod.Namespace)\n\t\t\t\t\t\tremoved <- &Target{\n\t\t\t\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\t\t\t\tPod: pod.Name,\n\t\t\t\t\t\t\tContainer: c.Name,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tlogger.Info(\"received stop request\")\n\t\t\t\twatcher.Stop()\n\t\t\t\tclose(added)\n\t\t\t\tclose(removed)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tlogger.Info(\"pass watch report channels\")\n\treturn added, removed, nil\n}", "func (c *buildConfigs) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {\n\treturn c.r.Get().\n\t\tPrefix(\"watch\").\n\t\tNamespace(c.ns).\n\t\tResource(\"buildConfigs\").\n\t\tParam(\"resourceVersion\", resourceVersion).\n\t\tLabelsSelectorParam(label).\n\t\tFieldsSelectorParam(field).\n\t\tWatch()\n}", "func (c *externalInterfaces) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"externalinterfaces\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}", "func (c *cronFederatedHPAs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = 
time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"cronfederatedhpas\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}", "func (c *klusterlets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tResource(\"klusterlets\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create takes the representation of a cloudwatchEventTarget and creates it. Returns the server's representation of the cloudwatchEventTarget, and an error, if there is any.
func (c *FakeCloudwatchEventTargets) Create(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{}) if obj == nil { return nil, err } return obj.(*v1alpha1.CloudwatchEventTarget), err }
[ "func NewEventTarget(ctx *pulumi.Context,\n\tname string, args *EventTargetArgs, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tif args == nil || args.Arn == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Arn'\")\n\t}\n\tif args == nil || args.Rule == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rule'\")\n\t}\n\tif args == nil {\n\t\targs = &EventTargetArgs{}\n\t}\n\tvar resource EventTarget\n\terr := ctx.RegisterResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (e *EventAPI) Create(eventType *EventType) error {\n\tconst errMsg = \"unable to create event type\"\n\n\tresponse, err := e.client.httpPOST(e.backOffConf.create(), e.eventBaseURL(), eventType, errMsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusCreated {\n\t\tbuffer, err := io.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"%s: unable to read response body\", errMsg)\n\t\t}\n\t\treturn decodeResponseToError(buffer, errMsg)\n\t}\n\n\treturn nil\n}", "func CreateCloudEvent(cloudEventVersion string) *event.Event {\n\tcloudEvent := event.New(cloudEventVersion)\n\tcloudEvent.SetID(EventId)\n\tcloudEvent.SetType(EventType)\n\tcloudEvent.SetSource(EventSource)\n\tcloudEvent.SetDataContentType(EventDataContentType)\n\tcloudEvent.SetSubject(EventSubject)\n\tcloudEvent.SetDataSchema(EventDataSchema)\n\tcloudEvent.SetExtension(constants.ExtensionKeyPartitionKey, PartitionKey)\n\t_ = cloudEvent.SetData(EventDataContentType, EventDataJson)\n\treturn &cloudEvent\n}", "func New(sess *session.Session) CloudWatchEvents {\n\treturn CloudWatchEvents{\n\t\tsvc: cloudwatchevents.New(sess),\n\t}\n}", "func CreateEventObj(tenant, namespace, name, eType, severity, message string) *evtsapi.Event {\n\tcreationTime, _ := types.TimestampProto(time.Now())\n\n\teventObj := 
&evtsapi.Event{\n\t\tTypeMeta: api.TypeMeta{Kind: \"Event\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t\tUUID: name,\n\t\t\tTenant: tenant,\n\t\t\tNamespace: namespace,\n\t\t\tCreationTime: api.Timestamp{\n\t\t\t\tTimestamp: *creationTime,\n\t\t\t},\n\t\t\tModTime: api.Timestamp{\n\t\t\t\tTimestamp: *creationTime,\n\t\t\t},\n\t\t},\n\t\tEventAttributes: evtsapi.EventAttributes{\n\t\t\tSeverity: severity,\n\t\t\tType: eType,\n\t\t\tMessage: message,\n\t\t},\n\t}\n\treturn eventObj\n}", "func CreateEvent(t string) *Event {\n\te := Event{\n\t\tType: t,\n\t}\n\n\t// Generate ID\n\tuuid, _ := uuid.NewRandom()\n\te.Id = uuid.String()\n\n\t// Set Create/Update timestamps\n\tnow := time.Now()\n\tseconds := now.Unix()\n\tnanos := int32(now.Sub(time.Unix(seconds, 0)))\n\n\tts := &timestamp.Timestamp{\n\t\tSeconds: seconds,\n\t\tNanos: nanos,\n\t}\n\n\te.CreationTime = ts\n\n\treturn &e\n}", "func (s *TargetCRUD) Create(arg ...crud.Arg) (crud.Arg, error) {\n\tevent := eventFromArg(arg[0])\n\ttarget := targetFromStuct(event)\n\tprint.CreatePrintln(\"creating target\", *target.Target.Target,\n\t\t\"on upstream\", *target.Upstream.ID)\n\treturn target, nil\n}", "func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (c 
*FakeCloudwatchEventTargets) Delete(name string, options *v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\treturn err\n}", "func (c *Client) CreateEvent(ctx context.Context, event Event) error {\n\turl := fmt.Sprintf(\"http://\" + c.hostPort + \"/events\")\n\tfmt.Println(url)\n\tif err := c.client.PostJSON(ctx, \"/events\", url, event); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func createEvent(action string) *Event {\n\treturn &Event{\n\t\tID: uuid.Generate().String(),\n\t\tTimestamp: time.Now(),\n\t\tAction: action,\n\t}\n}", "func (s *Service) CreateEvent(ctx context.Context, req *request.CreateEvent) (*response.Message, error) {\n\t// TODO\n\treturn nil, nil\n}", "func NewCloudPcAuditEvent()(*CloudPcAuditEvent) {\n m := &CloudPcAuditEvent{\n Entity: *NewEntity(),\n }\n return m\n}", "func (es *EntityEventService) Create(campID int, entID int, evt SimpleEntityEvent) (*EntityEvent, error) {\n\tvar err error\n\tend := EndpointCampaign\n\n\tif end, err = end.id(campID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid Campaign ID: %w\", err)\n\t}\n\tend = end.concat(endpointEntity)\n\n\tif end, err = end.id(entID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid Entity ID: %w\", err)\n\t}\n\tend = end.concat(es.end)\n\n\tb, err := json.Marshal(evt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot marshal SimpleEntityEvent: %w\", err)\n\t}\n\n\tvar wrap struct {\n\t\tData *EntityEvent `json:\"data\"`\n\t}\n\n\tif err = es.client.post(end, bytes.NewReader(b), &wrap); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create EntityEvent for Campaign (ID: %d): %w\", campID, err)\n\t}\n\n\treturn wrap.Data, nil\n}", "func (e *VMTEventRegistry) Create(event *VMTEvent) (*VMTEvent, error) {\n\tout, err := e.create(event)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := out.(*VMTEvent)\n\treturn result, err\n}", "func (c 
*EventClient) Create() *EventCreate {\n\tmutation := newEventMutation(c.config, OpCreate)\n\treturn &EventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}", "func CreateEvent(rUcs *Usecases) *Event {\n\treturn &Event{\n\t\trUcs: rUcs,\n\t}\n}", "func CreateEvent(request protocols.Request, outputEvent output.InternalEvent, isResponseDebug bool) *output.InternalWrappedEvent {\n\treturn CreateEventWithAdditionalOptions(request, outputEvent, isResponseDebug, nil)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update takes the representation of a cloudwatchEventTarget and updates it. Returns the server's representation of the cloudwatchEventTarget, and an error, if there is any.
func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{}) if obj == nil { return nil, err } return obj.(*v1alpha1.CloudwatchEventTarget), err }
[ "func (c *FakeCloudwatchEventTargets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(cloudwatcheventtargetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (c *Collection) UpdateEvent(\n\tkey, typ string, ts time.Time, ordinal int64, value interface{},\n) (*Event, error) {\n\theaders := map[string]string{\"Content-Type\": \"application/json\"}\n\treturn c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)\n}", "func (m *WatchEventDispatcher) OnUpdate(resourceGroup string, _, o client.Object) {\n\tif resourceGroup != m.resourceGroup {\n\t\treturn\n\t}\n\tm.events <- &Event{\n\t\tType: watch.Modified,\n\t\tObject: o,\n\t}\n}", "func (s *EventsService) Update(e EventKey, ev EventInfoDTO) (*http.Response, error) {\n\treturn s.client.put(e.URI(), ev, nil)\n}", "func (e *EventAPI) Update(eventType *EventType) error {\n\tconst errMsg = \"unable to update event type\"\n\n\tresponse, err := e.client.httpPUT(e.backOffConf.create(), e.eventURL(eventType.Name), eventType, errMsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tbuffer, err := io.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"%s: unable to read response body\", errMsg)\n\t\t}\n\t\treturn decodeResponseToError(buffer, \"unable to update event type\")\n\t}\n\n\treturn nil\n}", "func UpdateEvent(c *gin.Context) {\n\tvar inp model.Event\n\n\tc.BindJSON(&inp)\n\tc.JSON(http.StatusOK, serviceEvent.UpdateEvent(&inp))\n}", "func (r *DeviceManagementAutopilotEventRequest) Update(ctx context.Context, reqObj *DeviceManagementAutopilotEvent) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func 
(c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (r *DeviceManagementTroubleshootingEventRequest) Update(ctx context.Context, reqObj *DeviceManagementTroubleshootingEvent) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (e *Timing) Update(e2 Event) error {\n\tif e.Type() != e2.Type() {\n\t\treturn fmt.Errorf(\"statsd event type conflict: %s vs %s \", e.String(), e2.String())\n\t}\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\tp := e2.Payload().(map[string]interface{})\n\te.Sample += p[\"sample\"].(float64)\n\te.Count += p[\"cnt\"].(int64)\n\te.Value += p[\"val\"].(int64)\n\te.Min = minInt64(e.Min, p[\"min\"].(int64))\n\te.Max = maxInt64(e.Max, p[\"max\"].(int64))\n\te.Values = append(e.Values, p[\"val\"].(int64))\n\treturn nil\n}", "func (r *EventTagsService) Update(profileId int64, eventtag *EventTag) *EventTagsUpdateCall {\n\tc := &EventTagsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\tc.eventtag = eventtag\n\treturn c\n}", "func (s *TargetCRUD) Update(arg ...crud.Arg) (crud.Arg, error) {\n\tevent := eventFromArg(arg[0])\n\ttarget := targetFromStuct(event)\n\toldTarget, ok := event.OldObj.(*state.Target)\n\tif !ok {\n\t\tpanic(\"unexpected type, expected *state.Target\")\n\t}\n\tprint.DeletePrintln(\"deleting target\", *oldTarget.Target.Target,\n\t\t\"from upstream\", *oldTarget.Upstream.ID)\n\tprint.CreatePrintln(\"creating target\", *target.Target.Target,\n\t\t\"on upstream\", *target.Upstream.ID)\n\treturn target, nil\n}", "func (es *EntityEventService) Update(campID int, entID int, evtID int, evt SimpleEntityEvent) (*EntityEvent, error) {\n\tvar err error\n\tend := 
EndpointCampaign\n\n\tif end, err = end.id(campID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid Campaign ID: %w\", err)\n\t}\n\tend = end.concat(endpointEntity)\n\n\tif end, err = end.id(entID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid Entity ID: %w\", err)\n\t}\n\tend = end.concat(es.end)\n\n\tif end, err = end.id(evtID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid EntityEvent ID: %w\", err)\n\t}\n\n\tb, err := json.Marshal(evt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot marshal SimpleEntityEvent: %w\", err)\n\t}\n\n\tvar wrap struct {\n\t\tData *EntityEvent `json:\"data\"`\n\t}\n\n\tif err = es.client.put(end, bytes.NewReader(b), &wrap); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot update EntityEvent for Campaign (ID: %d): '%w'\", campID, err)\n\t}\n\n\treturn wrap.Data, nil\n}", "func (s *S3Sink) UpdateEvents(eNew *v1.Event, eOld *v1.Event) {\n\ts.eventCh.In() <- NewEventData(eNew, eOld)\n}", "func (h *HTTPSink) UpdateEvents(eNew *v1.Event, eOld *v1.Event) {\n\th.eventCh.In() <- NewEventData(eNew, eOld)\n}", "func GetEventTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventTargetState, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tvar resource EventTarget\n\terr := ctx.ReadResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (e *Timing) Update(e2 Event) error {\n\tif e.Type() != e2.Type() {\n\t\treturn fmt.Errorf(\"statsd event type conflict: %s vs %s \", e.String(), e2.String())\n\t}\n\tp := e2.Payload().(map[string]float64)\n\te.Value += p[\"val\"]\n\te.Values = append(e.Values, p[\"val\"])\n\tif e.Count == 0 { // Count will only be 0 after Reset()\n\t\te.Min = p[\"min\"]\n\t\te.Max = p[\"max\"]\n\t} else {\n\t\te.Min = minFloat64(e.Min, p[\"min\"])\n\t\te.Max = maxFloat64(e.Max, p[\"max\"])\n\t}\n\te.Count += p[\"cnt\"]\n\te.Tags = []string{}\n\treturn nil\n}", "func 
(c *FakeCloudwatchEventTargets) Create(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (o *UpdateEventParams) WithHTTPClient(client *http.Client) *UpdateEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete takes name of the cloudwatchEventTarget and deletes it. Returns an error if one occurs.
func (c *FakeCloudwatchEventTargets) Delete(name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{}) return err }
[ "func (e *EventAPI) Delete(name string) error {\n\treturn e.client.httpDELETE(e.backOffConf.create(), e.eventURL(name), \"unable to delete event type\")\n}", "func (s *serverMetricsRecorder) DeleteNamedMetric(name string) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tdelete(s.state.NamedMetrics, name)\n}", "func (r *ProjectsTraceSinksService) Delete(nameid string) *ProjectsTraceSinksDeleteCall {\n\tc := &ProjectsTraceSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.nameid = nameid\n\treturn c\n}", "func (app *frame) Delete(name string) error {\n\tif app.isStopped {\n\t\treturn nil\n\t}\n\n\tif _, ok := app.variables[name]; !ok {\n\t\tstr := fmt.Sprintf(\"variable: the name variable (%s) is not defined\", name)\n\t\treturn errors.New(str)\n\t}\n\n\tdelete(app.variables, name)\n\treturn nil\n}", "func (r *DeviceManagementAutopilotEventRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func (ep *eventsProvider) DeleteByName(name string) error {\n\tindices, _ := ep.findByName(name)\n\tif len(indices) == 0 {\n\t\treturn nil\n\t}\n\n\tfor len(indices) != 0 {\n\t\tep.mutex.Lock()\n\t\tep.Data = append(ep.Data[:indices[0]], ep.Data[indices[0]+1:]...)\n\t\tep.mutex.Unlock()\n\t\tindices, _ = ep.findByName(name)\n\t}\n\n\treturn nil\n}", "func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (r *ProjectsLocationsProcessesRunsLineageEventsService) Delete(name string) *ProjectsLocationsProcessesRunsLineageEventsDeleteCall {\n\tc := &ProjectsLocationsProcessesRunsLineageEventsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func 
deleteEvent(key model.Key) api.WatchEvent {\n\treturn api.WatchEvent{\n\t\tType: api.WatchDeleted,\n\t\tOld: &model.KVPair{\n\t\t\tKey: key,\n\t\t\tValue: uuid.NewString(),\n\t\t\tRevision: uuid.NewString(),\n\t\t},\n\t}\n}", "func (svc *Service) Delete(ownerID string, eventID string) error {\n\t_, err := svc.client.Delete(fmt.Sprintf(\"%s/%s\", eventsURL(ownerID), eventID))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Unable to perform Update\")\n\t}\n\treturn nil\n}", "func (k *Kubeclient) Delete(name string, opts *metav1.DeleteOptions) error {\n\tif len(name) == 0 {\n\t\treturn errors.New(\"failed to delete volumesnapshotdata: missing snapshotdata name\")\n\t}\n\tcli, err := k.getClientsetOrCached()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to delete VolumeSnapshotData: {%s}\", name)\n\t}\n\treturn k.del(cli, name, opts)\n}", "func (r *DeviceManagementTroubleshootingEventRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func (c *k8sClient) OnDelete(obj interface{}) {\n\tselect {\n\tcase c.eventCh <- obj:\n\tdefault:\n\t}\n}", "func (s *TargetCRUD) Delete(arg ...crud.Arg) (crud.Arg, error) {\n\tevent := eventFromArg(arg[0])\n\ttarget := targetFromStuct(event)\n\tprint.DeletePrintln(\"deleting target\", *target.Target.Target,\n\t\t\"from upstream\", *target.Upstream.ID)\n\treturn target, nil\n}", "func Delete(c *golangsdk.ServiceClient, id string) (r DeleteResult) {\n\turl := resourceURL(c, id)\n\t//fmt.Printf(\"Delete listener url: %s.\\n\", url)\n\t_, r.Err = c.Delete(url, &golangsdk.RequestOpts{\n\t\tOkCodes: []int{204},\n\t})\n\treturn\n}", "func (r *ProjectsLocationsConnectionsEventSubscriptionsService) Delete(name string) *ProjectsLocationsConnectionsEventSubscriptionsDeleteCall {\n\tc := &ProjectsLocationsConnectionsEventSubscriptionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (s *serverMetricsRecorder) 
DeleteNamedUtilization(name string) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tdelete(s.state.Utilization, name)\n}", "func (p *Probe) OnQoSDelete(*goovn.QoS) {\n}", "func (client BaseClient) DeleteName(ctx context.Context, nameID string, timeout *int64) (result autorest.Response, err error) {\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: timeout,\n\t\t\tConstraints: []validation.Constraint{{Target: \"timeout\", Name: validation.Null, Rule: false,\n\t\t\t\tChain: []validation.Constraint{{Target: \"timeout\", Name: validation.InclusiveMaximum, Rule: int64(4294967295), Chain: nil},\n\t\t\t\t\t{Target: \"timeout\", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"servicefabric.BaseClient\", \"DeleteName\", err.Error())\n\t}\n\n\treq, err := client.DeleteNamePreparer(ctx, nameID, timeout)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"servicefabric.BaseClient\", \"DeleteName\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.DeleteNameSender(req)\n\tif err != nil {\n\t\tresult.Response = resp\n\t\terr = autorest.NewErrorWithError(err, \"servicefabric.BaseClient\", \"DeleteName\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.DeleteNameResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"servicefabric.BaseClient\", \"DeleteName\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Patch applies the patch and returns the patched cloudwatchEventTarget.
func (c *FakeCloudwatchEventTargets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CloudwatchEventTarget, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(cloudwatcheventtargetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudwatchEventTarget{}) if obj == nil { return nil, err } return obj.(*v1alpha1.CloudwatchEventTarget), err }
[ "func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func GetEventTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventTargetState, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tvar resource EventTarget\n\terr := ctx.ReadResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (f *FakeWatcher) Modify(obj runtime.Object) {\n\tf.result <- Event{Modified, obj}\n}", "func (iface *Iface) patch(dev Patchable) {\n\tiface.patched = dev\n}", "func NewEventTarget(ctx *pulumi.Context,\n\tname string, args *EventTargetArgs, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tif args == nil || args.Arn == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Arn'\")\n\t}\n\tif args == nil || args.Rule == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rule'\")\n\t}\n\tif args == nil {\n\t\targs = &EventTargetArgs{}\n\t}\n\tvar resource EventTarget\n\terr := ctx.RegisterResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s *ProjectWebhookService) PatchProjectWebhook(ctx context.Context, patch *api.ProjectWebhookPatch) (*api.ProjectWebhook, error) {\n\ttx, err := s.db.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn nil, FormatError(err)\n\t}\n\tdefer tx.Rollback()\n\n\tprojectWebhook, err := patchProjectWebhook(ctx, tx, patch)\n\tif err != nil {\n\t\treturn nil, FormatError(err)\n\t}\n\n\tif err := tx.Commit(); err != nil 
{\n\t\treturn nil, FormatError(err)\n\t}\n\n\treturn projectWebhook, nil\n}", "func Patch(t testing.TB, dest, value interface{}) {\n\tNew(t).Patch(dest, value)\n}", "func Patch(old *map[string]interface{}, delta []byte) (err error) {\n\treturn fleecedelta.PatchJSON(old, delta)\n}", "func patchProjectWebhook(ctx context.Context, tx *Tx, patch *api.ProjectWebhookPatch) (*api.ProjectWebhook, error) {\n\t// Build UPDATE clause.\n\tset, args := []string{\"updater_id = ?\"}, []interface{}{patch.UpdaterId}\n\tif v := patch.Name; v != nil {\n\t\tset, args = append(set, \"name = ?\"), append(args, *v)\n\t}\n\tif v := patch.URL; v != nil {\n\t\tset, args = append(set, \"url = ?\"), append(args, *v)\n\t}\n\tif v := patch.ActivityList; v != nil {\n\t\tset, args = append(set, \"activity_list = ?\"), append(args, *v)\n\t}\n\n\targs = append(args, patch.ID)\n\n\t// Execute update query with RETURNING.\n\trow, err := tx.QueryContext(ctx, `\n\t\tUPDATE project_webhook\n\t\tSET `+strings.Join(set, \", \")+`\n\t\tWHERE id = ?\n\t\tRETURNING id, creator_id, created_ts, updater_id, updated_ts, project_id, type, name, url, activity_list\n\t`,\n\t\targs...,\n\t)\n\tif err != nil {\n\t\treturn nil, FormatError(err)\n\t}\n\tdefer row.Close()\n\n\tif row.Next() {\n\t\tvar projectWebhook api.ProjectWebhook\n\t\tvar activityList string\n\t\tif err := row.Scan(\n\t\t\t&projectWebhook.ID,\n\t\t\t&projectWebhook.CreatorId,\n\t\t\t&projectWebhook.CreatedTs,\n\t\t\t&projectWebhook.UpdaterId,\n\t\t\t&projectWebhook.UpdatedTs,\n\t\t\t&projectWebhook.ProjectId,\n\t\t\t&projectWebhook.Type,\n\t\t\t&projectWebhook.Name,\n\t\t\t&projectWebhook.URL,\n\t\t\t&activityList,\n\t\t); err != nil {\n\t\t\treturn nil, FormatError(err)\n\t\t}\n\t\tprojectWebhook.ActivityList = strings.Split(activityList, \",\")\n\n\t\treturn &projectWebhook, nil\n\t}\n\n\treturn nil, &common.Error{Code: common.NotFound, Err: fmt.Errorf(\"project hook ID not found: %d\", patch.ID)}\n}", "func PatchFunc(target, repl any) *Patch 
{\n\tassertSameFuncType(target, repl)\n\ttargetVal := reflect.ValueOf(target)\n\treplVal := reflect.ValueOf(repl)\n\treturn patchFunc(targetVal, replVal)\n}", "func Patch(pkgName, typeName, methodName string, patchFunc interface{}) {\n\t// find addr of the func\n\tsymbolName := getSymbolName(pkgName, typeName, methodName)\n\taddr := symbolTable[symbolName]\n\toriginalBytes := replaceFunction(addr, (uintptr)(getPtr(reflect.ValueOf(patchFunc))))\n\tpatchRecord[addr] = originalBytes\n}", "func NewEventPatch(ctx *pulumi.Context,\n\tname string, args *EventPatchArgs, opts ...pulumi.ResourceOption) (*EventPatch, error) {\n\tif args == nil {\n\t\targs = &EventPatchArgs{}\n\t}\n\n\targs.ApiVersion = pulumi.StringPtr(\"events.k8s.io/v1\")\n\targs.Kind = pulumi.StringPtr(\"Event\")\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"kubernetes:core/v1:EventPatch\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"kubernetes:events.k8s.io/v1beta1:EventPatch\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource EventPatch\n\terr := ctx.RegisterResource(\"kubernetes:events.k8s.io/v1:EventPatch\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func patchMutatingWebhookConfiguration(k client.Client, rootCAPem []byte, name string) (err error) {\n\n\twebhookConfiguration := &admissionregistrationv1beta1.MutatingWebhookConfiguration{}\n\tif err = k.Get(context.TODO(), types.NamespacedName{Name: name}, webhookConfiguration); err != nil {\n\t\treturn\n\t}\n\tfor i := range webhookConfiguration.Webhooks {\n\t\twebhookConfiguration.Webhooks[i].ClientConfig.CABundle = rootCAPem\n\t}\n\terr = k.Update(context.TODO(), webhookConfiguration)\n\n\treturn\n\n}", "func Patch(dest, value interface{}) Restorer {\n\tdestv := reflect.ValueOf(dest).Elem()\n\toldv := reflect.New(destv.Type()).Elem()\n\toldv.Set(destv)\n\tvaluev := 
reflect.ValueOf(value)\n\tif !valuev.IsValid() {\n\t\t// This isn't quite right when the destination type is not\n\t\t// nilable, but it's better than the complex alternative.\n\t\tvaluev = reflect.Zero(destv.Type())\n\t}\n\tdestv.Set(valuev)\n\treturn func() {\n\t\tdestv.Set(oldv)\n\t}\n}", "func (c *FakeListeners) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkextensionv1.Listener, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(listenersResource, c.ns, name, pt, data, subresources...), &networkextensionv1.Listener{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*networkextensionv1.Listener), err\n}", "func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}", "func (c *FakeCloudwatchEventTargets) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(cloudwatcheventtargetsResource, c.ns, opts))\n\n}", "func (toc *tOperationCtx) postCloudEventToWatcher(ip string, port string, endpoint string, payload []byte) error {\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"http://%s:%s%s\", ip, port, endpoint), bytes.NewBuffer(payload))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tclient := &http.Client{}\n\t_, err = client.Do(req)\n\treturn err\n}", "func (r *ProjectsTraceSinksService) Patch(nameid string, tracesink *TraceSink) *ProjectsTraceSinksPatchCall {\n\tc := &ProjectsTraceSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.nameid = nameid\n\tc.tracesink = tracesink\n\treturn c\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
JSON jsonified content message.
func (c contentMessage) JSON() string { c.Status = "success" jsonMessageBytes, e := json.MarshalIndent(c, "", " ") fatalIf(probe.NewError(e), "Unable to marshal into JSON.") return string(jsonMessageBytes) }
[ "func (msg *Message) JsonContent() (string, error) {\n\t//if msg.Content == nil {\n\t//\treturn \"\", nil\n\t//}\n\tb, err := json.Marshal(msg.Content)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}", "func (m lockCmdMessage) JSON() string {\n\tmsgBytes, e := json.MarshalIndent(m, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\treturn string(msgBytes)\n}", "func (msg *Any) JSON() []byte {\n\tjsonBytes, err := json.Marshal(*msg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jsonBytes\n}", "func (c copyMessage) JSON() string {\n\tc.Status = \"success\"\n\tcopyMessageBytes, e := json.MarshalIndent(c, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(copyMessageBytes)\n}", "func (s SizeMessage) JSON() string {\n\treturn strutil.JSON(s)\n}", "func (h aliasMessage) JSON() string {\n\th.Status = \"success\"\n\tjsonMessageBytes, e := json.MarshalIndent(h, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(jsonMessageBytes)\n}", "func bodyContent(message map[string]interface{}) *bytes.Buffer {\n\trepresent, err := json.Marshal(message)\n\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\treturn bytes.NewBuffer(represent)\n}", "func (c *Controller) JsonContent(model interface{}) {\n\tc.ResponseWriter.Header().Set(\"Content-Type\", \"application/javascript\")\n\tjson.NewEncoder(c.ResponseWriter).Encode(model)\n}", "func (s policyMessage) JSON() string {\n\tpolicyJSONBytes, e := json.MarshalIndent(s, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(policyJSONBytes)\n}", "func (d *InfoOutput) JSON() ([]byte, error) {\n\treturn json.Marshal(d.reply)\n}", "func (g *Game) getMessageJson() []byte {\n\te := g.Messages.Pop()\n\tif e == nil {\n\t\treturn nil\n\t}\n\tm := e.Value.(*TextMessage)\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn b\n}", "func 
(s stopHealMessage) JSON() string {\n\tstopHealJSONBytes, e := json.MarshalIndent(s, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(stopHealJSONBytes)\n}", "func (msg *Int64) JSON() []byte {\n\tjsonBytes, err := json.Marshal(*msg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jsonBytes\n}", "func (msg *EPNStatus) JSON() []byte {\n\tjsonBytes, err := json.Marshal(*msg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jsonBytes\n}", "func (u configExportMessage) JSON() string {\n\tu.Status = \"success\"\n\tstatusJSONBytes, e := json.MarshalIndent(u, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(statusJSONBytes)\n}", "func (s policyLinksMessage) JSON() string {\n\tpolicyJSONBytes, e := json.MarshalIndent(s, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(policyJSONBytes)\n}", "func (msg Message) ToJSON() (string, error) {\n\treturn libs.ToJSON(msg)\n}", "func (d *ConfigureOutput) JSON() ([]byte, error) {\n\treturn json.Marshal(d.reply)\n}", "func MessageToJSON(msg Message) string {\n\tvar jl string\n\tb, err := json.Marshal(msg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tjl = string(b)\n\treturn jl\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parseContent parse client Content container into printer struct.
func parseContent(c *clientContent) contentMessage { content := contentMessage{} content.Time = c.Time.Local() // guess file type. content.Filetype = func() string { if c.Type.IsDir() { return "folder" } return "file" }() content.Size = c.Size md5sum := strings.TrimPrefix(c.ETag, "\"") md5sum = strings.TrimSuffix(md5sum, "\"") content.ETag = md5sum // Convert OS Type to match console file printing style. content.Key = getKey(c) return content }
[ "func parseContent(content []byte, request bool, chunked bool) {\n\tlength := len(content)\n\tif length == 0 {\n\t\tfmt.Println(\"Data:\")\n\t\treturn\n\t} else {\n\t\tif request {\n\t\t\tfmt.Println(\"Data:\" + string(content))\n\t\t} else {\n\t\t\tfmt.Print(\"Data:\")\n\t\t}\n\t}\n\tif chunked {\n\t\tcur := 0\n\t\tfor i := 0; i < length; i++ {\n\t\t\tif i <= length-1 && content[i] == '\\n' {\n\t\t\t\tvar line string\n\t\t\t\tif content[i-1] == '\\r' {\n\t\t\t\t\tline = string(content[cur : i-1])\n\t\t\t\t} else {\n\t\t\t\t\tline = string(content[cur:i])\n\t\t\t\t}\n\t\t\t\tlength, err := strconv.ParseInt(line, 16, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif length > 0 {\n\t\t\t\t\tfmt.Println(string(content[i+1 : i+1+int(length)]))\n\t\t\t\t\ti = i + 2 + int(length)\n\t\t\t\t\tcur = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfmt.Println(string(content))\n\t}\n}", "func ParseContent(content string, params Params) (string, error) {\n\tcheckAndInitDefaultView()\n\treturn defaultViewObj.ParseContent(content, params)\n}", "func ParseText(content []byte) []interface{} {\n jsonObject := []interface{}{}\n if err := json.Unmarshal(content, &jsonObject); err != nil {\n panic(err)\n }\n return parse(jsonObject)\n}", "func ParseContent(text []byte) (*Appcast, error) {\n\tvar appcast = New()\n\terr := xml.Unmarshal(text, appcast)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn appcast, nil\n}", "func parseContent(content string) template.HTML {\n\trenderer := blackfriday.HtmlRenderer(commonHtmlFlags, \"\", \"\")\n\treturn template.HTML(blackfriday.Markdown([]byte(content), renderer, markdownExtensions))\n}", "func (b *buffer) content() []line {\n\treturn b.text\n}", "func (d *GetResult) Content(valuePtr interface{}) error {\n\treturn DefaultDecode(d.contents, d.flags, valuePtr)\n}", "func (r *Response) ParseTplContent(content string, params ...gview.Params) (string, error) {\n\treturn r.Request.GetView().ParseContent(r.Request.Context(), 
content, r.buildInVars(params...))\n}", "func (f *File) ParseContent(doc *Doc) (err error) {\n\tcontent, err := f.Open(\"content.xml\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer content.Close()\n\n\td := xml.NewDecoder(content)\n\terr = d.Decode(doc)\n\treturn\n}", "func (p *ParseData) Content() string {\n\treturn p.content\n}", "func (resp *Response) Content() ([]byte, error) {\n\tbuf := bufferpool.Get()\n\tdefer buf.Free()\n\terr := drainBody(resp.Body, buf)\n\treturn buf.Bytes(), err\n}", "func ParseMessageContents(data *bytes.Buffer) *MessageContent {\n\n\tbites := data.Bytes()\n\n\t// Set up some slice references.\n\tvar mentions []string\n\tvar emojis []string\n\tvar links []Link\n\n\t// N iteration loop\n\tlog.Debug(\"Buffer size: \", len(bites))\n\tfor current := 0; current < len(bites); current++ {\n\t\tlog.Debug(\"Current iteration: \", current)\n\t\tb := bites[current]\n\t\tswitch {\n\n\t\tcase mentionPrefix == b:\n\t\t\tm := ParseSection(bites, &current, stopForNonWord, -1, false, false)\n\t\t\tif \"\" != m {\n\t\t\t\tappendString(&mentions, &m)\n\t\t\t}\n\t\t\tcontinue\n\n\t\tcase emojiStart == b:\n\t\t\t// Emojis cannot be longer than 15 (not including the '()' )\n\t\t\te := ParseSection(bites, &current, stopForEmojiEnd, 15, false, true)\n\t\t\tif \"\" != e {\n\t\t\t\tappendString(&emojis, &e)\n\t\t\t}\n\t\t\tcontinue\n\n\t\tcase h == b:\n\t\t\t// We MAY be dealing with a URL\n\t\t\tl := parseURL(bites, &current)\n\t\t\tif nil != l {\n\t\t\t\tappendLink(&links, l)\n\t\t\t}\n\t\t\tcontinue\n\n\t\tdefault:\n\t\t\t// Keep moving forward\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Speed up the processing of web links.\n\t// This will suck when some cool individual decides to send a large of\n\t// N of links in their message.\n\tvar wg sync.WaitGroup\n\twg.Add(len(links))\n\tfor i := range links {\n\t\tgo func(l *Link) {\n\t\t\tdefer wg.Done() // Tell the wait group were done after this go routine.\n\t\t\tt, err := getWebTitle(&l.URL)\n\t\t\t// If we coudn't 
retrieve the URL we ignore it.\n\t\t\tif err != nil || \"\" == t {\n\t\t\t\tl.Title = \"Not Found\"\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tl.Title = t\n\t\t}(&links[i])\n\t}\n\n\twg.Wait()\n\n\treturn &MessageContent{Mentions: mentions, Emojis: emojis, Links: links}\n}", "func ParseLayerContent(layer *v1.Layer) LayerHandler {\n\tvar layerParser LayerCopy\n\tif layer.Type == common.COPYCOMMAND {\n\t\tlayerParser = ParseCopyLayerValue(layer.Value)\n\t}\n\n\tswitch layerParser.HandlerType {\n\t// imageList;yaml,chart\n\tcase ImageListHandler:\n\t\treturn NewImageListHandler(layerParser)\n\tcase YamlHandler:\n\t\treturn NewYamlHandler(layerParser)\n\tcase ChartHandler:\n\t\treturn NewChartHandler(layerParser)\n\t}\n\treturn nil\n}", "func (h *kafkaMessageHandler) unmarshalContent(msg consumer.Message) (content.Content, error) {\n\tbinaryContent := []byte(msg.Body)\n\n\theaders := msg.Headers\n\tsystemID := headers[systemIDKey]\n\ttxID := msg.Headers[\"X-Request-Id\"]\n\tswitch systemID {\n\tcase \"http://cmdb.ft.com/systems/methode-web-pub\":\n\t\tvar eomFile content.EomFile\n\n\t\terr := json.Unmarshal(binaryContent, &eomFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\txml.Unmarshal([]byte(eomFile.Attributes), &eomFile.Source)\n\t\teomFile = eomFile.Initialize(binaryContent).(content.EomFile)\n\t\ttheType, resolvedUuid, err := h.typeRes.ResolveTypeAndUuid(eomFile, txID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"couldn't map kafka message to methode Content while fetching its type and uuid. 
%v\", err)\n\t\t}\n\t\teomFile.Type = theType\n\t\teomFile.UUID = resolvedUuid\n\t\treturn eomFile, nil\n\tcase \"http://cmdb.ft.com/systems/wordpress\":\n\t\tvar wordPressMsg content.WordPressMessage\n\t\terr := json.Unmarshal(binaryContent, &wordPressMsg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn wordPressMsg.Initialize(binaryContent), nil\n\tcase \"http://cmdb.ft.com/systems/next-video-editor\":\n\t\tvar video content.Video\n\t\terr := json.Unmarshal(binaryContent, &video)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn video.Initialize(binaryContent), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported content with system ID: [%s]\", systemID)\n\t}\n}", "func (_BaseContentSpace *BaseContentSpaceFilterer) ParseCreateContent(log types.Log) (*BaseContentSpaceCreateContent, error) {\n\tevent := new(BaseContentSpaceCreateContent)\n\tif err := _BaseContentSpace.contract.UnpackLog(event, \"CreateContent\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}", "func LoadContent (data []byte, unsafe...bool) (*Parser, error) {\n if j, e := gjson.LoadContent(data, unsafe...); e == nil {\n return &Parser{j}, nil\n } else {\n return nil, e\n }\n}", "func (m *OnenotePage) GetContent()([]byte) {\n return m.content\n}", "func contentFromReader(content io.Reader, split bufio.SplitFunc) (Content, error) {\n\tc := Content{}\n\tscanner := bufio.NewScanner(content)\n\tscanner.Split(split)\n\n\tfor scanner.Scan() {\n\t\tc.c = append(c.c, scanner.Text())\n\t}\n\tc.reader = strings.NewReader(c.String())\n\n\treturn c, scanner.Err()\n}", "func (jv *Viewer) Content(content interface{}) error {\n\n\tjson, err := toJSON(content)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error converting %v to json: %s\", content, err.Error())\n\t}\n\twriter := colorwriter.New(\n\t\tcolorMap,\n\t\ttermbox.Attribute(jv.theme.Bg))\n\tformatter := jsonfmt.New(json, writer)\n\tif err := formatter.Format(); err != nil {\n\t\treturn 
err\n\t}\n\tformattedJSON := writer.Lines\n\n\tjv.tree = jsontree.New(formattedJSON)\n\tfor index := 0; index < len(formattedJSON); index++ {\n\t\tjv.tree.ToggleLine(index)\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
doList list all entities inside a folder.
func doList(clnt Client, isRecursive, isIncomplete bool) error { prefixPath := clnt.GetURL().Path separator := string(clnt.GetURL().Separator) if !strings.HasSuffix(prefixPath, separator) { prefixPath = prefixPath[:strings.LastIndex(prefixPath, separator)+1] } var cErr error for content := range clnt.List(isRecursive, isIncomplete, false, DirNone) { if content.Err != nil { switch content.Err.ToGoError().(type) { // handle this specifically for filesystem related errors. case BrokenSymlink: errorIf(content.Err.Trace(clnt.GetURL().String()), "Unable to list broken link.") continue case TooManyLevelsSymlink: errorIf(content.Err.Trace(clnt.GetURL().String()), "Unable to list too many levels link.") continue case PathNotFound: errorIf(content.Err.Trace(clnt.GetURL().String()), "Unable to list folder.") continue case PathInsufficientPermission: errorIf(content.Err.Trace(clnt.GetURL().String()), "Unable to list folder.") continue } errorIf(content.Err.Trace(clnt.GetURL().String()), "Unable to list folder.") cErr = exitStatus(globalErrorExitStatus) // Set the exit status. continue } if content.StorageClass == s3StorageClassGlacier { continue } // Convert any os specific delimiters to "/". contentURL := filepath.ToSlash(content.URL.Path) prefixPath = filepath.ToSlash(prefixPath) // Trim prefix of current working dir prefixPath = strings.TrimPrefix(prefixPath, "."+separator) // Trim prefix path from the content path. contentURL = strings.TrimPrefix(contentURL, prefixPath) content.URL.Path = contentURL parsedContent := parseContent(content) // Print colorized or jsonized content info. printMsg(parsedContent) } return cErr }
[ "func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {\n\tvar iErr error\n\t_, err = f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {\n\t\tif isDir {\n\t\t\td := fs.NewDir(remote, time.Time(info.Modified))\n\t\t\t// .SetID(info.ID)\n\t\t\t// FIXME more info from dir? can set size, items?\n\t\t\tentries = append(entries, d)\n\t\t} else {\n\t\t\to, err := f.newObjectWithInfo(ctx, remote, info)\n\t\t\tif err != nil {\n\t\t\t\tiErr = err\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tentries = append(entries, o)\n\t\t}\n\t\treturn false\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif iErr != nil {\n\t\treturn nil, iErr\n\t}\n\treturn entries, nil\n}", "func (c *Client) List(path string) (entries []client.DirEnt, err error) {\n\tvar ret internal.ListReturn\n\terr = c.server.Call(\"list\", &ret, path, c.session)\n\tif err != nil {\n\t\treturn nil, client.MakeFatalError(err)\n\t}\n\tif ret.Err != \"\" {\n\t\treturn nil, fmt.Errorf(ret.Err)\n\t}\n\tvar ents []client.DirEnt\n\tfor _, e := range ret.Entries {\n\t\tents = append(ents, e)\n\t}\n\treturn ents, nil\n}", "func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {\n\tdirectoryID, err := f.dirCache.FindDir(ctx, dir, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar iErr error\n\t_, err = f.listAll(ctx, directoryID,\n\t\tfunc(info *api.File) bool {\n\t\t\tremote := path.Join(dir, info.Name)\n\t\t\to, err := f.newObjectWithInfo(ctx, remote, info)\n\t\t\tif err != nil {\n\t\t\t\tiErr = err\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tentries = append(entries, o)\n\t\t\treturn false\n\t\t},\n\t\tfunc(info *api.Collection) bool {\n\t\t\tremote := path.Join(dir, info.Name)\n\t\t\tid := info.Ref\n\t\t\t// cache the directory ID for later lookups\n\t\t\tf.dirCache.Put(remote, id)\n\t\t\td := fs.NewDir(remote, info.TimeCreated).SetID(id)\n\t\t\tentries = append(entries, d)\n\t\t\treturn false\n\t\t})\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\tif iErr != nil {\n\t\treturn nil, iErr\n\t}\n\treturn entries, nil\n}", "func (c *FoldersController) GetList(w http.ResponseWriter, req *http.Request) {\n\tuserID := getUserID(req)\n\n\tfolders, err := c.repo.Get(storage.FoldersFilter{UserID: &userID})\n\tif err != nil {\n\t\tc.log.Errorf(\"Failed to get folders: %v\", err)\n\t\tinternalServerError(w)\n\t\treturn\n\t}\n\n\trespond(w, http.StatusOK, folders)\n}", "func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {\n\tfor _, name := range f.names {\n\t\tentries = append(entries, mockobject.New(prefix+name))\n\t}\n\treturn entries, nil\n}", "func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {\n\tdebug.Log(\"listing %v\", t)\n\n\tprefix, _ := be.Basedir(t)\n\n\t// make sure prefix ends with a slash\n\tif !strings.HasSuffix(prefix, \"/\") {\n\t\tprefix += \"/\"\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tlistReq := be.service.Objects.List(be.bucketName).Context(ctx).Prefix(prefix).MaxResults(int64(be.listMaxItems))\n\tfor {\n\t\tbe.sem.GetToken()\n\t\tobj, err := listReq.Do()\n\t\tbe.sem.ReleaseToken()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdebug.Log(\"returned %v items\", len(obj.Items))\n\n\t\tfor _, item := range obj.Items {\n\t\t\tm := strings.TrimPrefix(item.Name, prefix)\n\t\t\tif m == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\n\t\t\tfi := restic.FileInfo{\n\t\t\t\tName: path.Base(m),\n\t\t\t\tSize: int64(item.Size),\n\t\t\t}\n\n\t\t\terr := fn(fi)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t}\n\n\t\tif obj.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tlistReq.PageToken(obj.NextPageToken)\n\t}\n\n\treturn ctx.Err()\n}", "func (_Bfs *BfsCallerSession) 
List(absolutePath string, offset *big.Int, limit *big.Int) (*big.Int, []BfsInfo, error) {\n\treturn _Bfs.Contract.List(&_Bfs.CallOpts, absolutePath, offset, limit)\n}", "func (d *Directory) List(p string) ([]INodeInfo, error) {\n\tdir, err := d.checkPathExists(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn generateINodeInfos(dir.INodes), nil\n}", "func (l *Location) List() ([]string, error) {\n\n\tvar filenames []string\n\tclient, err := l.fileSystem.Client(l.Authority)\n\tif err != nil {\n\t\treturn filenames, err\n\t}\n\t// start timer once action is completed\n\tdefer l.fileSystem.connTimerStart()\n\n\tfileinfos, err := client.ReadDir(l.Path())\n\tif err != nil {\n\t\tif err == os.ErrNotExist {\n\t\t\treturn filenames, nil\n\t\t}\n\t\treturn filenames, err\n\t}\n\tfor _, fileinfo := range fileinfos {\n\t\tif !fileinfo.IsDir() {\n\t\t\tfilenames = append(filenames, fileinfo.Name())\n\t\t}\n\t}\n\n\treturn filenames, nil\n}", "func (db database) list(w http.ResponseWriter, req *http.Request) {\n\n\tif err := itemList.Execute(w, db); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (s *Service) ListAll() ([]*basefs.File, error) {\n\tret := []*basefs.File{}\n\n\trootNode := s.megaCli.FS.GetRoot()\n\n\tvar addAll func(*mega.Node, string) // Closure that basically appends entries to local ret\n\taddAll = func(n *mega.Node, pathstr string) {\n\t\tchildren, err := s.megaCli.FS.GetChildren(n)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t// Add to ret\n\t\tfor _, childNode := range children {\n\t\t\tspath := pathstr + \"/\" + childNode.GetName()\n\t\t\tret = append(ret, File(&MegaPath{Path: spath, Node: childNode}))\n\t\t\tif childNode.GetType() == mega.FOLDER {\n\t\t\t\taddAll(childNode, pathstr+\"/\"+childNode.GetName())\n\t\t\t}\n\t\t}\n\t}\n\n\taddAll(rootNode, \"\")\n\n\treturn ret, nil\n\n}", "func (d *Dir) List() []string {\n\tvar s []string\n\tif err := d.ensure(); err != nil {\n\t\t// Return an empty list if we can't enter the correct 
directory\n\t\treturn s\n\t}\n\tfiles, _ := ioutil.ReadDir(d.path)\n\tfor _, f := range files {\n\t\ts = append(s, f.Name())\n\t}\n\treturn s\n}", "func ListDir(f Fs, w io.Writer) error {\n\tfor dir := range f.ListDir() {\n\t\tsyncFprintf(w, \"%12d %13s %9d %s\\n\", dir.Bytes, dir.When.Format(\"2006-01-02 15:04:05\"), dir.Count, dir.Name)\n\t}\n\treturn nil\n}", "func (s *Store) List(_ context.Context, start string, f func(string) error) error {\n\troots, err := listdir(s.dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, root := range roots {\n\t\tkeys, err := listdir(filepath.Join(s.dir, root))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, tail := range keys {\n\t\t\tkey, err := decodeKey(root + tail)\n\t\t\tif err != nil || key < start {\n\t\t\t\tcontinue // skip non-key files and keys prior to the start\n\t\t\t} else if err := f(key); errors.Is(err, blob.ErrStopListing) {\n\t\t\t\treturn nil\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func List(ctx context.Context) ([]meta.SimpleTreeNode, error) {\n\tvar managerService = services.NewManagerService()\n\tvar data, err = managerService.MenuList()\n\treturn data, err\n}", "func (d *dir) list(path string, output []string) []string {\n\tdirPath := filepath.Join(path, d.name)\n\toutput = append(output, dirPath)\n\tfor _, subDir := range d.children {\n\t\toutput = subDir.list(dirPath, output)\n\t}\n\treturn output\n}", "func GetAllFolders(c *gin.Context) {\n // get db and collection refs\n client := GetMongoClient()\n file_coll := client.Database(\"streamosphere\").Collection(\"folders\")\n\n // hit the db\n cursor, err := file_coll.Find(context.Background(), bson.M{\"UserID\": c.Param(\"UserID\")})\n if err != nil { c.AbortWithError(404, err) }\n\n // decode all the folders from mongo into go structs\n var folders []*Folder\n defer cursor.Close(context.Background())\n for cursor.Next(context.Background()) {\n var folder_json Folder\n err := 
cursor.Decode(&folder_json)\n if err != nil { c.AbortWithError(500, err) }\n\n folders = append(folders, &folder_json)\n }\n\n c.Header(\"Content-Type\", \"application/json\")\n c.JSON(http.StatusOK, folders)\n}", "func (st *fakeConn) ListDir(ctx context.Context, dirPath string, full bool) (res []DirEntry, err error) {\n\tif dirPath == \"error\" {\n\t\treturn res, fmt.Errorf(\"Dummy error\")\n\n\t}\n\treturn res, err\n}", "func dirList(path string) ([]string, error) {\n\tnames := []string{}\n\tfiles, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tlog.Printf(\"Template error: %v\", err)\n\t\treturn names, nil\n\t}\n\tfor _, f := range files {\n\t\tnames = append(names, f.Name())\n\t}\n\treturn names, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
BeforeNow checks a mm/dd/yyyy string to determine if it is before now.
func BeforeNow(d string) bool { f := strings.FieldsFunc(d, func(r rune) bool { return r == '/' }) t := time.Date(atoi(f[2]), time.Month(atoi(f[0])), atoi(f[1]), 0, 0, 0, 0, time.UTC) return t.Before(time.Now()) }
[ "func BeforeNow(in string) bool {\n\tnow := time.Now().In(time.UTC).Truncate(oneDay)\n\ttest, _ := time.ParseInLocation(SpotDateFormat, in, time.UTC)\n\tif now.Equal(test) {\n\t\treturn false\n\t}\n\treturn test.Before(now)\n}", "func ISODateStringBeforeToday(datetime string) (bool, error) {\n\tdate, err := IsoDateFormatter(datetime)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\ttoday := time.Date(time.Now().Year(), time.Now().Month(), time.Now().Day(), 0, 0, 0, 0, date.Location())\n\tif date.Before(today) {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func DateBefore(testDate time.Time, beforeDate time.Time) bool {\n\tif testDate.Before(beforeDate) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsRelativeToNow(str string) bool {\n\treturn strings.HasPrefix(str, \"+\") || strings.HasPrefix(str, \"now\")\n}", "func DateBefore(val, exp time.Time) ValidationFunc {\r\n\treturn func() error {\r\n\t\tif val.Before(exp) {\r\n\t\t\treturn nil\r\n\t\t}\r\n\t\treturn fmt.Errorf(validateDateBefore, val, exp)\r\n\t}\r\n}", "func (t Date) Before(u Date) bool {\n\treturn t.Time().Before(u.Time())\n}", "func (t UnixTime) Before(t2 UnixTime) bool {\n\treturn time.Time(t).Before(time.Time(t2))\n}", "func (d Date) Before(d2 Date) bool {\n\tif d.Year != d2.Year {\n\t\treturn d.Year < d2.Year\n\t}\n\tif d.Month != d2.Month {\n\t\treturn d.Month < d2.Month\n\t}\n\treturn d.Day < d2.Day\n}", "func (h *ValidationHelper) Before(t time.Time) bool {\n\treturn h.now().Before(t.Add(-h.leeway))\n}", "func (t TimeValidators) BeforeNowUTC() Validator {\n\treturn func() error {\n\t\tnowUTC := time.Now().UTC()\n\t\tif t.Value == nil {\n\t\t\treturn Errorf(ErrTimeBefore, nil, \"before: %v\", nowUTC)\n\t\t}\n\t\tif t.Value.After(nowUTC) {\n\t\t\treturn Errorf(ErrTimeBefore, *t.Value, \"before: %v\", nowUTC)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (d Date) Before(t Date) bool {\n\treturn t.After(d)\n}", "func (t *BL) isEarlierDate(date1Str string, date2Str string) (bool, error) 
{\r\n\tlayout := time_format\r\n\r\n\t// Parse the dates\r\n\tdate1, err := time.Parse(layout, date1Str)\r\n\tif err != nil {\r\n\t\treturn true, errors.New(\"Incorrect date format for date1. Expecting mm/dd/yyyy; \" + date1Str)\r\n\t}\r\n\tdate2, err := time.Parse(layout, date2Str)\r\n\tif err != nil {\r\n\t\treturn true, errors.New(\"Incorrect date format for date2. Expecting mm/dd/yyyy; \" + date2Str)\r\n\t}\r\n\r\n\treturn date1.Before(date2) || date1.Equal(date2), nil\r\n}", "func IsBeforeCutoffTime(cutoff string) bool {\n\tcutoffTime, err := time.Parse(time.RFC822, cutoff)\n\tif err != nil {\n\t\toutput.UserErr.Printf(\"Failed to parse cutoffTime %s: %v\", cutoffTime, err)\n\t}\n\tcurrentTime := time.Now()\n\tif currentTime.Before(cutoffTime) {\n\t\treturn true\n\t}\n\treturn false\n}", "func ShouldHappenBefore(actual interface{}, expected ...interface{}) string {\n\tif fail := need(1, expected); fail != success {\n\t\treturn fail\n\t}\n\tactualTime, firstOk := actual.(time.Time)\n\texpectedTime, secondOk := expected[0].(time.Time)\n\n\tif !firstOk || !secondOk {\n\t\treturn shouldUseTimes\n\t}\n\n\tif !actualTime.Before(expectedTime) {\n\t\treturn fmt.Sprintf(shouldHaveHappenedBefore, actualTime, expectedTime, actualTime.Sub(expectedTime))\n\t}\n\n\treturn success\n}", "func TimestampBefore(ts *tspb.Timestamp, uts *tspb.Timestamp) bool {\n\treturn ts.GetSeconds() < uts.GetSeconds() || ts.GetSeconds() == uts.GetSeconds() && ts.GetNanos() < uts.GetNanos()\n}", "func (t Timestamp) Before(u Timestamp) bool {\n\treturn time.Time(t).Before(time.Time(u))\n}", "func IsDateBeforeUTCToday(requestedDate time.Time) (isBefore bool) {\n\tlocation, err := time.LoadLocation(\"UTC\")\n\tif err != nil {\n\t\treturn true\n\t}\n\tutcDate := time.Now().In(location)\n\t// Can't do the direct time comparison (time.Before() time.After())\n\t// because the actual timestamp doesn't matter, just the year/month/day\n\tisBeforeUTC := requestedDate.Year() <= utcDate.Year() && 
requestedDate.Month() <= utcDate.Month() && requestedDate.Day() < utcDate.Day()\n\ttoLog(\"IsDateBeforeUTCToday\", \"Requested: \"+requestedDate.Format(time.RFC822)+\", UTC Date: \"+utcDate.Format(time.RFC822)+\" -> BEFORE: \"+strconv.FormatBool(isBeforeUTC))\n\treturn isBeforeUTC\n}", "func IsDateExpiredFromNow(date time.Time) bool {\n\tdate = date.Local()\n\ttoday := Now()\n\n\treturn date.Before(today)\n}", "func (t *timeDataType) Before(after time.Time) *timeDataType {\n\treturn t.Validate(func(t time.Time) error {\n\t\tif t.Before(after) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"time was not before %s\", after.Format(time.RFC3339))\n\t})\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
////////////////////////////////////////////////////////////////////////////////// // IsAttachment return true if content is attachment
func (c *Content) IsAttachment() bool { return c.Type == CONTENT_TYPE_ATTACHMENT }
[ "func (p *Part) IsAttachment() bool {\n\tif p.gmimePart == nil {\n\t\treturn false\n\t}\n\tif !gobool(C.gmime_is_part(p.gmimePart)) || gobool(C.gmime_is_multi_part(p.gmimePart)) {\n\t\treturn false\n\t}\n\tif gobool(C.g_mime_part_is_attachment((*C.GMimePart)(unsafe.Pointer(p.gmimePart)))) {\n\t\treturn true\n\t}\n\tif len(p.Filename()) > 0 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isImageAttachment(mime string) bool {\n\tfor _, tp := range imageMimeTypes {\n\t\tif tp == mime {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (s *Store) IsAttachment(clientID, psychologistID string) (bool, error) {\n\n\tif strings.TrimSpace(clientID) == \"\" {\n\t\treturn false, errors.New(\"clientID is empty\")\n\t}\n\n\tif strings.TrimSpace(psychologistID) == \"\" {\n\t\treturn false, errors.New(\"psychologistID is empty\")\n\t}\n\n\tvar count int64\n\n\terr := s.db.SQL.Get(&count, `\n\tselect count(c.id) from clients c\n\t where c.client_public_id = $1 and c.psychologist_public_id = $2`, clientID, psychologistID)\n\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"an error occurred while check attachment client from psychologist\")\n\t}\n\n\tif count <= 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func isAttachmentDownload(ctx *macaron.Context) bool {\n\treturn strings.HasPrefix(ctx.Req.URL.Path, \"/attachments/\") && ctx.Req.Method == \"GET\"\n}", "func (message *Message) HasAttachments() bool {\n\treturn message.GetInteger(3591) & 0x10 != 0\n}", "func (e *Entry) HasAttachment() bool {\n\treturn e.Attachment.Name != \"\"\n}", "func (attachStatus *AttachmentStatus) ShouldSend() bool {\n\treturn *attachStatus == AttachmentAttached\n}", "func (me TxsdImpactSimpleContentExtensionType) IsFile() bool { return me.String() == \"file\" }", "func (m *SendVoice) IsMultipart() bool {\n\treturn m.File != nil\n}", "func (r *AttachmentOriginal) HasAttachmentID() bool {\n\treturn r.hasAttachmentID\n}", "func (m *Attachment) GetIsInline()(*bool) {\n 
return m.isInline\n}", "func (o *Post) HasAttachments() bool {\n\tif o != nil && o.Attachments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *SendPhoto) IsMultipart() bool {\n\treturn m.File != nil\n}", "func (r *AttachmentOriginal) HasDownload() bool {\n\treturn r.hasDownload\n}", "func (r *AttachmentPreview) HasAttachmentID() bool {\n\treturn r.hasAttachmentID\n}", "func IsMIME(buf []byte, mime string) bool {\n\tresult := false\n\ttypes.Types.Range(func(k, v interface{}) bool {\n\t\tkind := v.(types.Type)\n\t\tif kind.MIME.Value == mime {\n\t\t\tmatcher := matchers.Matchers[kind]\n\t\t\tresult = matcher(buf) != types.Unknown\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\n\treturn result\n}", "func (o *PostsIdJsonPost) HasAttachments() bool {\n\tif o != nil && o.Attachments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (a FramebufferAttachment) IsDepth() bool {\n\treturn a == FramebufferAttachment_Depth\n}", "func (o *ProjectsIdPostsJsonPost) HasAttachments() bool {\n\tif o != nil && o.Attachments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsComment return true if content is comment
func (c *Content) IsComment() bool { return c.Type == CONTENT_TYPE_COMMENT }
[ "func (t Type) IsComment() bool {\n\treturn comm_start < t && t < comm_end\n}", "func isComment(s state) bool {\n\tswitch s {\n\tcase stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:\n\t\treturn true\n\t}\n\treturn false\n}", "func (t Token) IsComment() bool {\n\treturn t.Kind == TBlockComment || t.Kind == TLineComment\n}", "func IsComment(lex *LexProduct) bool {\n\treturn lex.ProductType() == int(PGLA_PRODUCT_COMMENT)\n}", "func IsComment(s string) bool {\n\treturn len(s) == 0 || s[0] == '#'\n}", "func (l *line) isComment() bool {\n\treturn len(l.tokens) > 0 && l.tokens[0] == slash\n}", "func (f *FileInfo) isComment(i Item) bool {\n\titem := f.items[i]\n\tif item.length < 2 {\n\t\treturn false\n\t}\n\t// see if item text starts with \"//\" or \"/*\"\n\tif f.data[item.offset] != '/' {\n\t\treturn false\n\t}\n\tc := f.data[item.offset+1]\n\treturn c == '/' || c == '*'\n}", "func (l *line) isHTMLComment() bool {\n\treturn len(l.tokens) > 0 && l.tokens[0] == slash+slash\n}", "func (me TAttlistCommentsCorrectionsRefType) IsCommentIn() bool { return me.String() == \"CommentIn\" }", "func (me TAttlistCommentsCorrectionsRefType) IsCommentOn() bool { return me.String() == \"CommentOn\" }", "func isCommentContaining(s, target string) bool {\n\tss := strings.Split(s, \"//\")\n\tif len(ss) < 2 {\n\t\treturn false // it's not a comment\n\t}\n\treturn strings.Index(strings.TrimSpace(ss[1]), target) == 0\n}", "func (b *builder) shouldTakeComment(prev, next *stackElem) bool {\n\tif prev.t.Category() != node.Comment || !b.areAdjacent(prev, next) {\n\t\treturn false\n\t}\n\n\tif next.t.Category() != node.Comment {\n\t\treturn true\n\t}\n\n\t// We take sequences of non-multiline comments, but otherwise stop after the first one.\n\treturn !prev.t.HasProperty(node.IsMultilineToken) && !next.t.HasProperty(node.IsMultilineToken)\n}", "func (tb *TextBuf) InComment(pos TextPos) bool {\n\tcs := tb.CommentStart(pos.Ln)\n\tif cs < 0 {\n\t\treturn 
false\n\t}\n\treturn pos.Ch > cs\n}", "func (parser *Parser) tokenComment() bool {\n\treturn parser.nextToken().Type == scanner.TokenComment\n}", "func hasStartComment() {}", "func hasComment(x Expr, text string) bool {\n\tif x == nil {\n\t\treturn false\n\t}\n\tfor _, com := range x.Comment().Before {\n\t\tif strings.Contains(strings.ToLower(com.Token), text) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (cfg *AdjunctCfg) Comments() bool {\n\treturn true // FUTURE: okay, maybe this should be configurable :)\n}", "func (p *Doc) Comment(key, comments string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\t// 如果所有注释为空\n\tif comments == \"\" {\n\t\tp.lines.InsertBefore(&line{typo: '#', value: \"#\"}, e)\n\t\treturn true\n\t}\n\n\t// 创建一个新的Scanner\n\tscanner := bufio.NewScanner(strings.NewReader(comments))\n\tfor scanner.Scan() {\n\t\tp.lines.InsertBefore(&line{typo: '#', value: \"#\" + scanner.Text()}, e)\n\t}\n\n\treturn true\n}", "func (c Comment) IsWhitespace() bool {\n\to := C.clang_Comment_isWhitespace(c.c)\n\n\treturn o != C.uint(0)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsPage return true if content is page
func (c *Content) IsPage() bool { return c.Type == CONTENT_TYPE_PAGE }
[ "func (c *Container) IsPage() bool {\n\treturn c.Title != \"\"\n}", "func (b *Block) IsPage() bool {\n\treturn b.Type == BlockPage\n}", "func (b *Block) IsSubPage() bool {\n\tpanicIf(b.Type != BlockPage)\n\tif b.Parent == nil {\n\t\treturn false\n\t}\n\treturn b.ParentID == b.Parent.ID\n}", "func (b *Block) IsLinkToPage() bool {\n\tif b.Type != BlockPage {\n\t\treturn false\n\t}\n\treturn b.ParentTable == TableSpace\n}", "func (p Page) inPage(s string) bool {\n\tfor _, v := range p.Links {\n\t\tif s == v.Url.String() || v.Url.String()+\"/\" == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p Page) IsHTML() bool {\n\treturn p.Type().MediaType() == \"text/html\"\n}", "func (m *Method) IsPaged() bool {\n\treturn m.kind == MethodPaged\n}", "func (o OrderedCollectionPage) IsObject() bool {\n\treturn true\n}", "func (p *Paginator) IsCurrentPage(page int) bool {\n\treturn p.CurrentPage() == page\n}", "func (s *Site) contentPage(path string) (page *page.Page, found, folderRedirect bool) {\n\tif page, found = s.pageMap[path]; !found {\n\t\tslashed := path + \"/\"\n\t\tif page, found = s.pageMap[slashed]; found {\n\t\t\tfolderRedirect = true\n\t\t}\n\t\treturn\n\t}\n\treturn\n}", "func (o *Bundles) HasCurrentPage() bool {\n\tif o != nil && o.CurrentPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o OrderedCollectionPage) IsCollection() bool {\n\treturn true\n}", "func (me TxsdCounterSimpleContentExtensionType) IsSite() bool { return me.String() == \"site\" }", "func (o *Origin1) GetPageOk() (*PageType, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Page, true\n}", "func IsAjaxPage(vals url.Values) bool {\n\tpage := getPageName(vals)\n\tajax := vals.Get(\"ajax\")\n\tasJson := vals.Get(\"asJson\")\n\treturn page == FetchEventboxAjaxPageName ||\n\t\tpage == FetchResourcesAjaxPageName ||\n\t\tpage == GalaxyContentAjaxPageName ||\n\t\tpage == EventListAjaxPageName ||\n\t\tpage == AjaxChatAjaxPageName ||\n\t\tpage == 
NoticesAjaxPageName ||\n\t\tpage == RepairlayerAjaxPageName ||\n\t\tpage == TechtreeAjaxPageName ||\n\t\tpage == PhalanxAjaxPageName ||\n\t\tpage == ShareReportOverlayAjaxPageName ||\n\t\tpage == JumpgatelayerAjaxPageName ||\n\t\tpage == FederationlayerAjaxPageName ||\n\t\tpage == UnionchangeAjaxPageName ||\n\t\tpage == ChangenickAjaxPageName ||\n\t\tpage == PlanetlayerAjaxPageName ||\n\t\tpage == TraderlayerAjaxPageName ||\n\t\tpage == PlanetRenameAjaxPageName ||\n\t\tpage == RightmenuAjaxPageName ||\n\t\tpage == AllianceOverviewAjaxPageName ||\n\t\tpage == SupportAjaxPageName ||\n\t\tpage == BuffActivationAjaxPageName ||\n\t\tpage == AuctioneerAjaxPageName ||\n\t\tpage == HighscoreContentAjaxPageName ||\n\t\tajax == \"1\" ||\n\t\tasJson == \"1\"\n}", "func (this ActivityStreamsActorPropertyIterator) IsActivityStreamsPage() bool {\n\treturn this.activitystreamsPageMember != nil\n}", "func (p Pagination) IsCurrent(page int) bool {\n\treturn page == p.CurrentPage\n}", "func (o OrderedCollectionPage) IsLink() bool {\n\treturn false\n}", "func (a *Article) IsContent() bool {\n\treturn !ess.IsStrEmpty(a.Content)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsTrashed return true if content is trashed
func (c *Content) IsTrashed() bool { return c.Status == CONTENT_STATUS_TRASHED }
[ "func (o *TrashStructureApplication) HasTrashed() bool {\n\tif o != nil && !IsNil(o.Trashed) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *TrashStructureApplication) GetTrashed() bool {\n\tif o == nil || IsNil(o.Trashed) {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Trashed\n}", "func (o *TrashStructureApplication) GetTrashedOk() (*bool, bool) {\n\tif o == nil || IsNil(o.Trashed) {\n\t\treturn nil, false\n\t}\n\treturn o.Trashed, true\n}", "func (o *TrashStructureApplication) SetTrashed(v bool) {\n\to.Trashed = &v\n}", "func ItemTrashed(r *provider.DeleteResponse, req *provider.DeleteRequest, spaceOwner, executant *user.UserId) events.ItemTrashed {\n\topaqueID := utils.ReadPlainFromOpaque(r.Opaque, \"opaque_id\")\n\treturn events.ItemTrashed{\n\t\tSpaceOwner: spaceOwner,\n\t\tExecutant: executant,\n\t\tRef: req.Ref,\n\t\tID: &provider.ResourceId{\n\t\t\tStorageId: req.Ref.GetResourceId().GetStorageId(),\n\t\t\tSpaceId: req.Ref.GetResourceId().GetSpaceId(),\n\t\t\tOpaqueId: opaqueID,\n\t\t},\n\t\tTimestamp: utils.TSNow(),\n\t}\n}", "func (e *Entry) IsDeleted() bool {\n\treturn e.Latest().GetTombstone()\n}", "func (me TxsdSpace) IsPreserve() bool { return me.String() == \"preserve\" }", "func (s *Subtitle) IsDeleted() bool {\n\treturn s.Num == -1\n}", "func (d dynamicSystemView) Tainted() bool {\n\treturn d.mountEntry.Tainted\n}", "func (m *UserModel) WithTrashed() *UserModel {\n\treturn m.WithoutGlobalScopes(\"soft_delete\")\n}", "func (obj *InstallPhase) IsDeleted() bool {\n\treturn obj.GetDeletionTimestamp() != nil\n}", "func (obj *RollbackPhase) IsDeleted() bool {\n\treturn obj.GetDeletionTimestamp() != nil\n}", "func (e *ExternalService) IsDeleted() bool { return !e.DeletedAt.IsZero() }", "func (s *shard) IsFlushing() bool { return s.isFlushing.Load() }", "func isStefulSetMarkedForTermination(ss *appsv1.StatefulSet) bool {\n\treturn ss.DeletionTimestamp != nil\n}", "func (m *Model) IsSoftDelete() bool {\n\treturn m.SoftDelete\n}", "func (v 
*SourceView) GetSmartBackspace() bool {\n\treturn gobool(C.gtk_source_view_get_smart_backspace(v.native()))\n}", "func (obj *TrafficRolloutPhase) IsDeleted() bool {\n\treturn obj.GetDeletionTimestamp() != nil\n}", "func (s TxnStatus) IsRolledBack() bool { return s.ttl == 0 && s.commitTS == 0 }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsDraft return true if content is draft
func (c *Content) IsDraft() bool { return c.Status == CONTENT_STATUS_DRAFT }
[ "func (m *ModelStatus) IsDraft() bool {\n\treturn m.Status == Draft\n}", "func (prc *GHPullRequestClient) IsDraft(ctx context.Context) bool {\n\treturn aws.BoolValue(prc.pullRequest.Draft)\n}", "func IsDraft(source string) bool {\n\treturn strings.Contains(filepath.Base(filepath.Dir(source)), \"drafts\")\n}", "func (r *RepositoryRelease) GetDraft() bool {\n\tif r == nil || r.Draft == nil {\n\t\treturn false\n\t}\n\treturn *r.Draft\n}", "func Draft(v bool) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldDraft, v))\n}", "func (u *User) IsEditor() bool {\n\treturn u.UserGroupID == EDITOR\n}", "func (me TRoleType) IsEditor() bool { return me.String() == \"editor\" }", "func (br *BlogRouter) GetDrafts(c *gin.Context) {\n\to := models.PageSlice{}\n\tfor _, v := range models.MPages {\n\t\tif v.Draft {\n\t\t\to = append(o, v)\n\t\t}\n\t}\n\tsort.Sort(o)\n\tdata := gin.H{\n\t\t\"posts\": o,\n\t\t\"title\": viper.GetString(\"blog.title\"),\n\t\t\"description\": viper.GetString(\"blog.description\"),\n\t\t\"extra\": template.HTML(`These articles are drafts and may be incomplete`),\n\t\t\"analytics\": gin.H{\"tag\": viper.GetString(\"analytics.tag\"), \"enabled\": viper.GetBool(\"analytics.enabled\")},\n\t\t\"author\": models.GetGlobalAuthor(),\n\t}\n\tc.HTML(http.StatusOK, \"index.tmpl\", data)\n}", "func (s ChangesetPublicationState) Published() bool { return s == ChangesetPublicationStatePublished }", "func (s *Service) ChangeDraft(ctx context.Context, draftID, companyID string, post *job.Posting) (string, error) {\n\tspan := s.tracer.MakeSpan(ctx, \"ChangeDraft\")\n\tdefer span.Finish()\n\n\t// get userID\n\tuserID, err := s.authRPC.GetUserID(ctx)\n\tif err != nil {\n\t\ts.tracer.LogError(span, err)\n\t\treturn \"\", err\n\t}\n\n\terr = post.SetCompanyID(companyID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = post.SetID(draftID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// check admin level\n\tallowed := 
s.checkAdminLevel(\n\t\tctx,\n\t\tpost.GetCompanyID(),\n\t\tcompanyadmin.AdminLevelAdmin,\n\t\tcompanyadmin.AdminLevelJob,\n\t)\n\tif !allowed {\n\t\treturn \"\", errors.New(\"not_allowed\")\n\t}\n\n\tpost.SetUserID(userID)\n\t// id := post.GenerateID()\n\n\tif !post.JobMetadata.Anonymous {\n\t\tpost.CompanyDetails = &company.Details{\n\t\t\t// TODO: company avatar, URL, Industry, subindustry\n\t\t}\n\t\tpost.CompanyDetails.SetCompanyID(post.GetCompanyID())\n\t}\n\n\tpost.CreatedAt = time.Now()\n\n\tpost.Status = job.StatusDraft\n\t// post.JobPriority = post.JobMetadata.JobPlan.GetPriority()\n\n\t// if post.JobDetails.SalaryMin > 0 && post.JobDetails.SalaryInterval != \"\" {\n\t// \tpost.NormalizedSalaryMin = float32(post.JobDetails.SalaryMin) / float32(post.JobDetails.SalaryInterval.GetHours()) // TODO also convert currency\n\t// }\n\n\t// if post.JobDetails.SalaryMax > 0 && post.JobDetails.SalaryInterval != \"\" {\n\t// \tpost.NormalizedSalaryMax = float32(post.JobDetails.SalaryMax) / float32(post.JobDetails.SalaryInterval.GetHours()) // TODO also convert currency\n\t// }\n\n\terr = s.jobs.UpdateJobPosting(ctx, post)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn post.GetID(), nil\n}", "func (s *MessagesSendMessageRequest) GetClearDraft() (value bool) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.Flags.Has(7)\n}", "func (m *Member) IsPublished() bool { return m.Published }", "func (msgr *Messenger) IsEditable(id string) bool {\n\ti, err := message.ParseID(id)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tmsgr.messageMutex.Lock()\n\tdefer msgr.messageMutex.Unlock()\n\n\tm, ok := msgr.messages[i]\n\tif ok {\n\t\t// Editable if same author.\n\t\treturn m.Author().Name().String() == msgr.channel.user.String()\n\t}\n\n\treturn false\n}", "func (e PartialContent) IsPartialContent() {}", "func (me TPubStatusUnion4) IsRevised() bool { return me.String() == \"revised\" }", "func (s *MessagesSendInlineBotResultRequest) GetClearDraft() (value bool) {\n\tif s 
== nil {\n\t\treturn\n\t}\n\treturn s.Flags.Has(7)\n}", "func (me THITReviewStatus) IsMarkedForReview() bool { return me.String() == \"MarkedForReview\" }", "func ShowDraftPath() string {\n\n\treturn fmt.Sprintf(\"/sao/v1/drafts/\")\n}", "func (m *MailTips) GetIsModerated()(*bool) {\n val, err := m.GetBackingStore().Get(\"isModerated\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsGlobal return true if space is global
func (s *Space) IsGlobal() bool { return s.Type == SPACE_TYPE_GLOBAL }
[ "func (TypesObject) IsGlobal() bool { return boolResult }", "func (app *builder) IsGlobal() Builder {\n\tapp.isGlobal = true\n\treturn app\n}", "func (o *RiskRulesListAllOfData) HasIsGlobal() bool {\n\tif o != nil && !IsNil(o.IsGlobal) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsObjectGlobal(obj *metav1.ObjectMeta) bool {\n\tif obj.Annotations == nil {\n\t\treturn false\n\t}\n\n\tif obj.Annotations[util.GlobalLabel] == \"true\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *RiskRulesListAllOfData) GetIsGlobal() bool {\n\tif o == nil || IsNil(o.IsGlobal) {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.IsGlobal\n}", "func (c *C) ExistKeyGlobal(k string) bool {\n\t_, exist := c.Global[k]\n\n\treturn exist\n}", "func (lli LogLineItem) Global() bool {\n\treturn lli.TestId == nil\n}", "func checkForGlobal(vals []ast.Expr) bool {\n\tif len(vals) == 0 {\n\t\treturn false\n\t}\n\n\tif len(vals) < 2 {\n\t\tif list, ok := vals[0].(*ast.ListLit); ok {\n\t\t\tvals = list.Value\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\tlit, ok := vals[len(vals)-1].(*ast.BasicLit)\n\tif !ok {\n\t\treturn false\n\t}\n\tif lit.Kind == token.STRING && lit.Value == \"!global\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Global() Scope {\n\treturn globalScope\n}", "func FieldNameIsGlobal(name string) bool {\n\tfor _, n := range GlobalFieldNames() {\n\t\tif n == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (me TxsdDunsNumberDataTypeSimpleContentExtensionDunsNumberType) IsGlobalUltimate() bool {\n\treturn me.String() == \"global ultimate\"\n}", "func (o *RiskRulesListAllOfData) SetIsGlobal(v bool) {\n\to.IsGlobal = &v\n}", "func (t *Table) HasGlobalTs() bool {\n\treturn t.globalTs != 0\n}", "func ModeGlobal(d *latest.DeployConfig) {\n\td.Mode = \"global\"\n}", "func (m Module) Global(name string) (DevicePtr, int64, error) {\n\tvar d C.CUdeviceptr\n\tvar size C.size_t\n\tmod := m.c()\n\tstr := C.CString(name)\n\tif err := 
result(C.cuModuleGetGlobal(&d, &size, mod, str)); err != nil {\n\t\treturn 0, 0, err\n\t}\n\treturn DevicePtr(d), int64(size), nil\n}", "func (o *RiskRulesListAllOfData) GetIsGlobalOk() (*bool, bool) {\n\tif o == nil || IsNil(o.IsGlobal) {\n\t\treturn nil, false\n\t}\n\treturn o.IsGlobal, true\n}", "func (s QorSEOSetting) GetIsGlobalSEO() bool {\n\treturn s.IsGlobalSEO\n}", "func (or OktetoRegistry) HasGlobalPushAccess() (bool, error) {\n\tif !or.config.IsOktetoCluster() {\n\t\treturn false, nil\n\t}\n\timage := or.imageCtrl.ExpandOktetoGlobalRegistry(globalTestImage)\n\treturn or.client.HasPushAccess(image)\n}", "func getGlobalInfo() (globalInfo map[string]interface{}) {\n\tglobalInfo = map[string]interface{}{/*\n\t\t\"isDistXL\": globalIsDistXL,\n\t\t\"isXL\": globalIsXL,\n\t\t\"isBrowserEnabled\": globalIsBrowserEnabled,\n\t\t\"isWorm\": globalWORMEnabled,\n\t\t\"isEnvBrowser\": globalIsEnvBrowser,\n\t\t\"isEnvCreds\": globalIsEnvCreds,\n\t\t\"isEnvRegion\": globalIsEnvRegion,\n\t\t\"isSSL\": globalIsSSL,\n\t\t\"serverRegion\": globalServerRegion,\n\t\t// Add more relevant global settings here.*/\n\t}\n\n\treturn globalInfo\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsPersonal return true if space is personal
func (s *Space) IsPersonal() bool { return s.Type == SPACE_TYPE_PERSONAL }
[ "func (me TcontactMethodUseType) IsPersonal() bool { return me.String() == \"personal\" }", "func (me TResumeItemTypeTypes) IsPersonal() bool { return me.String() == \"Personal\" }", "func (me TBasicReferenceType) IsPersonal() bool { return me.String() == \"Personal\" }", "func (obj *Global) IsPersonalMode(ctx context.Context) (bool, error) {\n\tresult := &struct {\n\t\tReturn bool `json:\"qReturn\"`\n\t}{}\n\terr := obj.RPC(ctx, \"IsPersonalMode\", result)\n\treturn result.Return, err\n}", "func (s *System) IsPublic() bool { return s.Name == PublicSystem.Name || s.Name == PublicCDSystem.Name }", "func (me TdegreeTypes) IsPostprofessional() bool { return me.String() == \"postprofessional\" }", "func (me TxsdContactType) IsPerson() bool { return me.String() == \"person\" }", "func (me TdegreeTypes) IsProfessional() bool { return me.String() == \"professional\" }", "func (me TxsdMarkerTypeMarkerUnits) IsUserSpace() bool { return me.String() == \"userSpace\" }", "func (me TxsdContactRole) IsTech() bool { return me.String() == \"tech\" }", "func (me TrestrictionType) IsPublic() bool { return me.String() == \"public\" }", "func (me TPatentStatusTypeTypes) IsPatentFiled() bool { return me.String() == \"PatentFiled\" }", "func (me TAssociationTypeType) IsProfessional() bool { return me.String() == \"Professional\" }", "func (me TartIdTypeInt) IsBookaccession() bool { return me.String() == \"bookaccession\" }", "func (n UsernsMode) IsPrivate() bool {\n\treturn !n.IsHost()\n}", "func (me TrestrictionType) IsPrivate() bool { return me.String() == \"private\" }", "func (me TcontactMethodLocationType) IsOffice() bool { return me.String() == \"office\" }", "func (svc *AuthenticationService) HasPersonalAccessTokenAuth() bool {\n\treturn svc.authType == PersonalAccessToken\n}", "func (me TAttlistGeneralNoteOwner) IsPip() bool { return me.String() == \"PIP\" }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsArchived return true if space is archived
func (s *Space) IsArchived() bool { return s.Type == SPACE_STATUS_ARCHIVED }
[ "func (sdk Sdk) IsArchived() bool {\n\treturn sdk.archiveFile() != \"\"\n}", "func (r *BackupItem) IsArchived() bool {\n\treturn r.Status&StatusArchived == StatusArchived\n}", "func (o *ShortenBitlinkBodyAllOf) HasArchived() bool {\n\tif o != nil && o.Archived != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *DataExportQuery) HasArchived() bool {\n\tif o != nil && o.Archived != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *StickerSet) GetIsArchived() (value bool) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.IsArchived\n}", "func (s *StickerSet) GetArchived() (value bool) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.Flags.Has(1)\n}", "func (r *Repository) GetArchived() bool {\n\tif r == nil || r.Archived == nil {\n\t\treturn false\n\t}\n\treturn *r.Archived\n}", "func (s *Wave) SetIsArchived(v bool) *Wave {\n\ts.IsArchived = &v\n\treturn s\n}", "func (o LookupUserDataMappingResultOutput) Archived() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v LookupUserDataMappingResult) bool { return v.Archived }).(pulumi.BoolOutput)\n}", "func (o *SummaryColumnEstimatedResponse) HasArchived() bool {\n\tif o != nil && o.Archived != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Application) SetIsArchived(v bool) *Application {\n\ts.IsArchived = &v\n\treturn s\n}", "func (fa FileAttributes) IsArchive() bool {\n\treturn fa&32 > 0\n}", "func (o *ShortenBitlinkBodyAllOf) SetArchived(v bool) {\n\to.Archived = &v\n}", "func (o *ShortenBitlinkBodyAllOf) GetArchivedOk() (*bool, bool) {\n\tif o == nil || o.Archived == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Archived, true\n}", "func (o *DataExportQuery) GetArchivedOk() (*string, bool) {\n\tif o == nil || o.Archived == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Archived, true\n}", "func (c *AdsListCall) Archived(archived bool) *AdsListCall {\n\tc.urlParams_.Set(\"archived\", fmt.Sprint(archived))\n\treturn c\n}", "func IsArchive(path string) bool {\n\tif filepath.Ext(path) == 
\".temp\" {\n\t\tpath = path[:len(path)-len(\".temp\")]\n\t}\n\n\t_, err := archiver.ByExtension(path)\n\treturn err == nil\n}", "func (m *ServiceUpdateMessageViewpoint) GetIsArchived()(*bool) {\n return m.isArchived\n}", "func (s *StickerSet) SetArchived(value bool) {\n\tif value {\n\t\ts.Flags.Set(1)\n\t\ts.Archived = true\n\t} else {\n\t\ts.Flags.Unset(1)\n\t\ts.Archived = false\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsPage return true if container is page
func (c *Container) IsPage() bool { return c.Title != "" }
[ "func (c *Content) IsPage() bool {\n\treturn c.Type == CONTENT_TYPE_PAGE\n}", "func (b *Block) IsPage() bool {\n\treturn b.Type == BlockPage\n}", "func (o OrderedCollectionPage) IsCollection() bool {\n\treturn true\n}", "func (b *Block) IsSubPage() bool {\n\tpanicIf(b.Type != BlockPage)\n\tif b.Parent == nil {\n\t\treturn false\n\t}\n\treturn b.ParentID == b.Parent.ID\n}", "func (p *Paginator) IsCurrentPage(page int) bool {\n\treturn p.CurrentPage() == page\n}", "func (m *Method) IsPaged() bool {\n\treturn m.kind == MethodPaged\n}", "func (t *Type) IsContainer() bool {\n\t_, ok := frugalContainerTypes[t.Name]\n\treturn ok\n}", "func (b *Block) IsLinkToPage() bool {\n\tif b.Type != BlockPage {\n\t\treturn false\n\t}\n\treturn b.ParentTable == TableSpace\n}", "func (p Pagination) IsCurrent(page int) bool {\n\treturn page == p.CurrentPage\n}", "func (o OrderedCollectionPage) IsObject() bool {\n\treturn true\n}", "func (tn *TargetNode) IsContainer() bool {\n\treturn tn.proto.GetIsContainer()\n}", "func (n PidMode) IsContainer() bool {\n\t_, ok := containerID(string(n))\n\treturn ok\n}", "func IsContainerized() (bool, error) {\n\treturn false, nil\n}", "func (p Page) inPage(s string) bool {\n\tfor _, v := range p.Links {\n\t\tif s == v.Url.String() || v.Url.String()+\"/\" == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (b *BaseElement) IsContainer() bool {\n\treturn false\n}", "func (n NetworkMode) IsContainer() bool {\n\t_, ok := containerID(string(n))\n\treturn ok\n}", "func (p *Pagination) Show() bool {\n\treturn p.NumberOfPages() > 1\n}", "func (p *Paginator) hasPages() bool {\n\treturn p.PagerData.TotalPage > 1\n}", "func (p *Paginator) IsActive(page int) bool {\n\treturn p.Page() == page\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsSpace return true if container is space
func (c *Container) IsSpace() bool { return c.Key != "" }
[ "func IsSpace(lex *LexProduct) bool {\n\treturn lex.ProductType() == int(PGLA_PRODUCT_SPACE)\n}", "func IsContainerized() (bool, error) {\n\treturn false, nil\n}", "func IsSpace(r rune) bool {\n\treturn is(space, r)\n}", "func IsSpaceRoot(r *Node) bool {\n\tpath := r.InternalPath()\n\tif spaceNameBytes, err := xattr.Get(path, xattrs.SpaceNameAttr); err == nil {\n\t\tif string(spaceNameBytes) != \"\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *IpamNetworkDataData) HasSpaceIsTemplate() bool {\n\tif o != nil && o.SpaceIsTemplate != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsContainerized() (bool, error) {\n\t// TODO: Implement jail detection for freeBSD\n\treturn false, errors.New(\"cannot detect if we are in container\")\n}", "func (me TxsdClipPathTypeClipPathUnits) IsUserSpace() bool { return me.String() == \"userSpace\" }", "func (o *IpamNetworkDataData) HasSpaceDescription() bool {\n\tif o != nil && o.SpaceDescription != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (n *Node) IsSpaceRoot(ctx context.Context) bool {\n\t_, err := n.Xattr(ctx, prefixes.SpaceNameAttr)\n\treturn err == nil\n}", "func (me Tokens) HasSpaces() bool {\n\tfor i := 1; i < len(me); i++ {\n\t\tif diff := me[i].Pos.Off0 - (me[i-1].Pos.Off0 + len(me[i-1].Lexeme)); diff > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isSpace(c rune) bool {\n\treturn c == ' ' || c == '\\t'\n}", "func (o *IpamNetworkDataData) HasSpaceName() bool {\n\tif o != nil && o.SpaceName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isSpace(r rune) bool {\n\treturn unicode.IsSpace(r)\n}", "func (adapter *LevelAdapter) IsCyberspace() (result bool) {\n\tif properties := adapter.properties(); properties != nil {\n\t\tresult = *properties.CyberspaceFlag\n\t}\n\treturn\n}", "func (me TxsdTspanTypeLengthAdjust) IsSpacing() bool { return me.String() == \"spacing\" }", "func (o *IpamNetworkDataData) HasSpaceClassName() bool {\n\tif o != nil && 
o.SpaceClassName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdMarkerTypeMarkerUnits) IsUserSpace() bool { return me.String() == \"userSpace\" }", "func isContainerMetric(e *loggregator_v2.Envelope) bool {\n\tgauge := e.GetGauge()\n\tif len(gauge.Metrics) != 5 {\n\t\treturn false\n\t}\n\trequired := []string{\n\t\t\"cpu\",\n\t\t\"memory\",\n\t\t\"disk\",\n\t\t\"memory_quota\",\n\t\t\"disk_quota\",\n\t}\n\n\tfor _, req := range required {\n\t\tif _, found := gauge.Metrics[req]; !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s SpaceUnit) Space(SpaceUnit, int8) MetricUnit {\n\tpanic(\"Cannot add another space unit\")\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combined return united slice with all watchers
func (wi *WatchInfo) Combined() []*Watcher { var result []*Watcher result = append(result, wi.PageWatchers...) MAINLOOP: for _, watcher := range wi.SpaceWatchers { for _, pageWatcher := range wi.PageWatchers { if watcher.Key == pageWatcher.Key { continue MAINLOOP } } result = append(result, watcher) } return result }
[ "func StickersSlice() StickersService {\n\treturn &sliceStickers{}\n}", "func (t *tentacle) Items() []WatchKey {\n\treturn t.items\n}", "func watchFilterer(t *store, ns string, list bool) func(watch.Event) (watch.Event, bool) {\n\treturn func(in watch.Event) (watch.Event, bool) {\n\t\tencodedBytes, err := runtime.Encode(t.codec, in.Object)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"couldn't encode watch event object (%s)\", err)\n\t\t\treturn watch.Event{}, false\n\t\t}\n\t\tif list {\n\t\t\t// if we're watching a list, extract to a list object\n\t\t\tfinalObj := t.listShell()\n\t\t\tif err := decode(t.codec, encodedBytes, finalObj); err != nil {\n\t\t\t\tglog.Errorf(\"couldn't decode watch event bytes (%s)\", err)\n\t\t\t\treturn watch.Event{}, false\n\t\t\t}\n\t\t\tif !t.hasNamespace {\n\t\t\t\t// if we're watching a list and not supposed to have a namespace, strip namespaces\n\t\t\t\tobjs, err := meta.ExtractList(finalObj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Errorf(\"couldn't extract a list from %#v (%s)\", finalObj, err)\n\t\t\t\t\treturn watch.Event{}, false\n\t\t\t\t}\n\t\t\t\tobjList := make([]runtime.Object, len(objs))\n\t\t\t\tfor i, obj := range objs {\n\t\t\t\t\tif err := removeNamespace(obj); err != nil {\n\t\t\t\t\t\tglog.Errorf(\"couldn't remove namespace from %#v (%s)\", obj, err)\n\t\t\t\t\t\treturn watch.Event{}, false\n\t\t\t\t\t}\n\t\t\t\t\tobjList[i] = obj\n\t\t\t\t}\n\t\t\t\tif err := meta.SetList(finalObj, objList); err != nil {\n\t\t\t\t\tglog.Errorf(\"setting list items (%s)\", err)\n\t\t\t\t\treturn watch.Event{}, false\n\t\t\t\t}\n\t\t\t\treturn watch.Event{\n\t\t\t\t\tType: in.Type,\n\t\t\t\t\tObject: finalObj,\n\t\t\t\t}, true\n\t\t\t}\n\t\t\treturn watch.Event{\n\t\t\t\tType: in.Type,\n\t\t\t\tObject: finalObj,\n\t\t\t}, true\n\t\t}\n\t\tfinalObj := t.singularShell(\"\", \"\")\n\t\tif err := decode(t.codec, encodedBytes, finalObj); err != nil {\n\t\t\tglog.Errorf(\"couldn't decode watch event bytes (%s)\", err)\n\t\t\treturn 
watch.Event{}, false\n\t\t}\n\t\tif !t.hasNamespace {\n\t\t\tif err := removeNamespace(finalObj); err != nil {\n\t\t\t\tglog.Errorf(\"couldn't remove namespace from %#v (%s)\", finalObj, err)\n\t\t\t\treturn watch.Event{}, false\n\t\t\t}\n\t\t}\n\t\treturn watch.Event{\n\t\t\tType: in.Type,\n\t\t\tObject: finalObj,\n\t\t}, true\n\t}\n\n}", "func ThingChanSlice(inp ...[]Thing) (out ThingFrom) {\n\tcha := make(chan Thing)\n\tgo chanThingSlice(cha, inp...)\n\treturn cha\n}", "func mutatingWatcherFor(source watch.Interface, mutator func(runtime.Object) error) watch.Interface {\n\tw := mutatingWatcher{\n\t\tmutator: mutator,\n\t\tsource: source,\n\t\toutput: make(chan watch.Event),\n\t\twg: &sync.WaitGroup{},\n\t}\n\tw.wg.Add(1)\n\tgo func(input <-chan watch.Event, output chan<- watch.Event) {\n\t\tdefer w.wg.Done()\n\t\tfor event := range input {\n\t\t\tif err := mutator(event.Object); err != nil {\n\t\t\t\toutput <- watch.Event{\n\t\t\t\t\tType: watch.Error,\n\t\t\t\t\tObject: &errors.NewInternalError(fmt.Errorf(\"failed to mutate object in watch event: %v\", err)).ErrStatus,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\toutput <- event\n\t\t\t}\n\t\t}\n\t}(source.ResultChan(), w.output)\n\treturn &w\n}", "func (c *FakeDispatchers) Watch(opts v1.ListOptions) watch.AggregatedWatchInterface {\n\taggWatch := watch.NewAggregatedWatcher()\n\twatcher, err := c.Fake.\n\t\tInvokesWatch(testing.NewWatchActionWithMultiTenancy(dispatchersResource, c.ns, opts, c.te))\n\n\taggWatch.AddWatchInterface(watcher, err)\n\treturn aggWatch\n}", "func (w *Window) Slice() []float64 {\n\tw.mx.RLock()\n\t// 4 Times faster than \"defer Unlock\"\n\tret := w.base[w.start : w.start+w.Len]\n\tw.mx.RUnlock()\n\treturn ret\n}", "func ThingChanSlice(inp ...[]Thing) (out <-chan Thing) {\n\tcha := make(chan Thing)\n\tgo chanThingSlice(cha, inp...)\n\treturn cha\n}", "func (iobuf *buf) slice(free, base, bound uint) *Slice {\n\tatomic.AddInt32(&iobuf.refcount, 1)\n\treturn &Slice{iobuf: iobuf, free: free, base: 
base, Contents: iobuf.Contents[base:bound]}\n}", "func (mock *UniversalClientMock) WatchCalls() []struct {\n\tFn func(*redis.Tx) error\n\tKeys []string\n} {\n\tvar calls []struct {\n\t\tFn func(*redis.Tx) error\n\t\tKeys []string\n\t}\n\tlockUniversalClientMockWatch.RLock()\n\tcalls = mock.calls.Watch\n\tlockUniversalClientMockWatch.RUnlock()\n\treturn calls\n}", "func (m *Machine) Watchers() []*watchers.WatcherDef {\n\treturn m.WatcherDefs\n}", "func (c *AnalyticsController) runWatches() {\n\tlastResourceVersion := big.NewInt(0)\n\tcurrentResourceVersion := big.NewInt(0)\n\twatchListItems := WatchFuncList(c.kclient, c.client)\n\tfor name := range watchListItems {\n\n\t\t// assign local variable (not in range operator above) so that each\n\t\t// goroutine gets the correct watch function required\n\t\twfnc := watchListItems[name]\n\t\tn := name\n\t\tbackoff := 1 * time.Second\n\n\t\tgo wait.Until(func() {\n\t\t\t// any return from this func only exits that invocation of the func.\n\t\t\t// wait.Until will call it again after its sync period.\n\t\t\twatchLog := log.WithFields(log.Fields{\n\t\t\t\t\"watch\": n,\n\t\t\t})\n\t\t\twatchLog.Infof(\"starting watch\")\n\t\t\tw, err := wfnc.watchFunc(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\twatchLog.Errorf(\"error creating watch: %v\", err)\n\t\t\t}\n\n\t\t\twatchLog.Debugf(\"backing off watch for %v seconds\", backoff)\n\t\t\ttime.Sleep(backoff)\n\t\t\tbackoff = backoff * 2\n\t\t\tif backoff > 60*time.Second {\n\t\t\t\tbackoff = 60 * time.Second\n\t\t\t}\n\n\t\t\tif w == nil {\n\t\t\t\twatchLog.Errorln(\"watch function nil, watch not created, returning\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase event, ok := <-w.ResultChan():\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\twatchLog.Warnln(\"watch channel closed unexpectedly, attempting to re-establish\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tif event.Type == watch.Error {\n\t\t\t\t\t\twatchLog.Errorf(\"watch channel returned error: %s\", 
spew.Sdump(event))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// success means the watch is working.\n\t\t\t\t\t// reset the backoff back to 1s for this watch\n\t\t\t\t\tbackoff = 1 * time.Second\n\n\t\t\t\t\tif event.Type == watch.Added || event.Type == watch.Deleted {\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\twatchLog.Errorf(\"Unable to create object meta for %v: %v\", event.Object, err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tm, err := meta.Accessor(event.Object)\n\t\t\t\t\t\t// if both resource versions can be converted to numbers\n\t\t\t\t\t\t// and if the current resource version is lower than the\n\t\t\t\t\t\t// last recorded resource version for this resource type\n\t\t\t\t\t\t// then skip the event\n\t\t\t\t\t\tc.mutex.RLock()\n\t\t\t\t\t\tif _, ok := lastResourceVersion.SetString(c.watchResourceVersions[n], 10); ok {\n\t\t\t\t\t\t\tif _, ok = currentResourceVersion.SetString(m.GetResourceVersion(), 10); ok {\n\t\t\t\t\t\t\t\tif lastResourceVersion.Cmp(currentResourceVersion) == 1 {\n\t\t\t\t\t\t\t\t\twatchLog.Debugf(\"ResourceVersion %v is to old (%v)\",\n\t\t\t\t\t\t\t\t\t\tcurrentResourceVersion, c.watchResourceVersions[n])\n\t\t\t\t\t\t\t\t\tc.mutex.RUnlock()\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.mutex.RUnlock()\n\n\t\t\t\t\t\t// each watch is a separate go routine\n\t\t\t\t\t\tc.mutex.Lock()\n\t\t\t\t\t\tc.watchResourceVersions[n] = m.GetResourceVersion()\n\t\t\t\t\t\tc.mutex.Unlock()\n\n\t\t\t\t\t\tanalytic, err := newEvent(c.typer, event.Object, event.Type)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\twatchLog.Errorf(\"unexpected error creating analytic from watch event %#v\", event.Object)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// additional info will be set to the analytic and\n\t\t\t\t\t\t\t// an instance queued for all destinations\n\t\t\t\t\t\t\terr := c.AddEvent(analytic)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\twatchLog.Errorf(\"error adding event: %v - %v\", err, 
analytic)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}, 1*time.Millisecond, c.stopChannel)\n\t}\n}", "func oneishToFiveObservable() Observable {\n return NewObservableSlice([]interface{}{1, 1, 2, 3, 3, 4, 5, 5})\n}", "func provisionWatchersByServiceName(conn redis.Conn, offset int, limit int, name string) (provisionWatchers []models.ProvisionWatcher, edgexErr errors.EdgeX) {\n\tobjects, err := getObjectsByRevRange(conn, CreateKey(ProvisionWatcherCollectionServiceName, name), offset, limit)\n\tif err != nil {\n\t\treturn provisionWatchers, errors.NewCommonEdgeXWrapper(err)\n\t}\n\n\tprovisionWatchers = make([]models.ProvisionWatcher, len(objects))\n\tfor i, in := range objects {\n\t\tpw := models.ProvisionWatcher{}\n\t\terr := json.Unmarshal(in, &pw)\n\t\tif err != nil {\n\t\t\treturn []models.ProvisionWatcher{}, errors.NewCommonEdgeX(errors.KindDatabaseError, \"provision watcher format parsing failed from the database\", err)\n\t\t}\n\t\tprovisionWatchers[i] = pw\n\t}\n\n\treturn provisionWatchers, nil\n}", "func Get() []Event {\n\tvar events []Event\n\tfor len(q) > 0 {\n\t\tevents = append(events, <-q)\n\n\t}\n\treturn events\n}", "func (e *Pusher) ShowWatchers() {\n\tfor _, k := range e.Watchers {\n\t\tfmt.Println(\"Watcher: \", k)\n\t}\n}", "func (mock *HarborRepositoryInterfaceMock) WatchCalls() []struct {\n\tOpts v1.ListOptions\n} {\n\tvar calls []struct {\n\t\tOpts v1.ListOptions\n\t}\n\tlockHarborRepositoryInterfaceMockWatch.RLock()\n\tcalls = mock.calls.Watch\n\tlockHarborRepositoryInterfaceMockWatch.RUnlock()\n\treturn calls\n}", "func (f *MemKv) setupWatchers(key string, v *memKvRec) {\n\tfor watchKey, wl := range f.cluster.watchers {\n\t\tfor _, w := range wl {\n\t\t\tif w.recursive {\n\t\t\t\tif strings.HasPrefix(key, watchKey) {\n\t\t\t\t\tv.watchers = append(v.watchers, w)\n\t\t\t\t\tsendEvent(w, key, v, false)\n\t\t\t\t}\n\t\t\t} else if watchKey == key {\n\t\t\t\tv.watchers = append(v.watchers, w)\n\t\t\t\tsendEvent(w, 
key, v, false)\n\t\t\t}\n\t\t}\n\t}\n}", "func (mock *PodControllerMock) WatchCalls() []struct {\n\tNamespace string\n\tOpts v1b.ListOptions\n} {\n\tvar calls []struct {\n\t\tNamespace string\n\t\tOpts v1b.ListOptions\n\t}\n\tlockPodControllerMockWatch.RLock()\n\tcalls = mock.calls.Watch\n\tlockPodControllerMockWatch.RUnlock()\n\treturn calls\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UnmarshalJSON is custom container ID unmarshaler
func (c *ContainerID) UnmarshalJSON(b []byte) error { switch { case len(b) == 0: // nop case b[0] == '"': *c = ContainerID(strings.Replace(string(b), "\"", "", -1)) default: *c = ContainerID(string(b)) } return nil }
[ "func (c *Container) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &c.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Container) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &c.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &c.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &c.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (id *ID) UnmarshalJSON(b []byte) error {\n\tvar s string\n\terr := json.Unmarshal(b, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = hex.Decode(id[:], []byte(s))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (n *ContentID) UnmarshalJSON(p []byte) error {\n\tvar id []byte\n\terr := json.Unmarshal(p, &id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(id) != wordSize {\n\t\treturn fmt.Errorf(\"ContentID: Incorrect size: %d\", len(id))\n\t}\n\tn.id = make([]byte, wordSize)\n\tcopy(n.id, id)\n\treturn nil\n}", "func (c 
*CorelationID) UnmarshalJSON(b []byte) error {\n\ttype T struct {\n\t\tID string `json:\"id\"`\n\t}\n\tvar tmp T\n\tif err := json.Unmarshal(b, &tmp); err != nil {\n\t\treturn err\n\t}\n\n\tc.corealtionID = tmp.ID\n\treturn nil\n}", "func (j *JobID) UnmarshalJSON(b []byte) error {\n\tvar u UUID\n\tif err := json.Unmarshal(b, &u); err != nil {\n\t\treturn err\n\t}\n\t*j = JobID(u)\n\treturn nil\n}", "func (cs *ContainerService) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"properties\":\n\t\t\tif v != nil {\n\t\t\t\tvar properties Properties\n\t\t\t\terr = json.Unmarshal(*v, &properties)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcs.Properties = &properties\n\t\t\t}\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcs.ID = &ID\n\t\t\t}\n\t\tcase \"name\":\n\t\t\tif v != nil {\n\t\t\t\tvar name string\n\t\t\t\terr = json.Unmarshal(*v, &name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcs.Name = &name\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar string\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcs.Type = &typeVar\n\t\t\t}\n\t\tcase \"location\":\n\t\t\tif v != nil {\n\t\t\t\tvar location string\n\t\t\t\terr = json.Unmarshal(*v, &location)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcs.Location = &location\n\t\t\t}\n\t\tcase \"tags\":\n\t\t\tif v != nil {\n\t\t\t\tvar tags map[string]*string\n\t\t\t\terr = json.Unmarshal(*v, &tags)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcs.Tags = tags\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (uid *MyULID) UnmarshalJSON(data []byte) error {\n\tvar s string\n\terr := 
json.Unmarshal(data, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmp, err := ulid.Parse(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*uid = MyULID(string(tmp[:]))\n\treturn nil\n}", "func (id *KeyID) UnmarshalJSON(data []byte) error {\n\t// need to strip leading and trailing double quotes\n\tif data[0] != '\"' || data[len(data)-1] != '\"' {\n\t\treturn fmt.Errorf(\"KeyID is not quoted\")\n\t}\n\tdata = data[1 : len(data)-1]\n\t*id = make([]byte, hex.DecodedLen(len(data)))\n\t_, err := hex.Decode(*id, data)\n\treturn err\n}", "func (a *AzureContainerInfo) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"containerName\":\n\t\t\terr = unpopulate(val, \"ContainerName\", &a.ContainerName)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"dataFormat\":\n\t\t\terr = unpopulate(val, \"DataFormat\", &a.DataFormat)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"storageAccountCredentialId\":\n\t\t\terr = unpopulate(val, \"StorageAccountCredentialID\", &a.StorageAccountCredentialID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (f *FileID) UnmarshalJSON(b []byte) error {\n\tvar fstr string\n\terr := json.Unmarshal(b, &fstr)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*f, err = DecodeFileID(fstr)\n\treturn err\n}", "func (o *OuContainer) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", o, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &o.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", 
&o.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &o.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &o.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &o.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &o.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &o.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &o.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", o, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *IdentityDocument) UnmarshalJSON(data []byte) error {\n\ttype identityDocument IdentityDocument\n\tvar doc identityDocument\n\terr := json.Unmarshal(data, &doc)\n\n\tif err == nil {\n\t\t*d = IdentityDocument(doc)\n\t} else {\n\t\t// the id is surrounded by \"\\\" characters, so strip them\n\t\td.ID = string(data[1 : len(data)-1])\n\t}\n\n\treturn nil\n}", "func (t *TrustedIDProvider) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &t.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &t.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &t.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &t.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Card) UnmarshalJSON(data 
[]byte) error {\n\tif id, ok := ParseID(data); ok {\n\t\tc.ID = id\n\t\treturn nil\n\t}\n\n\ttype card Card\n\tvar v card\n\tif err := json.Unmarshal(data, &v); err != nil {\n\t\treturn err\n\t}\n\n\t*c = Card(v)\n\treturn nil\n}", "func (c *ContainerNetworkInterface) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &c.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &c.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &c.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (sc *StorageContainer) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"properties\":\n\t\t\tif v != nil {\n\t\t\t\tvar storageContainerProperties StorageContainerProperties\n\t\t\t\terr = json.Unmarshal(*v, &storageContainerProperties)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsc.StorageContainerProperties = &storageContainerProperties\n\t\t\t}\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsc.ID = &ID\n\t\t\t}\n\t\tcase \"name\":\n\t\t\tif v != nil {\n\t\t\t\tvar name string\n\t\t\t\terr = json.Unmarshal(*v, &name)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsc.Name = &name\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar string\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsc.Type = &typeVar\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (u *USERID) UnmarshalJSON(b []byte) error {\n\tvar intid int\n\tif err := json.Unmarshal(b, &intid); err != nil {\n\t\tvar stringid string\n\t\tif err := json.Unmarshal(b, &stringid); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*u = USERID(stringid)\n\t\treturn nil\n\t}\n\t*u = USERID(strconv.Itoa(intid))\n\treturn nil\n}", "func (sid *SpanID) UnmarshalJSON(data []byte) error {\n\tsid.id = [8]byte{}\n\treturn unmarshalJSON(sid.id[:], data)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ToQuery convert params to URL query
func (p SearchParameters) ToQuery() string { return paramsToQuery(p) }
[ "func makeQueryStringFromParam(params map[string][]string) string {\n\tif params == nil {\n\t\treturn \"\"\n\t}\n\tresult := \"\"\n\tfor key, array := range params {\n\t\tfor _, value := range array {\n\t\t\tkeyVal := fmt.Sprintf(\"%s-%s\", key, value)\n\t\t\tif result == \"\" {\n\t\t\t\tresult = \"?\" + keyVal\n\t\t\t} else {\n\t\t\t\tresult = result + \"&\" + keyVal\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}", "func (p *StreamsParams) ToQueryParams() map[string]string {\n\tparams := make(map[string]string)\n\tif p.After != \"\" {\n\t\tparams[\"after\"] = p.After\n\t}\n\tif p.Before != \"\" {\n\t\tparams[\"before\"] = p.Before\n\t}\n\tif p.First > 100 {\n\t\tp.First = 100\n\t}\n\tif p.First > 0 {\n\t\tparams[\"first\"] = string(p.First)\n\t}\n\tif p.GameID != \"\" {\n\t\tparams[\"game_id\"] = p.GameID\n\t}\n\tif p.Language != \"\" {\n\t\tparams[\"language\"] = p.Language\n\t}\n\tif p.UserID != \"\" {\n\t\tparams[\"user_id\"] = p.UserID\n\t}\n\tif p.UserLogin != \"\" {\n\t\tparams[\"user_login\"] = p.UserLogin\n\t}\n\treturn params\n}", "func (filter TestRunFilter) ToQuery() (q url.Values) {\n\tu := url.URL{}\n\tq = u.Query()\n\tif !filter.SHAs.EmptyOrLatest() {\n\t\tfor _, sha := range filter.SHAs {\n\t\t\tq.Add(\"sha\", sha)\n\t\t}\n\t}\n\tif filter.Labels != nil && filter.Labels.Cardinality() > 0 {\n\t\tfor label := range filter.Labels.Iter() {\n\t\t\tq.Add(\"label\", label.(string))\n\t\t}\n\t}\n\tif len(filter.Products) > 0 {\n\t\tfor _, p := range filter.Products {\n\t\t\tq.Add(\"product\", p.String())\n\t\t}\n\t}\n\tif filter.Aligned != nil {\n\t\tq.Set(\"aligned\", strconv.FormatBool(*filter.Aligned))\n\t}\n\tif filter.MaxCount != nil {\n\t\tq.Set(\"max-count\", fmt.Sprintf(\"%v\", *filter.MaxCount))\n\t}\n\tif filter.Offset != nil {\n\t\tq.Set(\"offset\", fmt.Sprintf(\"%v\", *filter.Offset))\n\t}\n\tif filter.From != nil {\n\t\tq.Set(\"from\", filter.From.Format(time.RFC3339))\n\t}\n\tif filter.To != nil {\n\t\tq.Set(\"to\", 
filter.From.Format(time.RFC3339))\n\t}\n\tif filter.View != nil {\n\t\tq.Set(\"view\", *filter.View)\n\t}\n\treturn q\n}", "func (p *Params) EncodeToQuery() string {\n\treturn \"\"\n}", "func (p *GetAllParams) QueryString() string {\n\turlValues := &url.Values{}\n\n\turlvalues.AddStringSliceToURLValues(urlValues, p.Statuses, \"statuses\")\n\tif p.Limit > 0 {\n\t\turlValues.Add(\"limit\", strconv.Itoa(p.Limit))\n\t}\n\turlvalues.AddTimeToURLValues(urlValues, p.CreatedAfter, \"created_after\")\n\turlvalues.AddTimeToURLValues(urlValues, p.CreatedBefore, \"created_before\")\n\turlvalues.AddTimeToURLValues(urlValues, p.PaidAfter, \"paid_after\")\n\turlvalues.AddTimeToURLValues(urlValues, p.PaidBefore, \"paid_before\")\n\turlvalues.AddTimeToURLValues(urlValues, p.PaidBefore, \"paid_before\")\n\turlvalues.AddTimeToURLValues(urlValues, p.ExpiredAfter, \"expired_after\")\n\turlvalues.AddTimeToURLValues(urlValues, p.ExpiredBefore, \"expired_before\")\n\turlvalues.AddStringSliceToURLValues(urlValues, p.ClientTypes, \"client_types\")\n\turlvalues.AddStringSliceToURLValues(urlValues, p.PaymentChannels, \"payment_channels\")\n\tif p.OnDemandLink != \"\" {\n\t\turlValues.Add(\"on_demand\", p.OnDemandLink)\n\t}\n\tif p.RecurringPaymentID != \"\" {\n\t\turlValues.Add(\"recurring_payment_id\", p.RecurringPaymentID)\n\t}\n\n\treturn urlValues.Encode()\n}", "func buildQueryParamUrl(reqURL *url.URL, queryStructs []interface{}, queryParams map[string]string) error {\n\turlValues, err := url.ParseQuery(reqURL.RawQuery)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// encodes query structs into a url.Values map and merges maps\n\tfor _, queryStruct := range queryStructs {\n\t\tqueryValues, err := goquery.Values(queryStruct)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor key, values := range queryValues {\n\t\t\tfor _, value := range values {\n\t\t\t\turlValues.Add(key, value)\n\t\t\t}\n\t\t}\n\t}\n\tfor k, v := range queryParams {\n\t\turlValues.Add(k, v)\n\t}\n\t// url.Values format 
to a sorted \"url encoded\" string, e.g. \"key=val&foo=bar\"\n\treqURL.RawQuery = urlValues.Encode()\n\treturn nil\n}", "func (args ForecastArgs) QueryParams() url.Values {\n\tq := make(url.Values)\n\tif args.Location != nil {\n\t\tfor k, v := range args.Location.LocationQueryParams() {\n\t\t\tq[k] = v\n\t\t}\n\t}\n\n\tif !args.Start.IsZero() {\n\t\tq.Add(\"start_time\", args.Start.Format(time.RFC3339))\n\t}\n\tif !args.End.IsZero() {\n\t\tq.Add(\"end_time\", args.End.Format(time.RFC3339))\n\t}\n\tif args.Timestep > 0 {\n\t\tq.Add(\"timestep\", strconv.Itoa(args.Timestep))\n\t}\n\tif args.UnitSystem != \"\" {\n\t\tq.Add(\"unit_system\", args.UnitSystem)\n\t}\n\tif len(args.Fields) > 0 {\n\t\tq.Add(\"fields\", strings.Join(args.Fields, \",\"))\n\t}\n\treturn q\n}", "func PrepareQuery(params map[string]string) url.Values {\n\tquery := url.Values{}\n\tfor key, value := range params {\n\t\tquery.Add(key, value)\n\t}\n\treturn query\n}", "func (sc SearchClient) QueryParams() url.Values {\n\tparams := url.Values{}\n\n\tif sc.FilterID > 0 {\n\t\tparams.Add(\"filter_id\", strconv.Itoa(sc.FilterID))\n\t}\n\n\tif sc.PerPage > 1 && sc.PerPage != 25 {\n\t\tparams.Add(\"per_page\", strconv.Itoa(sc.PerPage))\n\t}\n\n\tif len(sc.Key) > 0 {\n\t\tparams.Add(\"key\", sc.Key)\n\t}\n\n\tif len(sc.SortDirection) > 0 {\n\t\tparams.Add(\"sd\", sc.SortDirection)\n\t}\n\n\tif len(sc.SortField) > 0 {\n\t\tparams.Add(\"sf\", sc.SortField)\n\t}\n\n\treturn params\n}", "func (r AnnounceRequest) ToQuery() (vs url.Values) {\n\tvs = make(url.Values, 9)\n\tvs.Set(\"info_hash\", r.InfoHash.BytesString())\n\tvs.Set(\"peer_id\", r.PeerID.BytesString())\n\tvs.Set(\"uploaded\", strconv.FormatInt(r.Uploaded, 10))\n\tvs.Set(\"downloaded\", strconv.FormatInt(r.Downloaded, 10))\n\tvs.Set(\"left\", strconv.FormatInt(r.Left, 10))\n\n\tif r.IP != \"\" {\n\t\tvs.Set(\"ip\", r.IP)\n\t}\n\tif r.Event > 0 {\n\t\tvs.Set(\"event\", strconv.FormatInt(int64(r.Event), 10))\n\t}\n\tif r.Port > 0 {\n\t\tvs.Set(\"port\", 
strconv.FormatUint(uint64(r.Port), 10))\n\t}\n\tif r.NumWant != 0 {\n\t\tvs.Set(\"numwant\", strconv.FormatUint(uint64(r.NumWant), 10))\n\t}\n\tif r.Key != 0 {\n\t\tvs.Set(\"key\", strconv.FormatInt(int64(r.Key), 10))\n\t}\n\n\t// BEP 23\n\tif r.Compact {\n\t\tvs.Set(\"compact\", \"1\")\n\t} else {\n\t\tvs.Set(\"compact\", \"0\")\n\t}\n\n\treturn\n}", "func (w *Wrapper) paramToQuery(data interface{}, parentheses ...bool) (param string) {\n\tswitch v := data.(type) {\n\tcase *Wrapper:\n\t\tif len(parentheses) > 0 {\n\t\t\tif parentheses[0] == false {\n\t\t\t\tparam = fmt.Sprintf(\"%s\", v.query)\n\t\t\t}\n\t\t} else {\n\t\t\tparam = fmt.Sprintf(\"(%s)\", v.query)\n\t\t}\n\tcase function:\n\t\tparam = v.query\n\tcase nil:\n\t\tparam = \"NULL\"\n\tdefault:\n\t\tparam = \"?\"\n\t}\n\treturn\n}", "func (args HistoryArgs) QueryParams() url.Values {\n\tq := make(url.Values)\n\n\tif !args.StartAt.IsZero() {\n\t\tq.Add(\"start_at\", args.StartAt.Format(dateLayout))\n\t}\n\n\tif !args.EndAt.IsZero() {\n\t\tq.Add(\"end_at\", args.EndAt.Format(dateLayout))\n\t}\n\n\tif args.Base != \"\" {\n\t\tq.Add(\"base\", args.Base)\n\t}\n\n\tif len(args.Symbols) > 0 {\n\t\tq.Add(\"symbols\", strings.Join(args.Symbols, \",\"))\n\t}\n\n\treturn q\n}", "func (args LatestArgs) QueryParams() url.Values {\n\tq := make(url.Values)\n\n\tif args.Base != \"\" {\n\t\tq.Add(\"base\", args.Base)\n\t}\n\n\tif len(args.Symbols) > 0 {\n\t\tq.Add(\"symbols\", strings.Join(args.Symbols, \",\"))\n\t}\n\n\treturn q\n}", "func buildQuery (req *http.Request, args map[string]string) (*http.Request) {\n\tq := req.URL.Query()\n\t// build query from map\n\tfor key, _ := range args {\n\t\tif key == \"q\" {\n\t\t\tparts := strings.Split(args[key],\" \")\n\t\t\tq.Add(\"q\", strings.Join(parts, \"+\"))\n\t\t} else {\n\t\t\tq.Add(key, args[key])\n\t\t}\n\t}\n\treq.URL.RawQuery = q.Encode()\n\treturn req\n}", "func (o AvailablePhoneNumbersOptions) ToQueryString() (url.Values, error) {\n\treturn query.Values(o)\n}", 
"func (tir TreeInstanceRequest) ToQueryURI() string {\n\tvar (\n\t\tparams = &url.Values{}\n\t\tt = reflect.TypeOf(tir)\n\t\tv = reflect.ValueOf(tir)\n\t\tfv string\n\t)\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfv = v.Field(i).Interface().(string)\n\t\tif fv != \"\" {\n\t\t\tparams.Set(t.Field(i).Tag.Get(_query), fv)\n\t\t}\n\t}\n\treturn params.Encode()\n}", "func StructToQueryString(st interface{}) (qs string, err error) {\n\tjsonBytes, err := json.Marshal(st)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconvert := map[string]interface{}{}\n\terr = json.Unmarshal(jsonBytes, &convert)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tqsParams := make([]string, len(convert))\n\ti := 0\n\tfor key, val := range convert {\n\t\tqsParams[i] = fmt.Sprintf(\"%s=%v\", key, val)\n\t\ti++\n\t}\n\n\tqs = strings.Join(qsParams, \"&\")\n\treturn\n}", "func getQueryParams(v interface{}, vals url.Values) error {\n\t// normalize all query string key/values\n\targs := make(map[string]string)\n\n\tfor k, v := range vals {\n\t\tif len(v) > 0 {\n\t\t\targs[k] = v[0]\n\t\t}\n\t}\n\n\tb, err := json.Marshal(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn json.Unmarshal(b, v)\n}", "func (f FilterParameter) ToURLParams() string {\n\tflat := make([]string, 0)\n\ttemplate := \"filters[%s][][%s]=%s\"\n\n\tfor key, values := range f.filters {\n\t\tfor _, value := range values {\n\t\t\tflat = append(flat, fmt.Sprintf(template, key, \"type\", value.Type))\n\t\t\tflat = append(flat, fmt.Sprintf(template, key, \"value\", value.Value))\n\t\t}\n\t}\n\n\treturn strings.Join(flat, \"&\")\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use ListRepositoryReq.ProtoReflect.Descriptor instead.
func (*ListRepositoryReq) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{3} }
[ "func (*ListMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{23}\n}", "func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{12}\n}", "func (*ListRefsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{15}\n}", "func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{1}\n}", "func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_seankhliao_archrepo_v1alpha2_archrepo_proto_rawDescGZIP(), []int{2}\n}", "func (*ListTypeRequest) Descriptor() ([]byte, []int) {\n\treturn file_backend_proto_backend_proto_rawDescGZIP(), []int{11}\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}", "func (*ListModelVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{88}\n}", "func (*ListNotificationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{63}\n}", "func (*RequestGetRevokedList) Descriptor() ([]byte, []int) {\n\treturn file_pkg_rpc_rpc_proto_rawDescGZIP(), []int{45}\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_natan_proto_rawDescGZIP(), []int{1}\n}", "func (*ListRepositoryRes) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{4}\n}", "func (*UpdateRepoReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{5}\n}", "func (*DescribeRepositoryReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{13}\n}", "func (*ListResultRequest) Descriptor() ([]byte, []int) {\n\treturn file_kobe_proto_rawDescGZIP(), []int{19}\n}", "func 
(*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{14}\n}", "func (*ListPodsRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{7}\n}", "func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{13}\n}", "func (*ListNotificationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_v1_notification_proto_rawDescGZIP(), []int{8}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use ListRepositoryRes.ProtoReflect.Descriptor instead.
func (*ListRepositoryRes) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{4} }
[ "func (*ListRepositoryReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{3}\n}", "func (*ListMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{23}\n}", "func (*ListMetadataResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{24}\n}", "func (*ListRefsResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{16}\n}", "func (*List) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{77}\n}", "func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{1}\n}", "func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{12}\n}", "func (*ListRefsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{15}\n}", "func (*List) Descriptor() ([]byte, []int) {\n\treturn file_proto_ssql_proto_rawDescGZIP(), []int{11}\n}", "func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_seankhliao_archrepo_v1alpha2_archrepo_proto_rawDescGZIP(), []int{2}\n}", "func (*ListRepositoriesResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{2}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_sys_account_models_proto_rawDescGZIP(), []int{22}\n}", "func (*RequestGetRevokedList) Descriptor() ([]byte, []int) {\n\treturn file_pkg_rpc_rpc_proto_rawDescGZIP(), []int{45}\n}", "func (*ListLabelsResponse) 
Descriptor() ([]byte, []int) {\n\treturn file_gripql_proto_rawDescGZIP(), []int{34}\n}", "func (*ListRepositoriesResponse) Descriptor() ([]byte, []int) {\n\treturn file_seankhliao_archrepo_v1alpha2_archrepo_proto_rawDescGZIP(), []int{3}\n}", "func (*ResponseGetRevokedList) Descriptor() ([]byte, []int) {\n\treturn file_pkg_rpc_rpc_proto_rawDescGZIP(), []int{46}\n}", "func (*FindRepositories) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{34}\n}", "func (*ListResp) Descriptor() ([]byte, []int) {\n\treturn file_rpc_proto_rawDescGZIP(), []int{3}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use ListJobReq.ProtoReflect.Descriptor instead.
func (*ListJobReq) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{7} }
[ "func (*ListJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{3}\n}", "func (*ListPatchJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{5}\n}", "func (*ListBatchJobResultsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v13_services_batch_job_service_proto_rawDescGZIP(), []int{7}\n}", "func (*ListBatchJobResultsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{7}\n}", "func (*ListPatchJobInstanceDetailsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{2}\n}", "func (*GetJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{5}\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}", "func (*ListTypeRequest) Descriptor() ([]byte, []int) {\n\treturn file_backend_proto_backend_proto_rawDescGZIP(), []int{11}\n}", "func (*ListMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{14}\n}", "func (*ListScheduledWorkloadsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protoc_api_list_scheduled_workloads_request_message_proto_rawDescGZIP(), []int{0}\n}", "func (*ListPatchJobsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{6}\n}", "func (*ReportTryjobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{11}\n}", "func (*RunDisconnectedServicesJobReq) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{15}\n}", "func (*CancelPatchJobRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{10}\n}", "func (*GetPatchJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{1}\n}", "func (*ListJobsResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{4}\n}", "func (*AddBatchJobOperationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v13_services_batch_job_service_proto_rawDescGZIP(), []int{5}\n}", "func (*AddBatchJobOperationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{5}\n}", "func (*ListTimersRequest) Descriptor() ([]byte, []int) {\n\treturn file_list_timers_proto_rawDescGZIP(), []int{0}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use ListJobRes.ProtoReflect.Descriptor instead.
func (*ListJobRes) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{8} }
[ "func (*ListJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{3}\n}", "func (*ListJobReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{7}\n}", "func (*ListJobsResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{4}\n}", "func (*ListBatchJobResultsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{7}\n}", "func (*ListBatchJobResultsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v13_services_batch_job_service_proto_rawDescGZIP(), []int{7}\n}", "func (*JobList) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{10}\n}", "func (*MutateBatchJobResult) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v13_services_batch_job_service_proto_rawDescGZIP(), []int{3}\n}", "func (*MutateBatchJobResult) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{3}\n}", "func (*ListTask) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobModule_jobModule_proto_rawDescGZIP(), []int{10}\n}", "func (*ListPatchJobInstanceDetailsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{2}\n}", "func (*ListPatchJobsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{6}\n}", "func (*ListBatchJobResultsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v13_services_batch_job_service_proto_rawDescGZIP(), []int{8}\n}", "func (*ListBatchJobResultsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{8}\n}", "func (*ListPatchJobsRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{5}\n}", "func (*RevokeJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{21}\n}", "func (*GetJobsReply) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{7}\n}", "func (*ListPatchJobInstanceDetailsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_osconfig_v1_patch_jobs_proto_rawDescGZIP(), []int{3}\n}", "func (*ReportTryjobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{11}\n}", "func (*BatchJobResult) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v13_services_batch_job_service_proto_rawDescGZIP(), []int{9}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use ListVariableReq.ProtoReflect.Descriptor instead.
func (*ListVariableReq) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{11} }
[ "func (*ListWorkflowVariablesRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_ingress_list_workflow_variables_proto_rawDescGZIP(), []int{0}\n}", "func (*ListVariableRes) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{12}\n}", "func (*ListWorkflowVariablesResponse_Variable) Descriptor() ([]byte, []int) {\n\treturn file_pkg_ingress_list_workflow_variables_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*ListTypeRequest) Descriptor() ([]byte, []int) {\n\treturn file_backend_proto_backend_proto_rawDescGZIP(), []int{11}\n}", "func (*ListWorkflowVariablesResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_ingress_list_workflow_variables_proto_rawDescGZIP(), []int{1}\n}", "func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_clouddebugger_v2_data_proto_rawDescGZIP(), []int{3}\n}", "func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}", "func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{14}\n}", "func (*ListMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{14}\n}", "func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{7}\n}", "func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_proto_v1_synthetics_proto_rawDescGZIP(), []int{2}\n}", "func (*ListProblemsRequest) Descriptor() ([]byte, []int) {\n\treturn file_problempb_service_proto_rawDescGZIP(), []int{20}\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_natan_proto_rawDescGZIP(), []int{1}\n}", "func (*CMsg_CVars) Descriptor() ([]byte, 
[]int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{7}\n}", "func (*ListItemRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_proto_rawDescGZIP(), []int{0}\n}", "func (*MemberListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{2}\n}", "func (*ListReqMsg) Descriptor() ([]byte, []int) {\n\treturn file_register_proto_rawDescGZIP(), []int{1}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use ListVariableRes.ProtoReflect.Descriptor instead.
func (*ListVariableRes) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{12} }
[ "func (*ListVariableReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{11}\n}", "func (*ListWorkflowVariablesResponse_Variable) Descriptor() ([]byte, []int) {\n\treturn file_pkg_ingress_list_workflow_variables_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_clouddebugger_v2_data_proto_rawDescGZIP(), []int{3}\n}", "func (*ListWorkflowVariablesRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_ingress_list_workflow_variables_proto_rawDescGZIP(), []int{0}\n}", "func (*ListWorkflowVariablesResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_ingress_list_workflow_variables_proto_rawDescGZIP(), []int{1}\n}", "func (*List) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{77}\n}", "func (*Type_ListType) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*List) Descriptor() ([]byte, []int) {\n\treturn file_proto_ssql_proto_rawDescGZIP(), []int{11}\n}", "func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_proto_v1_synthetics_proto_rawDescGZIP(), []int{2}\n}", "func (*Value_List) Descriptor() ([]byte, []int) {\n\treturn file_origin_proto_rawDescGZIP(), []int{0, 6}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*ListTypeRequest) Descriptor() ([]byte, []int) {\n\treturn file_backend_proto_backend_proto_rawDescGZIP(), []int{11}\n}", "func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{7}\n}", "func (*Decl) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}\n}", "func (*CMsg_CVars_CVar) 
Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*ListValue) Descriptor() ([]byte, []int) {\n\treturn file_proto_value_value_proto_rawDescGZIP(), []int{4}\n}", "func (*ListMatcher) Descriptor() ([]byte, []int) {\n\treturn file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{1}\n}", "func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{7}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use DescribeRepositoryReq.ProtoReflect.Descriptor instead.
func (*DescribeRepositoryReq) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{13} }
[ "func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{3}\n}", "func (*ComputeRepositoryDiffRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{22}\n}", "func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{13}\n}", "func (*UpdateRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_seankhliao_archrepo_v1alpha2_archrepo_proto_rawDescGZIP(), []int{6}\n}", "func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_seankhliao_archrepo_v1alpha2_archrepo_proto_rawDescGZIP(), []int{5}\n}", "func (*UpdateRepoReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{5}\n}", "func (*FindRemoteRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{2}\n}", "func (*ListRepositoryReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{3}\n}", "func (*DeleteRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{15}\n}", "func (*RemoveTeamBaseRepositoryScopeRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_team_proto_rawDescGZIP(), []int{37}\n}", "func (*DeleteRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_seankhliao_archrepo_v1alpha2_archrepo_proto_rawDescGZIP(), []int{7}\n}", "func (*RemoveTeamRepositoryScopeRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_team_proto_rawDescGZIP(), []int{45}\n}", "func (*AddTeamRepositoryScopeRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_team_proto_rawDescGZIP(), []int{41}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*DetachMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{22}\n}", "func (*AddTeamBaseRepositoryScopeRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_team_proto_rawDescGZIP(), []int{33}\n}", "func (*ConfigRequest_V1_Deprecated) Descriptor() ([]byte, []int) {\n\treturn file_config_opensearch_config_request_proto_rawDescGZIP(), []int{0, 0, 23}\n}", "func (*DiscoveryRequest) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{1}\n}", "func (*ListMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{23}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use RunOpsReq.ProtoReflect.Descriptor instead.
func (*RunOpsReq) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{15} }
[ "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*PatchTasksRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{154}\n}", "func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{13, 0}\n}", "func (*UpdateTensorboardRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{24}\n}", "func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{14, 0}\n}", "func (*StopProvider_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*FetchRunStatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{9}\n}", "func (*UpdateTaskRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{1}\n}", "func (*GetTaskRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{2}\n}", "func (*GetTensorboardRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{19}\n}", "func (*ReportVerifiedRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{0}\n}", "func (*RunWorkflowRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_instances_proto_rawDescGZIP(), []int{15}\n}", "func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}", "func (*PatchWorkflowsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{131}\n}", "func (*PatchConceptsRequest) 
Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{34}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*CreateTensorboardRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{18}\n}", "func (*CreateTaskRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{0}\n}", "func (*SqlOperationsGetRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_proto_rawDescGZIP(), []int{34}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use RunOpsRes.ProtoReflect.Descriptor instead.
func (*RunOpsRes) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{16} }
[ "func (*RunOpsReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{15}\n}", "func (*UpdateTensorboardRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{24}\n}", "func (*GetTensorboardRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{19}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*DiagOperation) Descriptor() ([]byte, []int) {\n\treturn file_testvector_tv_proto_rawDescGZIP(), []int{10}\n}", "func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}", "func (*RunCL) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{8}\n}", "func (*DeleteTensorboardRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{25}\n}", "func (*FetchRunStatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{9}\n}", "func (*FetchActiveRunsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{5}\n}", "func (*RunCL_Dep) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{8, 1}\n}", "func (*ActiveRun) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{7}\n}", "func (*PlanResourceChange) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{13}\n}", "func (*ReportVerifiedRunRequest) Descriptor() ([]byte, 
[]int) {\n\treturn file_go_chromium_org_luci_cv_api_migration_migration_proto_rawDescGZIP(), []int{0}\n}", "func (*StandardRunnerProtocols) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{55}\n}", "func (*CreateTensorboardRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{18}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*QueryPlanStatusResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{25}\n}", "func (*CancelPlanResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{23}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use Playbook_Task_Args.ProtoReflect.Descriptor instead.
func (*Playbook_Task_Args) Descriptor() ([]byte, []int) { return file_api_ops_proto_rawDescGZIP(), []int{14, 1, 0} }
[ "func (*Task) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_todolist_proto_rawDescGZIP(), []int{3}\n}", "func (*PatchTasksRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{154}\n}", "func (*TaskUpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_protobuf_v1_task_proto_rawDescGZIP(), []int{2}\n}", "func (*AnalysisMessageWeakSchema_ArgType) Descriptor() ([]byte, []int) {\n\treturn file_analysis_v1alpha1_message_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*Task) Descriptor() ([]byte, []int) {\n\treturn file_infra_grpc_task_proto_rawDescGZIP(), []int{0}\n}", "func (*TaskRequest) Descriptor() ([]byte, []int) {\n\treturn file_infra_grpc_task_proto_rawDescGZIP(), []int{1}\n}", "func (*ApiCollectorTask) Descriptor() ([]byte, []int) {\n\treturn file_protos_collectors_generic_proto_rawDescGZIP(), []int{5}\n}", "func (*AddTaskRequest) Descriptor() ([]byte, []int) {\n\treturn file_todo_todo_proto_rawDescGZIP(), []int{0}\n}", "func (*OldSystemTask) Descriptor() ([]byte, []int) {\n\treturn file_offline_v3tasks_proto_rawDescGZIP(), []int{2}\n}", "func (*Task) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{0}\n}", "func (*TaskInternal) Descriptor() ([]byte, []int) {\n\treturn file_board_board_proto_rawDescGZIP(), []int{17}\n}", "func (*TaskDefWrapper) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_scheduler_appengine_messages_config_proto_rawDescGZIP(), []int{10}\n}", "func (*TaskRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_tasklist_server_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateTaskRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{0}\n}", "func (*OldSystemTaskDetail) Descriptor() ([]byte, []int) {\n\treturn file_offline_v3tasks_proto_rawDescGZIP(), []int{3}\n}", "func (*ExternalPayload) Descriptor() ([]byte, []int) {\n\treturn 
file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{53}\n}", "func (*CreateTaskRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_todolist_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateTaskRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{1}\n}", "func (*GetTaskRunRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{2}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CheckLock tries to obtain an exclude lock on a lockfile and returns an error if one occurs
func (s *Single) CheckLock() error { if err := os.Remove(s.Filename()); err != nil && !os.IsNotExist(err) { return ErrAlreadyRunning } file, err := os.OpenFile(s.Filename(), os.O_EXCL|os.O_CREATE, 0600) if err != nil { return err } s.file = file return nil }
[ "func CheckLock(path string) bool {\n\treturn false\n}", "func _1462dotlockCheckReservedLock(tls *crt.TLS, _id uintptr /* *Tsqlite3_file */, _pResOut uintptr /* *int32 */) (r int32) {\n\tvar (\n\t\t_rc int32\n\t\t_reserved int32\n\t\t_pFile uintptr // *TunixFile\n\t)\n\t_rc = int32(0)\n\t_reserved = int32(0)\n\t_pFile = _id\n\n\t_reserved = bool2int(fn52(*(*Tsqlite3_syscall_ptr)(unsafe.Pointer((_1119aSyscall + 24) + 4)))(tls, *(*uintptr)(unsafe.Pointer(_pFile + 24)), int32(0)) == int32(0))\n\t*(*int32)(unsafe.Pointer(_pResOut)) = _reserved\n\treturn _rc\n}", "func _1458nolockCheckReservedLock(tls *crt.TLS, _NotUsed uintptr /* *Tsqlite3_file */, _pResOut uintptr /* *int32 */) (r int32) {\n\t*(*int32)(unsafe.Pointer(_pResOut)) = int32(0)\n\treturn int32(0)\n}", "func (l *Lockfile) MustUnlock() {\n\tif err := l.Unlock(); err != nil {\n\t\tlog.Fatalf(\"could not unlock %q: %v\", l.path, err)\n\t}\n}", "func checkTrylockMainProcess(t *testing.T) {\n\tvar err error\n\tlockfile := lockOrFail(t)\n\tdefer removeTestLock(lockfile)\n\tlockdir := filepath.Dir(lockfile.File.Name())\n\totherAcquired, message, err := forkAndGetLock(lockdir)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error in subprocess trying to lock uncontested fileLock: %v. Subprocess output: %q\", err, message)\n\t}\n\tif !otherAcquired {\n\t\tt.Fatalf(\"Subprocess failed to lock uncontested fileLock. Subprocess output: %q\", message)\n\t}\n\n\terr = lockfile.tryLock()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to lock fileLock: %v\", err)\n\t}\n\n\treacquired, message, err := forkAndGetLock(filepath.Dir(lockfile.File.Name()))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif reacquired {\n\t\tt.Fatalf(\"Permitted locking fileLock twice. 
Subprocess output: %q\", message)\n\t}\n\n\terr = lockfile.Unlock()\n\tif err != nil {\n\t\tt.Fatalf(\"Error unlocking fileLock: %v\", err)\n\t}\n\n\treacquired, message, err = forkAndGetLock(filepath.Dir(lockfile.File.Name()))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reacquired {\n\t\tt.Fatalf(\"Subprocess failed to acquire lock after it was released by the main process. Subprocess output: %q\", message)\n\t}\n}", "func (l *FileLock) ExclusiveLock() error {\n\treturn syscall.Flock(l.fd, syscall.LOCK_EX)\n}", "func _1444unixCheckReservedLock(tls *crt.TLS, _id uintptr /* *Tsqlite3_file */, _pResOut uintptr /* *int32 */) (r int32) {\n\tesc := crt.MustMalloc(24)\n\tvar (\n\t\t_rc int32\n\t\t_reserved int32\n\t\t_pFile uintptr // *TunixFile\n\t\t_lock = esc // *Sflock\n\t)\n\tdefer crt.Free(esc)\n\t_rc = int32(0)\n\t_reserved = int32(0)\n\t_pFile = _id\n\n\t_1124unixEnterMutex(tls)\n\tif int32(*(*uint8)(unsafe.Pointer((*(*uintptr)(unsafe.Pointer(_pFile + 8))) + 20))) <= int32(1) {\n\t\tgoto _1\n\t}\n\n\t_reserved = int32(1)\n_1:\n\tif _reserved != 0 || (*(*uint8)(unsafe.Pointer((*(*uintptr)(unsafe.Pointer(_pFile + 8))) + 21))) != 0 {\n\t\tgoto _2\n\t}\n\n\t*(*int16)(unsafe.Pointer(_lock + 2)) = int16(0)\n\t*(*T__off64_t)(unsafe.Pointer(_lock + 4)) = T__off64_t(_151sqlite3PendingByte + int32(1))\n\t*(*T__off64_t)(unsafe.Pointer(_lock + 12)) = T__off64_t(1)\n\t*(*int16)(unsafe.Pointer(_lock)) = int16(1)\n\tif fn181(*(*Tsqlite3_syscall_ptr)(unsafe.Pointer((_1119aSyscall + 84) + 4)))(tls, *(*int32)(unsafe.Pointer(_pFile + 12)), int32(12), _lock) == 0 {\n\t\tgoto _3\n\t}\n\n\t_rc = int32(3594)\n\t_1281storeLastErrno(tls, _pFile, *(*int32)(unsafe.Pointer(crt.X__errno_location(tls))))\n\tgoto _4\n\n_3:\n\tif int32(*(*int16)(unsafe.Pointer(_lock))) == int32(2) {\n\t\tgoto _5\n\t}\n\n\t_reserved = int32(1)\n_5:\n_4:\n_2:\n\t_1125unixLeaveMutex(tls)\n\t*(*int32)(unsafe.Pointer(_pResOut)) = _reserved\n\treturn _rc\n}", "func _1456nolockLock(tls *crt.TLS, _NotUsed uintptr 
/* *Tsqlite3_file */, _NotUsed2 int32) (r int32) {\n\treturn int32(0)\n}", "func (g Lock) CheckNotOn() error {\n\tif !DebugGoroutines {\n\t\treturn nil\n\t}\n\tif ID() == uint64(g) {\n\t\treturn errors.New(\"running on the wrong goroutine\")\n\t}\n\treturn nil\n}", "func (l *fileLock) tryLock() (Unlocker, error) {\n\tl.mu.Lock()\n\terr := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)\n\tswitch err {\n\tcase syscall.EWOULDBLOCK:\n\t\tl.mu.Unlock()\n\t\treturn nopUnlocker{}, nil\n\tcase nil:\n\t\treturn l, nil\n\tdefault:\n\t\tl.mu.Unlock()\n\t\treturn nil, err\n\t}\n}", "func HasLock(basepath string) bool {\n\t_, err := os.Stat(filepath.Join(basepath, LockFile))\n\treturn err == nil\n}", "func LockCheckNetworkHook(ctx context.Context) (unlock func(), e error) {\n\tlockChan := make(chan error, 1) // To notify lock completion to main thread.\n\tdone := make(chan struct{}) // To notify main thread completion to the goroutine.\n\n\tdoUnlock := func() {\n\t\tclose(done)\n\t}\n\n\tsucceeded := false\n\tdefer func() {\n\t\tif !succeeded {\n\t\t\tdoUnlock()\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tf, err := os.Create(checkNetworkLockPath)\n\t\tif err != nil {\n\t\t\tlockChan <- err\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\n\t\t// NOTE: if this lock is held for a \"long time\", the main\n\t\t// thread may time out but we'll still be stuck here beyond the\n\t\t// time of test completion. 
This is OK, but beware that (for\n\t\t// example) cleanup logging may not go anywhere useful.\n\t\tif err = unix.Flock(int(f.Fd()), unix.LOCK_SH); err != nil {\n\t\t\tlockChan <- err\n\t\t\treturn\n\t\t}\n\t\tdefer func() {\n\t\t\t// Update access and modification time, so\n\t\t\t// check_ethernet.hook knows when we last released the\n\t\t\t// lock.\n\t\t\tif err = unix.Futimes(int(f.Fd()), nil); err != nil {\n\t\t\t\ttesting.ContextLogf(ctx, \"Failed to update time %s: %v\", checkNetworkLockPath, err)\n\t\t\t}\n\t\t\tif err = unix.Flock(int(f.Fd()), unix.LOCK_UN); err != nil {\n\t\t\t\ttesting.ContextLogf(ctx, \"Failed to unlock %s: %v\", checkNetworkLockPath, err)\n\t\t\t}\n\t\t}()\n\t\tlockChan <- nil\n\t\t<-done // Wait for main thread.\n\t}()\n\n\tlctx, cancel := context.WithTimeout(ctx, checkNetworkLockTimeout)\n\tdefer cancel()\n\tselect {\n\tcase err := <-lockChan:\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to acquire lock %s\", checkNetworkLockPath)\n\t\t}\n\tcase <-lctx.Done():\n\t\treturn nil, errors.Wrapf(lctx.Err(), \"timed out acquiring lock %s\", checkNetworkLockPath)\n\t}\n\n\tsucceeded = true\n\treturn doUnlock, nil\n}", "func (g GoroutineLock) CheckNotOn() {\n\tif getGoroutineID() == goroutineID(g) {\n\t\tpanic(\"running on the wrong goroutine\")\n\t}\n}", "func (r *Locking) MustUnlock() (Result, error) {\n\treturn r.delete()\n}", "func (f *volatileFile) CheckReservedLock() bool {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\treturn f.reserved > 0 || f.pending > 0 || f.exclusive > 0\n}", "func (wbc *WriteBackConfig) RequiresLocking() bool {\n\tswitch wbc.Method {\n\tcase WriteBackGit:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func checkDeleteLocked(vfsd *vfs.Dentry) error {\n\tparentVFSD := vfsd.Parent()\n\tif parentVFSD == nil {\n\t\treturn syserror.EBUSY\n\t}\n\tif parentVFSD.IsDisowned() {\n\t\treturn syserror.ENOENT\n\t}\n\treturn nil\n}", "func TestRestrictInPresenceOfThreading(t *testing.T) 
{\n\tRunInSubprocess(t, func() {\n\t\tRequireLandlockABI(t, 1)\n\n\t\tfpath := MakeSomeFile(t)\n\n\t\terr := landlock.V1.RestrictPaths() // No access permitted at all.\n\t\tif err != nil {\n\t\t\tt.Skipf(\"kernel does not support Landlock v1; tests cannot be run.\")\n\t\t}\n\n\t\tvar wg sync.WaitGroup\n\t\tdefer wg.Wait()\n\n\t\tconst (\n\t\t\tparallelism = 3\n\t\t\tattempts = 10\n\t\t)\n\t\tfor g := 0; g < parallelism; g++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(grIdx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor i := 0; i < attempts; i++ {\n\t\t\t\t\tif err := openForRead(fpath); err == nil {\n\t\t\t\t\t\tt.Errorf(\"openForRead(%q) successful, want error\", fpath)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(g)\n\t\t}\n\t})\n}", "func obtainLock(lock lockfile.Lockfile) error {\n\t// Check if the lock has any owner.\n\tprocess, err := lock.GetOwner()\n\tif err == nil {\n\t\t// A lock already exists. Check if the lock owner is the current process\n\t\t// itself.\n\t\tif process.Pid == os.Getpid() {\n\t\t\treturn fmt.Errorf(\"lockfile %q already locked by this process\", lock)\n\t\t}\n\n\t\t// A lock already exists, but it's owned by some other process. Continue\n\t\t// to obtain lock, in case the lock owner no longer exists.\n\t}\n\n\t// Obtain a lock. Retry if the lock can't be obtained.\n\terr = lock.TryLock()\n\tfor err != nil {\n\t\t// Check if it's a lock temporary error that can be mitigated with a\n\t\t// retry. Fail if any other error.\n\t\tif _, ok := err.(interface{ Temporary() bool }); !ok {\n\t\t\treturn fmt.Errorf(\"unable to lock %q: %v\", lock, err)\n\t\t}\n\t\terr = lock.TryLock()\n\t}\n\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
TryUnlock closes and removes the lockfile
func (s *Single) TryUnlock() error { if err := s.file.Close(); err != nil { return fmt.Errorf("failed to close the lock file: %v", err) } if err := os.Remove(s.Filename()); err != nil { return fmt.Errorf("failed to remove the lock file: %v", err) } return nil }
[ "func (l *LockFile) Unlock() error {\n\treturn os.Remove(l.dirname)\n}", "func (l *fileLock) Unlock() error {\n\tdefer l.mu.Unlock()\n\treturn syscall.Close(l.fd)\n}", "func (l *NullPathLocker) Unlock() {}", "func release() error {\n\tif disabled() {\n\t\treturn nil\n\t}\n\n\tpath, err := path(lockFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.Remove(path)\n}", "func (f *FileLock) Unlock() {\n\tif f.db == nil {\n\t\treturn\n\t}\n\tif err := f.db.Close(); err != nil {\n\t\tlogger.Warningf(\"unable to release the lock on file %s: %s\", f.filePath, err)\n\t\treturn\n\t}\n\tf.db = nil\n}", "func (lh *LockHandle) Unlock(panicOnUnlockFailure bool) error {\n\tif lh == nil {\n\t\treturn errors.New(\"cannot unlock nil LockHandle\")\n\t}\n\n\tif !lh.locked {\n\t\treturn fmt.Errorf(\"multiple calls to unlock file %q\", lh.filename)\n\t}\n\n\tzlog.Debug().Str(\"lockfile\", lh.filename).Msg(\"going to unlock lockfile\")\n\tif err := syscall.Flock(int(lh.osFile.Fd()), syscall.LOCK_UN); err != nil {\n\t\twrapped := fmt.Errorf(\"could not release exclusive lock on %q: %w\", lh.filename, err)\n\t\tif panicOnUnlockFailure {\n\t\t\tpanic(wrapped)\n\t\t} else {\n\t\t\treturn wrapped\n\t\t}\n\t}\n\tlh.locked = false\n\tzlog.Debug().Str(\"lockfile\", lh.filename).Msg(\"lockfile unlocked\")\n\n\tif err := lh.osFile.Close(); err != nil {\n\t\treturn fmt.Errorf(\"failed to close file %q after unlocking successfully: %w\", lh.filename, err)\n\t}\n\n\t// I don't care if we can't delete the lock file.\n\t_ = os.Remove(lh.filename)\n\n\treturn nil\n}", "func (l *FileLock) Unlock() error {\n\tif l.fd == nil {\n\t\treturn fmt.Errorf(\"file %s descriptor is nil\", l.fileName)\n\t}\n\tfd := l.fd\n\tl.fd = nil\n\n\tdefer fd.Close()\n\tif err := syscall.Flock(int(fd.Fd()), syscall.LOCK_UN); err != nil {\n\t\treturn errors.Wrapf(err, \"file %s unlock failed\", l.fileName)\n\t}\n\treturn nil\n}", "func (l *Lockfile) Unlock() error {\n\tswitch err := l.checkLockfile(); err {\n\tcase 
nil:\n\t\t// Nuke the symlink and its target.\n\t\ttarget, err := os.Readlink(l.path)\n\t\tif os.IsNotExist(err) {\n\t\t\treturn ErrRogueDeletion\n\t\t} else if err != nil {\n\t\t\t// The symlink is somehow screwed up. Just nuke it.\n\t\t\tif err := os.Remove(l.path); err != nil {\n\t\t\t\tlog.Fatalf(\"Lockfile could not remove %q: %v\", l.path, err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tabsTarget := resolveSymlinkTarget(l.path, target)\n\t\tif err := os.Remove(absTarget); os.IsNotExist(err) {\n\t\t\treturn ErrRogueDeletion\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := os.Remove(l.path); os.IsNotExist(err) {\n\t\t\treturn ErrRogueDeletion\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\n\tcase ErrInvalidPID, ErrProcessDead, errUnlocked, ErrBusy:\n\t\treturn ErrRogueDeletion\n\n\tdefault:\n\t\treturn err\n\t}\n}", "func (rw *RWMutex) Unlock() {\n\tif err := syscall.Flock(rw.fd, syscall.LOCK_UN); err != nil {\n\t\tpanic(err)\n\t}\n\trw.mu.Unlock()\n}", "func TestLockAndUnlock(t *testing.T) {\n\tf, err := ioutil.TempFile(\"\", \"lock\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tf.Close()\n\tdefer func() {\n\t\terr = os.Remove(f.Name())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\t// lock the file\n\tl, err := LockedOpenFile(f.Name(), os.O_WRONLY, 0600)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// unlock the file\n\tif err = l.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// try lock the unlocked file\n\tdupl, err := LockedOpenFile(f.Name(), os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\tt.Errorf(\"err = %v, want %v\", err, nil)\n\t}\n\n\t// blocking on locked file\n\tlocked := make(chan struct{}, 1)\n\tgo func() {\n\t\tbl, blerr := LockedOpenFile(f.Name(), os.O_WRONLY, 0600)\n\t\tif blerr != nil {\n\t\t\tt.Error(blerr)\n\t\t\treturn\n\t\t}\n\t\tlocked <- struct{}{}\n\t\tif blerr = bl.Close(); blerr != nil {\n\t\t\tt.Error(blerr)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tselect {\n\tcase 
<-locked:\n\t\tt.Error(\"unexpected unblocking\")\n\tcase <-time.After(100 * time.Millisecond):\n\t}\n\n\t// unlock\n\tif err = dupl.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// the previously blocked routine should be unblocked\n\tselect {\n\tcase <-locked:\n\tcase <-time.After(1 * time.Second):\n\t\tt.Error(\"unexpected blocking\")\n\t}\n}", "func (tl *TryLock) Unlock() {\n\ttl.lock.Unlock()\n}", "func (f *File) Unlock() error {\n\treturn nil\n}", "func (l *FileLock) Close() error {\n\tfd := l.fd\n\tl.fd = -1\n\treturn syscall.Close(fd)\n}", "func (lock *MultiDiskStorageLock) Unlock() {\n\tif lock.fs != nil {\n\t\tlock.fs.mu.Lock()\n\t\tdefer lock.fs.mu.Unlock()\n\t\tif lock.fs.slock == lock {\n\t\t\tlock.fs.slock = nil\n\t\t}\n\t}\n}", "func (s *Snapshot) closeLocked() error {\n\ts.db.mu.snapshots.remove(s)\n\n\t// If s was the previous earliest snapshot, we might be able to reclaim\n\t// disk space by dropping obsolete records that were pinned by s.\n\tif e := s.db.mu.snapshots.earliest(); e > s.seqNum {\n\t\ts.db.maybeScheduleCompactionPicker(pickElisionOnly)\n\t}\n\ts.db = nil\n\treturn nil\n}", "func _1459dotlockClose(tls *crt.TLS, _id uintptr /* *Tsqlite3_file */) (r int32) {\n\tvar _pFile uintptr // *TunixFile\n\n\t_pFile = _id\n\n\t_1461dotlockUnlock(tls, _id, int32(0))\n\tXsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(_pFile + 24)))\n\treturn _1706closeUnixFile(tls, _id)\n}", "func Unlock() {\n\tmutex.Unlock()\n}", "func (m *MutexSafe) unlock() {\n\tm.Mutex.Unlock()\n}", "func (l *Locker) Unlock() {\n\tif l.locker != -1 {\n\t\tpanic(\"db: Unlock of unlocked Locker\")\n\t}\n\tatomic.StoreInt32(&l.locker, 0)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check availability of resource
func checkResource(url *string, channel chan RetData, cl *http.Client) { ret := RetData{} ret.startTime = time.Now() resp, err := cl.Get(*url) ret.finishTime = time.Now() if err != nil { ret.bad = true if terr, ok := err.(net.Error); ok && terr.Timeout() { ret.timeOut = true } } else { if resp.StatusCode < 200 || resp.StatusCode > 299 { ret.bad = true } defer resp.Body.Close() } channel <- ret }
[ "func (m *Manager) IsAvailableResource(item interface{}) bool {\n\tif v, ok := item.(BinConfig); ok {\n\t\treturn utils.FileExists(v.Path) || utils.DirectoryExists(v.Path)\n\t}\n\tif v, ok := item.(WeightConfig); ok {\n\t\treturn utils.FileExists(v.Path)\n\t}\n\tif v, ok := item.(ConfigConfig); ok {\n\t\treturn utils.FileExists(v.Path)\n\t}\n\treturn false\n}", "func isResourceReady(dynamicClient dynamic.Interface, obj *MetaResource) (bool, error) {\n\t// get the resource's name, namespace and gvr\n\tname := obj.Name\n\tnamespace := obj.Namespace\n\tgvk := obj.GroupVersionKind()\n\tgvr, _ := meta.UnsafeGuessKindToResource(gvk)\n\t// use the helper functions to convert the resource to a KResource duck\n\ttif := &duck.TypedInformerFactory{Client: dynamicClient, Type: &duckv1alpha1.KResource{}}\n\t_, lister, err := tif.Get(gvr)\n\tif err != nil {\n\t\t// Return error to stop the polling.\n\t\treturn false, err\n\t}\n\tuntyped, err := lister.ByNamespace(namespace).Get(name)\n\tif k8serrors.IsNotFound(err) {\n\t\t// Return false as we are not done yet.\n\t\t// We swallow the error to keep on polling.\n\t\t// It should only happen if we wait for the auto-created resources, like default Broker.\n\t\treturn false, nil\n\t} else if err != nil {\n\t\t// Return error to stop the polling.\n\t\treturn false, err\n\t}\n\tkr := untyped.(*duckv1alpha1.KResource)\n\treturn kr.Status.GetCondition(duckv1alpha1.ConditionReady).IsTrue(), nil\n}", "func (h *Host) IsAvailable() bool {\n\treturn h.Available.Load()\n}", "func ServiceAvailable(ctx *Context, url string, timeout time.Duration) bool {\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url)\n\n\tclient := &http.Client{Timeout: timeout}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\tLog(ERROR, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\treturn false\n\t}\n\n\tif resp.StatusCode != 200 
{\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"code\", resp.StatusCode, \"available\", false)\n\t\treturn false\n\t}\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"available\", true)\n\treturn true\n}", "func CheckResource(nsId string, resourceType string, resourceId string) (bool, error) {\n\n\t// Check parameters' emptiness\n\tif nsId == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; nsId given is null.\")\n\t\treturn false, err\n\t} else if resourceType == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; resourceType given is null.\")\n\t\treturn false, err\n\t} else if resourceId == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; resourceId given is null.\")\n\t\treturn false, err\n\t}\n\n\t// Check resourceType's validity\n\tif resourceType == common.StrImage ||\n\t\tresourceType == common.StrSSHKey ||\n\t\tresourceType == common.StrSpec ||\n\t\tresourceType == common.StrVNet ||\n\t\tresourceType == common.StrSecurityGroup {\n\t\t//resourceType == \"subnet\" ||\n\t\t//resourceType == \"publicIp\" ||\n\t\t//resourceType == \"vNic\" {\n\t\t// continue\n\t} else {\n\t\terr := fmt.Errorf(\"invalid resource type\")\n\t\treturn false, err\n\t}\n\n\terr := common.CheckString(nsId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\n\terr = common.CheckString(resourceId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\n\tfmt.Println(\"[Check resource] \" + resourceType + \", \" + resourceId)\n\n\tkey := common.GenResourceKey(nsId, resourceType, resourceId)\n\t//fmt.Println(key)\n\n\tkeyValue, err := common.CBStore.Get(key)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\tif keyValue != nil {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n\n}", "func (d downloader) IsAvailable() bool {\n\treturn len(d.urls) != 0\n}", "func isAppAvailable(t *testing.T, healthCheckEndPoint string) bool {\n\tclient := &http.Client{}\n\tresp, err := 
client.Get(healthCheckEndPoint)\n\trequire.NoError(t, err)\n\n\tdefer resp.Body.Close()\n\treturn resp.StatusCode == http.StatusOK\n}", "func (q *Queue) CheckResourceConnectionStatus(res *Resource) bool {\n\tvar reply int64\n\terr := res.Client.Call(\"Queue.Ping\", 12345, &reply)\n\tif err == rpc.ErrShutdown || err == io.EOF || err == io.ErrUnexpectedEOF {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func hasResource(client discovery.DiscoveryInterface, resource schema.GroupVersionResource) bool {\n\tresources, err := client.ServerResourcesForGroupVersion(resource.GroupVersion().String())\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor _, serverResource := range resources.APIResources {\n\t\tif serverResource.Name == resource.Resource {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func isAppAvailable(t *testing.T, healthCheckEndPoint string) bool {\n\tclient := &http.Client{}\n\tresp, err := client.Get(healthCheckEndPoint)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get a response from health probe: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\treturn resp.StatusCode == http.StatusNoContent\n}", "func (sr *ScheduledResource) Wait(checkInterval time.Duration, timeout time.Duration, stopChan <-chan struct{}) (bool, error) {\n\tch := make(chan error, 1)\n\tgo func(ch chan error) {\n\t\tlog.Printf(\"Waiting for %v to be created\", sr.Key())\n\t\tif isResourceFinished(sr, ch) {\n\t\t\treturn\n\t\t}\n\t\tticker := time.NewTicker(checkInterval)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopChan:\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tif isResourceFinished(sr, ch) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}(ch)\n\n\tselect {\n\tcase <-stopChan:\n\t\treturn true, nil\n\tcase err := <-ch:\n\t\treturn false, err\n\tcase <-time.After(timeout):\n\t\te := fmt.Errorf(\"timeout waiting for resource %s\", sr.Key())\n\t\tsr.Lock()\n\t\tdefer sr.Unlock()\n\t\tsr.Error = e\n\t\treturn false, e\n\t}\n}", "func (p *ResourcePool) getAvailable(timeout 
<-chan time.Time) (ResourceWrapper, error) {\n\n\t//Wait for an object, or a timeout\n\tselect {\n\tcase <-timeout:\n\t\treturn ResourceWrapper{p: p, e: ResourceTimeoutError}, ResourceTimeoutError\n\n\tcase wrapper, ok := <-p.resources:\n\n\t\t//pool is closed\n\t\tif !ok {\n\t\t\treturn ResourceWrapper{p: p, e: PoolClosedError}, PoolClosedError\n\t\t}\n\n\t\t//decriment the number of available resources\n\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\n\t\t//if the resource fails the test, close it and wait to get another resource\n\t\tif p.resTest(wrapper.Resource) != nil {\n\t\t\tp.resClose(wrapper.Resource)\n\t\t\twrapper.Close()\n\t\t\treturn ResourceWrapper{p: p, e: ResourceTestError}, ResourceTestError\n\t\t}\n\n\t\t//we got a valid resource to return\n\t\t//signal the filler that we need to fill\n\t\treturn wrapper, wrapper.e\n\n\t//we don't have a resource available\n\t//lets create one if we can\n\tdefault:\n\n\t\t//try to obtain a lock for a new resource\n\t\tif n_open := atomic.AddUint32(&p.open, 1); n_open > p.Cap() {\n\t\t\t//decriment\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\t\treturn ResourceWrapper{p: p, e: ResourceExhaustedError}, ResourceExhaustedError\n\t\t}\n\n\t\tresource, err := p.resOpen()\n\t\tif err != nil {\n\t\t\t//decriment\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\t\treturn ResourceWrapper{p: p, e: ResourceCreationError}, ResourceCreationError\n\t\t}\n\n\t\treturn ResourceWrapper{p: p, Resource: resource}, nil\n\t}\n}", "func (client *InstanceMetadataClient) IsAvailable(ctx context.Context) bool {\n\tctx, cancel := context.WithTimeout(ctx, 250*time.Millisecond)\n\tdefer cancel()\n\n\t// try to retrieve the instance id of our EC2 instance\n\tid, err := client.getMetadata(ctx, \"instance-id\")\n\treturn err == nil && ec2ResourceIDRE.MatchString(id)\n}", "func (pr *Provider) IsAvailable() bool {\n\treturn pr.available\n}", "func isResourceBestEffort(container *v1.Container, resource v1.ResourceName) bool {\n\t// A 
container resource is best-effort if its request is unspecified or 0.\n\t// If a request is specified, then the user expects some kind of resource guarantee.\n\treq, hasReq := container.Resources.Requests[resource]\n\treturn !hasReq || req.Value() == 0\n}", "func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool {\n\t// A container resource is best-effort if its request is unspecified or 0.\n\t// If a request is specified, then the user expects some kind of resource guarantee.\n\treq, hasReq := container.Resources.Requests[resource]\n\treturn !hasReq || req.Value() == 0\n}", "func (r Result) HasResource() bool {\n\treturn r.Resource.UID != \"\"\n}", "func (m *mountPoint) hasResource(absolutePath string) bool {\n\trelPath, err := filepath.Rel(m.Destination, absolutePath)\n\n\treturn err == nil && relPath != \"..\" && !strings.HasPrefix(relPath, fmt.Sprintf(\"..%c\", filepath.Separator))\n}", "func (rp *ResolverPool) Available() (bool, error) {\n\treturn true, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewLocalCollector returns a Collector that writes directly to a Store.
func NewLocalCollector(s Store) Collector { return s }
[ "func NewCollector() Collector {\n\treturn make(Collector)\n}", "func New() *Collector { return &Collector{} }", "func NewLocalRouterCollector(ctx context.Context, logger *slog.Logger, errors *prometheus.CounterVec, client platform.LocalRouterClient) *LocalRouterCollector {\n\terrors.WithLabelValues(\"local_router\").Add(0)\n\n\tlocalRouterLabels := []string{\"id\", \"name\"}\n\tlocalRouterInfoLabels := append(localRouterLabels, \"tags\", \"description\")\n\tlocalRouterSwitchInfoLabels := append(localRouterLabels, \"category\", \"code\", \"zone_id\")\n\tlocalRouterServerNetworkInfoLabels := append(localRouterLabels, \"vip\", \"ipaddress1\", \"ipaddress2\", \"nw_mask_len\", \"vrid\")\n\tlocalRouterPeerLabels := append(localRouterLabels, \"peer_index\", \"peer_id\")\n\tlocalRouterPeerInfoLabels := append(localRouterPeerLabels, \"enabled\", \"description\")\n\tlocalRouterStaticRouteInfoLabels := append(localRouterLabels, \"route_index\", \"prefix\", \"next_hop\")\n\n\treturn &LocalRouterCollector{\n\t\tctx: ctx,\n\t\tlogger: logger,\n\t\terrors: errors,\n\t\tclient: client,\n\t\tUp: prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_up\",\n\t\t\t\"If 1 the LocalRouter is available, 0 otherwise\",\n\t\t\tlocalRouterLabels, nil,\n\t\t),\n\t\tLocalRouterInfo: prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_info\",\n\t\t\t\"A metric with a constant '1' value labeled by localRouter information\",\n\t\t\tlocalRouterInfoLabels, nil,\n\t\t),\n\t\tSwitchInfo: prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_switch_info\",\n\t\t\t\"A metric with a constant '1' value labeled by localRouter connected switch information\",\n\t\t\tlocalRouterSwitchInfoLabels, nil,\n\t\t),\n\t\tNetworkInfo: prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_network_info\",\n\t\t\t\"A metric with a constant '1' value labeled by network information of the localRouter\",\n\t\t\tlocalRouterServerNetworkInfoLabels, nil,\n\t\t),\n\t\tPeerInfo: 
prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_peer_info\",\n\t\t\t\"A metric with a constant '1' value labeled by peer information\",\n\t\t\tlocalRouterPeerInfoLabels, nil,\n\t\t),\n\t\tPeerUp: prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_peer_up\",\n\t\t\t\"If 1 the Peer is available, 0 otherwise\",\n\t\t\tlocalRouterPeerLabels, nil,\n\t\t),\n\t\tStaticRouteInfo: prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_static_route_info\",\n\t\t\t\"A metric with a constant '1' value labeled by static route information\",\n\t\t\tlocalRouterStaticRouteInfoLabels, nil,\n\t\t),\n\t\tReceiveBytesPerSec: prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_receive_per_sec\",\n\t\t\t\"Receive bytes per seconds\",\n\t\t\tlocalRouterLabels, nil,\n\t\t),\n\t\tSendBytesPerSec: prometheus.NewDesc(\n\t\t\t\"sakuracloud_local_router_send_per_sec\",\n\t\t\t\"Send bytes per seconds\",\n\t\t\tlocalRouterLabels, nil,\n\t\t),\n\t}\n}", "func New() *LocalStore {\n\treturn &LocalStore{}\n}", "func NewLocalStore() *LocalStore {\n\treturn &LocalStore{\n\t\tstore: make(map[string]string),\n\t\tlock: &sync.RWMutex{},\n\t}\n}", "func New(gaugeFunc GaugeFunc) *Collector {\n\treturn &Collector{\n\t\tPauseDur: 10 * time.Second,\n\t\tEnableCPU: true,\n\t\tEnableMem: true,\n\t\tEnableGC: true,\n\t\tgaugeFunc: gaugeFunc,\n\t}\n}", "func NewCollector(store *forensicstore.ForensicStore, tempDir string, definitions []goartifacts.ArtifactDefinition) (*LiveCollector, error) {\n\tprovidesMap := map[string][]goartifacts.Source{}\n\n\tdefinitions = goartifacts.FilterOS(definitions)\n\n\tfor _, definition := range definitions {\n\t\tfor _, source := range definition.Sources {\n\t\t\tfor _, provide := range source.Provides {\n\t\t\t\tkey := strings.TrimPrefix(provide.Key, \"environ_\")\n\t\t\t\tif providingSources, ok := providesMap[key]; !ok {\n\t\t\t\t\tprovidesMap[key] = []goartifacts.Source{source}\n\t\t\t\t} else {\n\t\t\t\t\tprovidesMap[key] = append(providingSources, 
source)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsourceFS, err := systemfs.New()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"system fs creation failed: %w\", err)\n\t}\n\n\treturn &LiveCollector{\n\t\tSourceFS: sourceFS,\n\t\tregistryfs: registryfs.New(),\n\t\tStore: store,\n\t\tTempDir: tempDir,\n\t\tprovidesMap: providesMap,\n\t\tknowledgeBase: map[string][]string{},\n\t}, nil\n}", "func NewLocalAmboyStatsCollector(env cedar.Environment, id string) amboy.Job {\n\tj := makeAmboyStatsCollector()\n\tj.ExcludeRemote = true\n\tj.env = env\n\tj.SetID(fmt.Sprintf(\"%s-%s\", amboyStatsCollectorJobName, id))\n\treturn j\n}", "func NewCollector(in chan *proxyBuilder) Pipeline {\n\treturn &Collector{\n\t\tdataCh: in,\n\t\texecutors: make([]Executor, 0),\n\t}\n}", "func NewCollector(\n\tfile string,\n\tupdater updater.Updater,\n\tparser clusterParser,\n) Collector {\n\treturn &fileCollector{\n\t\tfile: file,\n\t\tupdater: updater,\n\t\tparser: parser,\n\t\tos: tbnos.New(),\n\t}\n}", "func NewCollector(runner QueryRunner, valType prometheus.ValueType, metricName, query string) *Collector {\n\treturn &Collector{\n\t\trunner: runner,\n\t\tname: metricName,\n\t\tquery: query,\n\t\tvalType: valType,\n\t\tdesc: nil,\n\t\tmetrics: nil,\n\t\tmux: sync.Mutex{},\n\t}\n}", "func NewCollector(api API) *Collector {\n\treturn &Collector{api: api}\n}", "func NewLocalStore(baseDirectory string) *LocalStore {\n\tlogger := util.RootLogger().WithField(\"Logger\", \"LocalStore\")\n\treturn &LocalStore{base: baseDirectory, logger: logger}\n}", "func NewLocal() build_remote.ExecutionCacheServiceServer {\n\tstore, err := action.NewOnDisk()\n\tif err != nil {\n\t\tlog.Fatalf(\"could not initialise ExecutionCacheService: %v\", err)\n\t}\n\treturn &local{store}\n}", "func New(c *Config) *Collector {\n\treturn &Collector{\n\t\tscpConfig: &scpConfig{\n\t\t\tuser: c.RemoteUser,\n\t\t\tport: c.RemotePort,\n\t\t\tidentifyKeyFile: c.RemoteKeyFile,\n\t\t},\n\t\tnamespace: c.Namespace,\n\t\tk8s: 
c.K8sClient,\n\t}\n}", "func NewCollector(config *Config) (coll *Collector, err error) {\n\tvar gelfWriter *gelf.Writer\n\n\tif gelfWriter, err = gelf.NewWriter(config.Graylog.Address); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcoll = new(Collector)\n\tcoll.writer = gelfWriter\n\tcoll.host = config.Collector.Hostname\n\n\treturn coll, nil\n}", "func NewCollector(registry *prometheus.Registry) (Collector, error) {\n\tpodNamespace := os.Getenv(\"POD_NAMESPACE\")\n\tif podNamespace == \"\" {\n\t\tpodNamespace = \"default\"\n\t}\n\n\tpodName := os.Getenv(\"POD_NAME\")\n\n\tic := collectors.NewController(podName, podNamespace, class.IngressClass)\n\n\treturn Collector(&collector{\n\t\tingressController: ic,\n\t\tregistry: registry,\n\t}), nil\n}", "func NewUseCollector() *Use {\n\treturn &Use{}\n}", "func NewCollector(cfg *config.AgentConfig) TelemetryCollector {\n\tif !cfg.TelemetryConfig.Enabled {\n\t\treturn &noopTelemetryCollector{}\n\t}\n\n\tvar endpoints []config.Endpoint\n\tfor _, endpoint := range cfg.TelemetryConfig.Endpoints {\n\t\tu, err := url.Parse(endpoint.Host)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tu.Path = \"/api/v2/apmtelemetry\"\n\t\tendpointWithPath := *endpoint\n\t\tendpointWithPath.Host = u.String()\n\n\t\tendpoints = append(endpoints, endpointWithPath)\n\t}\n\n\treturn &telemetryCollector{\n\t\tclient: cfg.NewHTTPClient(),\n\t\tendpoints: endpoints,\n\t\tuserAgent: fmt.Sprintf(\"Datadog Trace Agent/%s/%s\", cfg.AgentVersion, cfg.GitCommit),\n\n\t\tcfg: cfg,\n\t\tcollectedStartupError: &atomic.Bool{},\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
newCollectPacket returns an initialized wire.CollectPacket given a span and set of annotations.
func newCollectPacket(s SpanID, as Annotations) *wire.CollectPacket { return &wire.CollectPacket{ Spanid: s.wire(), Annotation: as.wire(), } }
[ "func (cc *ChunkedCollector) Collect(span SpanID, anns ...Annotation) error {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\n\tif cc.stopped {\n\t\treturn errors.New(\"ChunkedCollector is stopped\")\n\t}\n\tif !cc.started {\n\t\tcc.start()\n\t}\n\n\tif cc.pendingBySpanID == nil {\n\t\tcc.pendingBySpanID = map[SpanID]*wire.CollectPacket{}\n\t}\n\n\tif p, present := cc.pendingBySpanID[span]; present {\n\t\tif len(anns) > 0 {\n\t\t\tp.Annotation = append(p.Annotation, Annotations(anns).wire()...)\n\t\t}\n\t} else {\n\t\tcc.pendingBySpanID[span] = newCollectPacket(span, anns)\n\t\tcc.pending = append(cc.pending, span)\n\t}\n\n\tif err := cc.lastErr; err != nil {\n\t\tcc.lastErr = nil\n\t\treturn err\n\t}\n\treturn nil\n}", "func (rc *RemoteCollector) Collect(span SpanID, anns ...Annotation) error {\n\treturn rc.collectAndRetry(newCollectPacket(span, anns))\n}", "func NewSpanCollector(l *Logger) *spanCollector {\n\treturn &spanCollector{l}\n}", "func New() *Collector { return &Collector{} }", "func NewSpanCollector(matcher func(s *monkit.Span) bool) (\n\trv *SpanCollector) {\n\tif matcher == nil {\n\t\tmatcher = func(*monkit.Span) bool { return false }\n\t}\n\treturn &SpanCollector{\n\t\tmatcher: matcher,\n\t\tdone: make(chan struct{}),\n\t\tspansByParent: map[*monkit.Span][]*FinishedSpan{},\n\t}\n}", "func NewCollect() *cobra.Command {\n\tcollectOptions := newCollectOptions()\n\n\tcmd := &cobra.Command{\n\t\tUse: \"collect\",\n\t\tShort: \"Obtain all the data of the current node\",\n\t\tLong: edgecollectLongDescription,\n\t\tExample: edgecollectExample,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := ExecuteCollect(collectOptions)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t},\n\t}\n\tcmd.AddCommand()\n\taddCollectOtherFlags(cmd, collectOptions)\n\treturn cmd\n}", "func newCollectdMetric() *CollectdMetric {\n\tmetric := new(CollectdMetric)\n\tmetric.DataSource = saconfig.DataSourceCollectd\n\treturn metric\n}", "func (cspc 
*CStorPoolCluster) WithAnnotationsNew(annotations map[string]string) *CStorPoolCluster {\n\tcspc.Annotations = make(map[string]string)\n\tfor key, value := range annotations {\n\t\tcspc.Annotations[key] = value\n\t}\n\treturn cspc\n}", "func New(token string, collectionID string) *SpanListener {\n\treturn &SpanListener{\n\t\tToken: token,\n\t\tCollectionID: collectionID,\n\t\tmeasurementCh: make(chan *apipb.CarrierModuleMeasurements),\n\t}\n}", "func (n *networkManager) NewCollectResources(instanceID string, resp *CollectResourcesResp,\n\tlogField string) workflow.Workflow {\n\treturn workflow.WorkflowFunc(func() error {\n\t\treturn n.collectResources(instanceID, resp, n.log.WithField(projName, logField))\n\t})\n}", "func New(c *Config) *Collector {\n\treturn &Collector{\n\t\tscpConfig: &scpConfig{\n\t\t\tuser: c.RemoteUser,\n\t\t\tport: c.RemotePort,\n\t\t\tidentifyKeyFile: c.RemoteKeyFile,\n\t\t},\n\t\tnamespace: c.Namespace,\n\t\tk8s: c.K8sClient,\n\t}\n}", "func newPacket(pt uint8, cmd uint16, seq uint16, payloadSize int) (pkt packet) {\n\tpkt.header = msgHdr\n\tpkt.toDrone = true\n\tpkt.packetType = pt\n\tpkt.messageID = cmd\n\tpkt.sequence = seq\n\tif payloadSize > 0 {\n\t\tpkt.payload = make([]byte, payloadSize)\n\t}\n\treturn pkt\n}", "func (c collector) Collect(traceID, parentSpanID, spanID, message string, data map[string]interface{}) {\n\tf := &format.Std{\n\t\tCallStackSkip: NO_STACK_TRACE_INFO,\n\t\tTraceId: traceID,\n\t\tSpanId: spanID,\n\t\tParentSpanId: parentSpanID,\n\t\tMessage: message,\n\t\tData: data,\n\t}\n\tf.EnableSyslogHeader(c.l.syslog)\n\tf.SetService(c.l.key.service)\n\tf.SetLevel(INFO)\n\tc.l.writer.WriteFrom(f)\n}", "func NewUnReportStatsCollect(storeID uint64, regionIDs map[uint64]struct{}, interval uint64) *FlowItem {\n\treturn &FlowItem{\n\t\tpeerInfo: nil,\n\t\tregionInfo: nil,\n\t\texpiredStat: nil,\n\t\tunReportStatsCollect: &unReportStatsCollect{\n\t\t\tstoreID: storeID,\n\t\t\tregionIDs: regionIDs,\n\t\t\tinterval: 
interval,\n\t\t},\n\t}\n}", "func New(pipeline pipeline.Pipeline, apiToken string, collectionID string) listener.Listener {\n\treturn &spanListener{\n\t\tpipeline: pipeline,\n\t\tapiToken: apiToken,\n\t\tcollectionID: collectionID,\n\t\tapiEndpointURL: util.EndpointURL(util.DefaultSpanWebsocketEndpointBaseURL, collectionID),\n\t\thandshakeTimeout: util.DefaultSpanWebsocketHandshakeTimeout,\n\t\treconnectDelay: util.DefaultSpanWebsocketReconnectDelay,\n\t\tshutdownChan: make(chan interface{}),\n\t}\n}", "func NewSpan(name errors.Op) (*Metric, *Span) {\n\tm := New(name)\n\treturn m, m.StartSpan(name)\n}", "func newCollectionStatsCollector(ctx context.Context, client *mongo.Client, logger *logrus.Logger, compatible, discovery bool, topology labelsGetter, collections []string) *collstatsCollector {\n\treturn &collstatsCollector{\n\t\tctx: ctx,\n\t\tbase: newBaseCollector(client, logger),\n\n\t\tcompatibleMode: compatible,\n\t\tdiscoveringMode: discovery,\n\t\ttopologyInfo: topology,\n\n\t\tcollections: collections,\n\t}\n}", "func (t *Tracer) newSpan() *Span {\n\treturn t.spanAllocator.Get()\n}", "func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {\n\tstartTime := config.Timestamp()\n\tif startTime.IsZero() {\n\t\tstartTime = time.Now()\n\t}\n\n\ts := &recordingSpan{\n\t\t// Do not pre-allocate the attributes slice here! Doing so will\n\t\t// allocate memory that is likely never going to be used, or if used,\n\t\t// will be over-sized. The default Go compiler has been tested to\n\t\t// dynamically allocate needed space very well. 
Benchmarking has shown\n\t\t// it to be more performant than what we can predetermine here,\n\t\t// especially for the common use case of few to no added\n\t\t// attributes.\n\n\t\tparent: psc,\n\t\tspanContext: sc,\n\t\tspanKind: trace.ValidateSpanKind(config.SpanKind()),\n\t\tname: name,\n\t\tstartTime: startTime,\n\t\tevents: newEvictedQueue(tr.provider.spanLimits.EventCountLimit),\n\t\tlinks: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit),\n\t\ttracer: tr,\n\t}\n\n\tfor _, l := range config.Links() {\n\t\ts.addLink(l)\n\t}\n\n\ts.SetAttributes(sr.Attributes...)\n\ts.SetAttributes(config.Attributes()...)\n\n\treturn s\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect adds the span and annotations to a local buffer until the next call to Flush (or when MinInterval elapses), at which point they are sent (grouped by span) to the underlying collector.
func (cc *ChunkedCollector) Collect(span SpanID, anns ...Annotation) error { cc.mu.Lock() defer cc.mu.Unlock() if cc.stopped { return errors.New("ChunkedCollector is stopped") } if !cc.started { cc.start() } if cc.pendingBySpanID == nil { cc.pendingBySpanID = map[SpanID]*wire.CollectPacket{} } if p, present := cc.pendingBySpanID[span]; present { if len(anns) > 0 { p.Annotation = append(p.Annotation, Annotations(anns).wire()...) } } else { cc.pendingBySpanID[span] = newCollectPacket(span, anns) cc.pending = append(cc.pending, span) } if err := cc.lastErr; err != nil { cc.lastErr = nil return err } return nil }
[ "func (as *AggregateStore) Collect(id SpanID, anns ...Annotation) error {\n\tas.mu.Lock()\n\tdefer as.mu.Unlock()\n\n\t// Initialization\n\tif as.groups == nil {\n\t\tas.groups = make(map[ID]*spanGroup)\n\t\tas.groupsByName = make(map[string]ID)\n\t\tas.pre = &LimitStore{\n\t\t\tMax: as.MaxRate,\n\t\t\tDeleteStore: NewMemoryStore(),\n\t\t}\n\t}\n\n\t// Collect into the limit store.\n\tif err := as.pre.Collect(id, anns...); err != nil {\n\t\treturn err\n\t}\n\n\t// Consider eviction of old data.\n\tif time.Since(as.lastEvicted) > as.MinEvictAge {\n\t\tif err := as.evictBefore(time.Now().Add(-1 * as.MinEvictAge)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Grab the group for our span.\n\tgroup, ok := as.group(id, anns...)\n\tif !ok {\n\t\t// We don't have a group for the trace, and can't create one (the\n\t\t// spanName event isn't present yet).\n\t\treturn nil\n\t}\n\n\t// Unmarshal the events.\n\tvar events []Event\n\tif err := UnmarshalEvents(anns, &events); err != nil {\n\t\treturn err\n\t}\n\n\t// Find the start and end time of the trace.\n\teStart, eEnd, ok := findTraceTimes(events)\n\tif !ok {\n\t\t// We didn't find any timespan events at all, so we're done here.\n\t\treturn nil\n\t}\n\n\t// Update the group to consider this trace being one of the slowest.\n\tgroup.update(eStart, eEnd, id.Trace, func(trace ID) {\n\t\t// Delete the request trace from the output store.\n\t\tif err := as.deleteOutput(trace); err != nil {\n\t\t\tlog.Printf(\"AggregateStore: failed to delete a trace: %s\", err)\n\t\t}\n\t})\n\n\t// Move traces from the limit store into the group, as needed.\n\tfor _, slowest := range group.Slowest {\n\t\t// Find the trace in the limit store.\n\t\ttrace, err := as.pre.Trace(slowest.TraceID)\n\t\tif err == ErrTraceNotFound {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Place into output store.\n\t\tvar walk func(t *Trace) error\n\t\twalk = func(t *Trace) error {\n\t\t\terr := 
as.MemoryStore.Collect(t.Span.ID, t.Span.Annotations...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor _, sub := range t.Sub {\n\t\t\t\tif err := walk(sub); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err := walk(trace); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Delete from the limit store.\n\t\terr = as.pre.Delete(slowest.TraceID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Prepare the aggregation event (before locking below).\n\tev := &AggregateEvent{\n\t\tName: group.Name,\n\t\tTimes: group.Times,\n\t}\n\tfor _, slowest := range group.Slowest {\n\t\tif !slowest.empty() {\n\t\t\tev.Slowest = append(ev.Slowest, slowest.TraceID)\n\t\t}\n\t}\n\tif as.Debug && len(ev.Slowest) == 0 {\n\t\tlog.Printf(\"AggregateStore: no slowest traces for group %q (consider increasing MaxRate)\", group.Name)\n\t}\n\n\t// As we're updating the aggregation event, we go ahead and delete the old\n\t// one now. We do this all under as.MemoryStore.Lock otherwise users (e.g. 
the\n\t// web UI) can pull from as.MemoryStore when the trace has been deleted.\n\tas.MemoryStore.Lock()\n\tdefer as.MemoryStore.Unlock()\n\tif err := as.MemoryStore.deleteNoLock(group.Trace); err != nil {\n\t\treturn err\n\t}\n\n\t// Record an aggregate event with the given name.\n\trecEvent := func(e Event) error {\n\t\tanns, err := MarshalEvent(e)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn as.MemoryStore.collectNoLock(SpanID{Trace: group.Trace}, anns...)\n\t}\n\tif err := recEvent(spanName{Name: group.Name}); err != nil {\n\t\treturn err\n\t}\n\tif err := recEvent(ev); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (rc *RemoteCollector) Collect(span SpanID, anns ...Annotation) error {\n\treturn rc.collectAndRetry(newCollectPacket(span, anns))\n}", "func (c collector) Collect(traceID, parentSpanID, spanID, message string, data map[string]interface{}) {\n\tf := &format.Std{\n\t\tCallStackSkip: NO_STACK_TRACE_INFO,\n\t\tTraceId: traceID,\n\t\tSpanId: spanID,\n\t\tParentSpanId: parentSpanID,\n\t\tMessage: message,\n\t\tData: data,\n\t}\n\tf.EnableSyslogHeader(c.l.syslog)\n\tf.SetService(c.l.key.service)\n\tf.SetLevel(INFO)\n\tc.l.writer.WriteFrom(f)\n}", "func CollectSpans(ctx context.Context, work func(ctx context.Context)) (\n\tspans []*FinishedSpan) {\n\ts := monkit.SpanFromCtx(ctx)\n\tif s == nil {\n\t\twork(ctx)\n\t\treturn nil\n\t}\n\tcollector := NewSpanCollector(nil)\n\tdefer collector.Stop()\n\ts.Trace().ObserveSpans(collector)\n\tf := s.Func()\n\tnewF := f.Scope().FuncNamed(fmt.Sprintf(\"%s-TRACED\", f.ShortName()))\n\tfunc() {\n\t\tdefer newF.Task(&ctx)(nil)\n\t\tcollector.ForceStart(monkit.SpanFromCtx(ctx))\n\t\twork(ctx)\n\t}()\n\treturn collector.Spans()\n}", "func (c *Collector) Collect(scs []stats.SampleContainer) {\n\tc.bufferLock.Lock()\n\tdefer c.bufferLock.Unlock()\n\tfor _, sc := range scs {\n\t\tc.buffer = append(c.buffer, sc.GetSamples()...)\n\t}\n}", "func (l *LabelStatistics) Collect() {\n\tl.RLock()\n\tdefer 
l.RUnlock()\n\tfor level, count := range l.labelCounter {\n\t\tregionLabelLevelGauge.WithLabelValues(level).Set(float64(count))\n\t}\n}", "func (c *cluster) collectSpans(\n\tt *testing.T, txn *roachpb.Transaction, ts hlc.Timestamp, reqs []roachpb.Request,\n) (latchSpans, lockSpans *spanset.SpanSet) {\n\tlatchSpans, lockSpans = &spanset.SpanSet{}, &spanset.SpanSet{}\n\th := roachpb.Header{Txn: txn, Timestamp: ts}\n\tfor _, req := range reqs {\n\t\tif cmd, ok := batcheval.LookupCommand(req.Method()); ok {\n\t\t\tcmd.DeclareKeys(c.rangeDesc, h, req, latchSpans, lockSpans)\n\t\t} else {\n\t\t\tt.Fatalf(\"unrecognized command %s\", req.Method())\n\t\t}\n\t}\n\n\t// Commands may create a large number of duplicate spans. De-duplicate\n\t// them to reduce the number of spans we pass to the spanlatch manager.\n\tfor _, s := range [...]*spanset.SpanSet{latchSpans, lockSpans} {\n\t\ts.SortAndDedup()\n\t\tif err := s.Validate(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\treturn latchSpans, lockSpans\n}", "func (cc *ChunkedCollector) Flush() error {\n\tcc.mu.Lock()\n\tpendingBySpanID := cc.pendingBySpanID\n\tpending := cc.pending\n\tcc.pendingBySpanID = nil\n\tcc.pending = nil\n\tcc.mu.Unlock()\n\n\tvar errs []error\n\tfor _, spanID := range pending {\n\t\tp := pendingBySpanID[spanID]\n\t\tif err := cc.Collector.Collect(spanIDFromWire(p.Spanid), annotationsFromWire(p.Annotation)...); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\tif len(errs) == 1 {\n\t\treturn errs[0]\n\t} else if len(errs) > 1 {\n\t\treturn fmt.Errorf(\"ChunkedCollector: multiple errors: %v\", errs)\n\t}\n\treturn nil\n}", "func (bq *Collector) Collect(ch chan<- prometheus.Metric) {\n\tbq.mux.Lock()\n\tdefer bq.mux.Unlock()\n\n\tfor i := range bq.metrics {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tbq.desc, bq.valType, bq.metrics[i].value, bq.metrics[i].values...)\n\t}\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tc.mut.RLock()\n\tdefer c.mut.RUnlock()\n\n\tif 
c.inner != nil {\n\t\tc.inner.Collect(ch)\n\t}\n}", "func (statColl *StatCollector) Collect(op *RecordedOp, replayedOp Op, reply Replyable, msg string) {\n\tif statColl.noop {\n\t\treturn\n\t}\n\tstatColl.Do(func() {\n\t\tstatColl.statStream = make(chan *OpStat, statColl.statStreamSize)\n\t\tstatColl.done = make(chan struct{})\n\t\tgo func() {\n\t\t\tfor stat := range statColl.statStream {\n\t\t\t\tstatColl.StatRecorder.RecordStat(stat)\n\t\t\t}\n\t\t\tclose(statColl.done)\n\t\t}()\n\t})\n\tif stat := statColl.GenerateOpStat(op, replayedOp, reply, msg); stat != nil {\n\t\tstatColl.statStream <- stat\n\t}\n}", "func (c *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tfor _, cc := range e.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) {\n\tc.checkpoint = c.current.SwapNumberAtomic(core.Number(0))\n\n\texp.Export(ctx, rec, c)\n}", "func (c *Collector) Collect(scs []stats.SampleContainer) {\n\tc.lock.Lock()\n\tfor _, sc := range scs {\n\t\tc.Samples = append(c.Samples, sc.GetSamples()...)\n\t}\n\tc.lock.Unlock()\n}", "func (i *InMemCollector) AddSpan(sp *types.Span) {\n\t// TODO protect against sending on a closed channel during shutdown\n\ti.incoming <- sp\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\t// Reset metrics.\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tfor _, vec := range e.counters {\n\t\tvec.Reset()\n\t}\n\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\tlog.Printf(\"Error while querying 
Elasticsearch: %v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to read ES response body: %v\", err)\n\t\te.up.Set(0)\n\t\treturn\n\t}\n\n\te.up.Set(1)\n\n\tvar all_stats NodeStatsResponse\n\terr = json.Unmarshal(body, &all_stats)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal JSON into struct: %v\", err)\n\t\treturn\n\t}\n\n\t// Regardless of whether we're querying the local host or the whole\n\t// cluster, here we can just iterate through all nodes found.\n\n\tfor node, stats := range all_stats.Nodes {\n\t\tlog.Printf(\"Processing node %v\", node)\n\t\t// GC Stats\n\t\tfor collector, gcstats := range stats.JVM.GC.Collectors {\n\t\t\te.counters[\"jvm_gc_collection_count\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionCount))\n\t\t\te.counters[\"jvm_gc_collection_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionTime))\n\t\t}\n\n\t\t// Breaker stats\n\t\tfor breaker, bstats := range stats.Breakers {\n\t\t\te.gauges[\"breakers_estimated_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.EstimatedSize))\n\t\t\te.gauges[\"breakers_limit_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.LimitSize))\n\t\t}\n\n\t\t// JVM Memory Stats\n\t\te.gauges[\"jvm_mem_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapCommitted))\n\t\te.gauges[\"jvm_mem_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapUsed))\n\t\te.gauges[\"jvm_mem_heap_max_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapMax))\n\t\te.gauges[\"jvm_mem_non_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, 
stats.Name).Set(float64(stats.JVM.Mem.NonHeapCommitted))\n\t\te.gauges[\"jvm_mem_non_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapUsed))\n\n\t\t// Indices Stats\n\t\te.gauges[\"indices_fielddata_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.Evictions))\n\t\te.gauges[\"indices_fielddata_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.MemorySize))\n\t\te.gauges[\"indices_filter_cache_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.Evictions))\n\t\te.gauges[\"indices_filter_cache_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.MemorySize))\n\n\t\te.gauges[\"indices_docs_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Count))\n\t\te.gauges[\"indices_docs_deleted\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Deleted))\n\n\t\te.gauges[\"indices_segments_memory_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Segments.Memory))\n\n\t\te.gauges[\"indices_store_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.Size))\n\t\te.counters[\"indices_store_throttle_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.ThrottleTime))\n\n\t\te.counters[\"indices_flush_total\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Total))\n\t\te.counters[\"indices_flush_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Time))\n\n\t\t// Transport Stats\n\t\te.counters[\"transport_rx_count\"].WithLabelValues(all_stats.ClusterName, 
stats.Name).Set(float64(stats.Transport.RxCount))\n\t\te.counters[\"transport_rx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxSize))\n\t\te.counters[\"transport_tx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxCount))\n\t\te.counters[\"transport_tx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxSize))\n\t}\n\n\t// Report metrics.\n\tch <- e.up\n\n\tfor _, vec := range e.counters {\n\t\tvec.Collect(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (t *TimestampCollector) Collect(ch chan<- prometheus.Metric) {\n\t// New map to dedup filenames.\n\tuniqueFiles := make(map[string]float64)\n\tt.lock.RLock()\n\tfor fileSD := range t.discoverers {\n\t\tfileSD.lock.RLock()\n\t\tfor filename, timestamp := range fileSD.timestamps {\n\t\t\tuniqueFiles[filename] = timestamp\n\t\t}\n\t\tfileSD.lock.RUnlock()\n\t}\n\tt.lock.RUnlock()\n\tfor filename, timestamp := range uniqueFiles {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tt.Description,\n\t\t\tprometheus.GaugeValue,\n\t\t\ttimestamp,\n\t\t\tfilename,\n\t\t)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flush immediately sends all pending spans to the underlying collector.
func (cc *ChunkedCollector) Flush() error { cc.mu.Lock() pendingBySpanID := cc.pendingBySpanID pending := cc.pending cc.pendingBySpanID = nil cc.pending = nil cc.mu.Unlock() var errs []error for _, spanID := range pending { p := pendingBySpanID[spanID] if err := cc.Collector.Collect(spanIDFromWire(p.Spanid), annotationsFromWire(p.Annotation)...); err != nil { errs = append(errs, err) } } if len(errs) == 1 { return errs[0] } else if len(errs) > 1 { return fmt.Errorf("ChunkedCollector: multiple errors: %v", errs) } return nil }
[ "func (s *Collector) Flush() error {\n\treturn s.flusher.Flush(s.stats)\n}", "func (c *HTTPCollector) flush(b []*zipkincore.Span) (err error) {\n\tdefer func() {\n\t\tc.batchPool.Put(b[:0])\n\t\tif err != nil {\n\t\t\tc.logger.Log(\"err\", err)\n\t\t}\n\t}()\n\n\t// Do not send an empty batch\n\tif len(b) == 0 {\n\t\treturn nil\n\t}\n\n\tdata, err := httpSerialize(b)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar req *http.Request\n\n\treq, err = http.NewRequest(\"POST\", c.url, data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/x-thrift\")\n\tif _, err = c.client.Do(req); err != nil {\n\t\treturn\n\t}\n\n\treturn nil\n}", "func (c *Collector) Flush() {\n\tclose(c.results)\n\t<-c.done\n}", "func (c *Stats) Flush() {\n\t// Add a job to the flush wait group\n\tc.flushWG.Add(1)\n\tc.jobs <- &job{flush: true}\n\tc.flushWG.Wait()\n}", "func (s *samplingSender) Flush() {\n\tif !s.agg.Stats.Zero() {\n\t\ts.next.Send(&zoekt.SearchResult{\n\t\t\tStats: s.agg.Stats,\n\t\t\tProgress: zoekt.Progress{\n\t\t\t\tPriority: math.Inf(-1),\n\t\t\t\tMaxPendingPriority: math.Inf(-1),\n\t\t\t},\n\t\t})\n\t}\n}", "func Flush() {\n\tif t, ok := internal.GetGlobalTracer().(*tracer); ok {\n\t\tt.flushSync()\n\t}\n}", "func (e *Exporter) Flush() {\n\te.tracer.Flush(context.Background())\n}", "func (r *reporter) Flush() {\n\tdone := make(chan struct{})\n\tselect {\n\tcase <-r.ctx.Done():\n\t\tr.log.Println(\"Context is done: ignoring metrics flush\")\n\tcase r.flushChan <- done:\n\t\t<-done // wait until flush completes\n\t}\n}", "func (al *Logger) flush() {\n\tal.mu.Lock()\n\tdefer al.mu.Unlock()\n\tif len(al.batch) > 0 {\n\t\tbatch := al.batch\n\t\tal.batch = nil\n\t\tal.batchBytes = 0\n\t\t// be careful not to send al.batch, since we will unlock before we finish sending the batch\n\t\tal.sendBatchWG.Add(1)\n\t\tgo func() {\n\t\t\tdefer al.sendBatchWG.Done()\n\t\t\terr := sendBatch(batch, al.fhAPI, al.fhStream, 
time.Now().Add(timeoutForSendingBatches))\n\t\t\tif err != nil {\n\t\t\t\tal.errLogger.ErrorD(\"send-batch-error\", logger.M{\n\t\t\t\t\t\"stream\": al.fhStream,\n\t\t\t\t\t\"error\": err.Error(),\n\t\t\t\t})\n\t\t\t}\n\t\t}()\n\t}\n}", "func (f *flusher) Flush() {\n\tf.mu.Lock()\n\tfor _, m := range f.meters {\n\t\tm.FlushReading(f.sink)\n\t}\n\tf.sink.Flush()\n\tf.mu.Unlock()\n}", "func Flush() {\n\tif traceListeners != nil {\n\t\tfor _, tl := range traceListeners {\n\t\t\t(*tl).Flush()\n\t\t}\n\t}\n}", "func (fl *flusher) Flush(ctx context.Context, span opentracing.Span, classrooms []models.Classroom) []models.Classroom {\n\n\tchunks, err := utils.SplitSlice(classrooms, fl.chunkSize)\n\n\tif err != nil {\n\t\treturn classrooms\n\t}\n\n\tfor i, chunk := range chunks {\n\n\t\tvar childSpan opentracing.Span\n\t\tif span != nil {\n\t\t\tchildSpan = opentracing.StartSpan(\"Flush\", opentracing.ChildOf(span.Context()))\n\t\t}\n\n\t\t_, err := fl.repo.MultiAddClassroom(ctx, chunk)\n\n\t\tif span != nil {\n\n\t\t\tchildSpan.LogFields(\n\t\t\t\tlog.Int(\"len\", len(chunk)),\n\t\t\t\tlog.Bool(\"sent\", err == nil),\n\t\t\t)\n\t\t\tchildSpan.Finish()\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn classrooms[fl.chunkSize*i:]\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Client) flush() {\n\tsubmissions := make([]*Request, 0, len(c.requests)+1)\n\tif c.newMetrics {\n\t\tc.newMetrics = false\n\t\tr := c.newRequest(RequestTypeGenerateMetrics)\n\t\tpayload := &Metrics{\n\t\t\tNamespace: c.Namespace,\n\t\t\tLibLanguage: \"go\",\n\t\t\tLibVersion: version.Tag,\n\t\t}\n\t\tfor _, m := range c.metrics {\n\t\t\ts := Series{\n\t\t\t\tMetric: m.name,\n\t\t\t\tType: string(m.kind),\n\t\t\t\tTags: m.tags,\n\t\t\t\tCommon: m.common,\n\t\t\t}\n\t\t\ts.Points = [][2]float64{{m.ts, m.value}}\n\t\t\tpayload.Series = append(payload.Series, s)\n\t\t}\n\t\tr.Payload = payload\n\t\tsubmissions = append(submissions, r)\n\t}\n\n\t// copy over requests so we can do the actual submission without 
holding\n\t// the lock. Zero out the old stuff so we don't leak references\n\tfor i, r := range c.requests {\n\t\tsubmissions = append(submissions, r)\n\t\tc.requests[i] = nil\n\t}\n\tc.requests = c.requests[:0]\n\n\tgo func() {\n\t\tfor _, r := range submissions {\n\t\t\terr := c.submit(r)\n\t\t\tif err != nil {\n\t\t\t\tc.log(\"telemetry submission failed: %s\", err)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *Collector) Flush() error {\n\tcounters := make(map[string]interface{})\n\tc.counters.Range(func(key, val interface{}) bool {\n\t\tcs := val.(*Counters)\n\t\tcounters[cs.peerAddress.ByteString()] = val\n\t\treturn true\n\t})\n\n\tif err := c.persistence.Put(counters); err != nil {\n\t\treturn fmt.Errorf(\"unable to persist counters: %w\", err)\n\t}\n\treturn nil\n}", "func (b *Buffer) Flush() {\n\tif len(b.series) == 0 {\n\t\treturn\n\t}\n\n\tsbuffer := []*influxdb.Series{}\n\tfor _, item := range b.series {\n\t\tsbuffer = append(sbuffer, item)\n\t}\n\n\tb.fn(sbuffer)\n\tb.Clear()\n}", "func (e *eventBatch) flush(ctx context.Context, resyncEvents chan resyncEvent) error {\n\tfor k, event := range e.events {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn fmt.Errorf(\"context done and the batch wasn't complete, update will be delayed, outstanding events: %d\", len(e.events))\n\t\tcase resyncEvents <- *event:\n\t\t\t// Once an event is sent we remove it from the batch\n\t\t\tdelete(e.events, k)\n\t\t}\n\t}\n\treturn nil\n}", "func (r *reporter) Flush() {}", "func (t *tracer) flushSync() {\n\tdone := make(chan struct{})\n\tt.flush <- done\n\t<-done\n}", "func (lms *MessageSorter) Flush() {\n\tsort.Sort(ktail.ByTimestamp(lms.cache))\n\n\tfor _, msg := range lms.cache {\n\t\tlms.format(lms.wr, msg)\n\t}\n\n\tlms.cache = []*ktail.LogMessage{}\n\tlms.current = 0\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewRemoteCollector creates a collector that sends data to a collector server (created with NewServer). It sends data immediately when Collect is called. To send data in chunks, use a ChunkedCollector.
func NewRemoteCollector(addr string) *RemoteCollector { return &RemoteCollector{ addr: addr, dial: func() (net.Conn, error) { return net.Dial("tcp", addr) }, } }
[ "func NewRemoteAmboyStatsCollector(env cedar.Environment, id string) amboy.Job {\n\tj := makeAmboyStatsCollector()\n\tj.ExcludeLocal = true\n\tj.env = env\n\tj.SetID(fmt.Sprintf(\"%s-%s\", amboyStatsCollectorJobName, id))\n\treturn j\n}", "func NewCollector(in chan *proxyBuilder) Pipeline {\n\treturn &Collector{\n\t\tdataCh: in,\n\t\texecutors: make([]Executor, 0),\n\t}\n}", "func NewTLSRemoteCollector(addr string, tlsConfig *tls.Config) *RemoteCollector {\n\treturn &RemoteCollector{\n\t\taddr: addr,\n\t\tdial: func() (net.Conn, error) {\n\t\t\treturn tls.Dial(\"tcp\", addr, tlsConfig)\n\t\t},\n\t}\n}", "func New(c *Config) *Collector {\n\treturn &Collector{\n\t\tscpConfig: &scpConfig{\n\t\t\tuser: c.RemoteUser,\n\t\t\tport: c.RemotePort,\n\t\t\tidentifyKeyFile: c.RemoteKeyFile,\n\t\t},\n\t\tnamespace: c.Namespace,\n\t\tk8s: c.K8sClient,\n\t}\n}", "func NewServer(l net.Listener, c Collector) *CollectorServer {\n\tcs := &CollectorServer{c: c, l: l}\n\treturn cs\n}", "func NewCollector(\n\tfile string,\n\tupdater updater.Updater,\n\tparser clusterParser,\n) Collector {\n\treturn &fileCollector{\n\t\tfile: file,\n\t\tupdater: updater,\n\t\tparser: parser,\n\t\tos: tbnos.New(),\n\t}\n}", "func NewCollector() Collector {\n\treturn make(Collector)\n}", "func NewCollector(config *Config) (coll *Collector, err error) {\n\tvar gelfWriter *gelf.Writer\n\n\tif gelfWriter, err = gelf.NewWriter(config.Graylog.Address); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcoll = new(Collector)\n\tcoll.writer = gelfWriter\n\tcoll.host = config.Collector.Hostname\n\n\treturn coll, nil\n}", "func NewCollector(api API) *Collector {\n\treturn &Collector{api: api}\n}", "func New() *Collector { return &Collector{} }", "func NewLocalCollector(s Store) Collector {\n\treturn s\n}", "func NewRemote(ctx context.Context, conn *grpc.ClientConn) Subjects {\n\treturn &remote{\n\t\tclient: NewServiceClient(conn),\n\t}\n}", "func NewCollector(h host.Host, opts ...InitOpt) (PubSubCollector, error) 
{\n\tio, err := NewInitOpts(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch io.Conf.Router {\n\tcase \"basic\":\n\t\treturn NewBasicPubSubCollector(h, opts...)\n\tcase \"relay\":\n\t\treturn NewRelayPubSubCollector(h, opts...)\n\tcase \"intbfs\":\n\t\treturn NewIntBFSCollector(h, opts...)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown router type\")\n\t}\n}", "func NewCollector(store *forensicstore.ForensicStore, tempDir string, definitions []goartifacts.ArtifactDefinition) (*LiveCollector, error) {\n\tprovidesMap := map[string][]goartifacts.Source{}\n\n\tdefinitions = goartifacts.FilterOS(definitions)\n\n\tfor _, definition := range definitions {\n\t\tfor _, source := range definition.Sources {\n\t\t\tfor _, provide := range source.Provides {\n\t\t\t\tkey := strings.TrimPrefix(provide.Key, \"environ_\")\n\t\t\t\tif providingSources, ok := providesMap[key]; !ok {\n\t\t\t\t\tprovidesMap[key] = []goartifacts.Source{source}\n\t\t\t\t} else {\n\t\t\t\t\tprovidesMap[key] = append(providingSources, source)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsourceFS, err := systemfs.New()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"system fs creation failed: %w\", err)\n\t}\n\n\treturn &LiveCollector{\n\t\tSourceFS: sourceFS,\n\t\tregistryfs: registryfs.New(),\n\t\tStore: store,\n\t\tTempDir: tempDir,\n\t\tprovidesMap: providesMap,\n\t\tknowledgeBase: map[string][]string{},\n\t}, nil\n}", "func New(c *ovsnl.Client) prometheus.Collector {\n\treturn &collector{\n\t\tcs: []prometheus.Collector{\n\t\t\t// Additional generic netlink family collectors can be added here.\n\t\t\tnewDatapathCollector(c.Datapath.List),\n\t\t},\n\t}\n}", "func newRemotePeer(meta PeerMeta, p2ps PeerManager, iServ ActorService, log *log.Logger, signer msgSigner) *RemotePeer {\n\tpeer := &RemotePeer{\n\t\tmeta: meta, pm: p2ps, actorServ: iServ, logger: log, signer: signer,\n\t\tpingDuration: defaultPingInterval,\n\t\tstate: types.STARTING,\n\n\t\tstopChan: make(chan struct{}),\n\t\tcloseWrite: 
make(chan struct{}),\n\t\thsLock: &sync.Mutex{},\n\n\t\trequests: make(map[string]msgOrder),\n\t\tconsumeChan: make(chan string, 10),\n\n\t\thandlers: make(map[SubProtocol]MessageHandler),\n\t}\n\tpeer.write = p2putil.NewDefaultChannelPipe(20, newHangresolver(peer, log))\n\n\tvar err error\n\tpeer.blkHashCache, err = lru.New(DefaultPeerInvCacheSize)\n\tif err != nil {\n\t\tpanic(\"Failed to create remotepeer \" + err.Error())\n\t}\n\tpeer.txHashCache, err = lru.New(DefaultPeerInvCacheSize)\n\tif err != nil {\n\t\tpanic(\"Failed to create remotepeer \" + err.Error())\n\t}\n\n\treturn peer\n}", "func NewCollector(db DB, url string) *EventsCollector {\n\treturn &EventsCollector{url, make(chan struct{}), db}\n}", "func NewCollector(cfg *config.AgentConfig) TelemetryCollector {\n\tif !cfg.TelemetryConfig.Enabled {\n\t\treturn &noopTelemetryCollector{}\n\t}\n\n\tvar endpoints []config.Endpoint\n\tfor _, endpoint := range cfg.TelemetryConfig.Endpoints {\n\t\tu, err := url.Parse(endpoint.Host)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tu.Path = \"/api/v2/apmtelemetry\"\n\t\tendpointWithPath := *endpoint\n\t\tendpointWithPath.Host = u.String()\n\n\t\tendpoints = append(endpoints, endpointWithPath)\n\t}\n\n\treturn &telemetryCollector{\n\t\tclient: cfg.NewHTTPClient(),\n\t\tendpoints: endpoints,\n\t\tuserAgent: fmt.Sprintf(\"Datadog Trace Agent/%s/%s\", cfg.AgentVersion, cfg.GitCommit),\n\n\t\tcfg: cfg,\n\t\tcollectedStartupError: &atomic.Bool{},\n\t}\n}", "func NewRemote() (Catalog, error) {\n\treturn newRemoteFunc()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewTLSRemoteCollector creates a RemoteCollector that uses TLS.
func NewTLSRemoteCollector(addr string, tlsConfig *tls.Config) *RemoteCollector { return &RemoteCollector{ addr: addr, dial: func() (net.Conn, error) { return tls.Dial("tcp", addr, tlsConfig) }, } }
[ "func NewTLS() *TLS {\n\treturn &TLS{\n\t\tHTTPSEnforced: new(bool),\n\t\tCertsAutoEnabled: new(bool),\n\t}\n}", "func NewTLSConnection(\n\tsrvRemote Remote,\n\trootCerts []byte,\n\terrorUnwrapper ErrorUnwrapper,\n\thandler ConnectionHandler,\n\tlogFactory LogFactory,\n\tinstrumenterStorage NetworkInstrumenterStorage,\n\tlogOutput LogOutputWithDepthAdder,\n\tmaxFrameLength int32,\n\topts ConnectionOpts,\n) *Connection {\n\ttransport := &ConnectionTransportTLS{\n\t\trootCerts: rootCerts,\n\t\tsrvRemote: srvRemote,\n\t\tmaxFrameLength: maxFrameLength,\n\t\tlogFactory: logFactory,\n\t\tinstrumenterStorage: instrumenterStorage,\n\t\twef: opts.WrapErrorFunc,\n\t\tdialerTimeout: opts.DialerTimeout,\n\t\thandshakeTimeout: opts.HandshakeTimeout,\n\t\tlog: newConnectionLogUnstructured(logOutput, \"CONNTSPT\"),\n\t}\n\treturn newConnectionWithTransportAndProtocols(handler, transport, errorUnwrapper, logOutput, opts)\n}", "func newTLSConfig(target string, registry *prometheus.Registry, cfg *config.TLSConfig) (*tls.Config, error) {\n\ttlsConfig, err := config.NewTLSConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif tlsConfig.ServerName == \"\" && target != \"\" {\n\t\ttargetAddress, _, err := net.SplitHostPort(target)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttlsConfig.ServerName = targetAddress\n\t}\n\n\ttlsConfig.VerifyConnection = func(state tls.ConnectionState) error {\n\t\treturn collectConnectionStateMetrics(state, registry)\n\t}\n\n\treturn tlsConfig, nil\n}", "func NewRemoteCollector(addr string) *RemoteCollector {\n\treturn &RemoteCollector{\n\t\taddr: addr,\n\t\tdial: func() (net.Conn, error) {\n\t\t\treturn net.Dial(\"tcp\", addr)\n\t\t},\n\t}\n}", "func NewServerTLS(addr string,\n\thandler func(conn Conn, cmd Command),\n\taccept func(conn Conn) bool,\n\tclosed func(conn Conn, err error),\n\tconfig *tls.Config,\n) *TLSServer {\n\treturn NewServerNetworkTLS(\"tcp\", addr, handler, accept, closed, config)\n}", "func GenerateTLS(hosts 
string, validity string) (*CertificateChain, error) {\n\treturn tls.GenerateTLS(hosts, validity)\n}", "func NewTLSConnectionWithTLSConfig(\n\tsrvRemote Remote,\n\ttlsConfig *tls.Config,\n\terrorUnwrapper ErrorUnwrapper,\n\thandler ConnectionHandler,\n\tlogFactory LogFactory,\n\tinstrumenterStorage NetworkInstrumenterStorage,\n\tlogOutput LogOutputWithDepthAdder,\n\tmaxFrameLength int32,\n\topts ConnectionOpts,\n) *Connection {\n\ttransport := &ConnectionTransportTLS{\n\t\tsrvRemote: srvRemote,\n\t\ttlsConfig: copyTLSConfig(tlsConfig),\n\t\tmaxFrameLength: maxFrameLength,\n\t\tlogFactory: logFactory,\n\t\tinstrumenterStorage: instrumenterStorage,\n\t\twef: opts.WrapErrorFunc,\n\t\tdialerTimeout: opts.DialerTimeout,\n\t\thandshakeTimeout: opts.HandshakeTimeout,\n\t\tlog: newConnectionLogUnstructured(logOutput, \"CONNTSPT\"),\n\t}\n\treturn newConnectionWithTransportAndProtocols(handler, transport, errorUnwrapper, logOutput, opts)\n}", "func NewTLS(c Config, t *tls.Config) (*Asock, error) {\n\tl, err := tls.Listen(\"tcp\", c.Sockname, t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn commonNew(c, l), nil\n}", "func InstallTLS(streams genericclioptions.IOStreams) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"tls\",\n\t\tDisableFlagsInUseLine: true,\n\t\tShort: \"Generate and install TLS material to be used with the Falco gRPC server\",\n\t\tLong: `Falco gRPC server runs with mutually encrypted TLS by default.\n\nThis command is a convenience to not only generate the TLS material, but also drop it off on the local filesystem.`,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tlogger.Critical(\"this command only works on machines running a linux kernel\")\n\n\t\t\treturn nil\n\t\t},\n\t}\n\n\treturn cmd\n}", "func NewTLSProxy(targetURL string) (*TLSProxy, error) {\n\tcert, err := tls.X509KeyPair([]byte(ServerCert), []byte(serverKey))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"load keypair\")\n\t}\n\tcfg := 
&tls.Config{Certificates: []tls.Certificate{cert}}\n\n\t// Assign any available port\n\tlistener, err := tls.Listen(\"tcp\", \"localhost:0\", cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"bind localhost\")\n\t}\n\n\taddr, ok := listener.Addr().(*net.TCPAddr)\n\tif !ok {\n\t\treturn nil, errors.New(\"listener is not *net.TCPAddr\")\n\t}\n\n\thandler, err := newProxyHandler(targetURL)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"make proxy handler\")\n\t}\n\n\tproxy := &TLSProxy{\n\t\tPort: addr.Port,\n\t\tlistener: listener,\n\t\tserver: &http.Server{Handler: handler},\n\t}\n\n\treturn proxy, nil\n}", "func newTLSServer(t *testing.T, domain string) net.Listener {\n\tcert := cert(t, domain)\n\n\tl := newLocalListener(t)\n\tgo func() {\n\t\tfor {\n\t\t\trawConn, err := l.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn // assume closed\n\t\t\t}\n\n\t\t\tcfg := &tls.Config{\n\t\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\t}\n\t\t\tcfg.BuildNameToCertificate()\n\t\t\tconn := tls.Server(rawConn, cfg)\n\t\t\tif _, err = io.WriteString(conn, domain); err != nil {\n\t\t\t\tt.Errorf(\"writing to tlsconn: %s\", err)\n\t\t\t}\n\t\t\tconn.Close()\n\t\t}\n\t}()\n\n\treturn l\n}", "func NewTLS(dsn string, pex bool, tlsCfg *tls.Config) (*Amqp, error) {\n\n\treturn newImpl(dsn, pex, tlsCfg)\n}", "func NewTLSServer(config *tls.Config) *TLSServer {\n\tserver := new(TLSServer)\n\tserver.config = config\n\tserver.pl = new(pipeline)\n\tserver.AddHandler(server.optHandler)\n\treturn server\n}", "func NewTLSConn() TLSConn {\n\tc := TLSConn{}\n\n\tc.tlsVersions = []uint16{\n\t\ttls.VersionSSL30,\n\t\ttls.VersionTLS10,\n\t\ttls.VersionTLS11,\n\t\ttls.VersionTLS12,\n\t\ttls.VersionTLS13,\n\t}\n\n\tc.tlsCurves = []tls.CurveID{\n\t\ttls.CurveP256,\n\t\ttls.CurveP384,\n\t\ttls.CurveP521,\n\t\ttls.X25519,\n\t}\n\n\tc.conf = &tls.Config{}\n\n\treturn c\n}", "func NewListenerTLS(network, addr, certFile, keyFile string, readTimeout, writeTimeout time.Duration) 
(net.Listener, error) {\n\tconfig := &tls.Config{}\n\tif config.NextProtos == nil {\n\t\tconfig.NextProtos = []string{\"http/1.1\"}\n\t}\n\n\tvar err error\n\tconfig.Certificates = make([]tls.Certificate, 1)\n\tconfig.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := net.Listen(network, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttl := &Listener{\n\t\tListener: tls.NewListener(conn, config),\n\t\tReadTimeout: readTimeout,\n\t\tWriteTimeout: writeTimeout,\n\t}\n\treturn tl, nil\n}", "func NewTotalTls(ctx *pulumi.Context,\n\tname string, args *TotalTlsArgs, opts ...pulumi.ResourceOption) (*TotalTls, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Enabled == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Enabled'\")\n\t}\n\tif args.ZoneId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ZoneId'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource TotalTls\n\terr := ctx.RegisterResource(\"cloudflare:index/totalTls:TotalTls\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewStkSSLCollector() (Collector, error) {\n\treturn &stkSSLCollector{\n\t\tstkSslUp: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"up\"),\n\t\t\t\"StatusCake Test SSL Status\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslCertScore: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"cert_score\"),\n\t\t\t\"StatusCake SSL Certificate Score\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslCipherScore: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"cipher_score\"),\n\t\t\t\"StatusCake SSL Cipher 
Score\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslCertStatus: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"cert_status\"),\n\t\t\t\"StatusCake SSL Cert Status\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslValidUntil: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"valid_until_sec\"),\n\t\t\t\"StatusCake SSL Valid Until\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslValidUntilDays: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"valid_until_days\"),\n\t\t\t\"StatusCake SSL Valid Until (in Days)\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslAlertReminder: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"alert_reminder\"),\n\t\t\t\"StatusCake SSL Alert Reminder boolean\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslLastReminder: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"last_reminder\"),\n\t\t\t\"StatusCake SSL Last Reminder\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslAlertExpiry: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"alert_expiry\"),\n\t\t\t\"StatusCake SSL Alert Expiry boolean\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslAlertBroken: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"alert_broken\"),\n\t\t\t\"StatusCake SSL Alert Broken boolean\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslAlertMixed: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"alert_mixed\"),\n\t\t\t\"StatusCake SSL Alert Broken boolean\",\n\t\t\t[]string{\"domain\",\"contactGroupId\"}, nil,\n\t\t),\n\t\tstkSslFlagEnabled: 
prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, stkSSLCollectorSubsystem, \"flag_enabled\"),\n\t\t\t\"StatusCake SSL Flag is enabled\",\n\t\t\t[]string{\"domain\", \"name\",\"contactGroupId\"}, nil,\n\t\t),\n\t}, nil\n}", "func NewWsTransportTLS(marshaler Marshaler, uri *url.URL, serveMux *http.ServeMux,\n\tcfg *Config, tlscfg *tls.Config) Transport {\n\n\tself := &wsTransport{}\n\tself.init(marshaler, uri, serveMux, cfg, tlscfg)\n\treturn self\n}", "func NewTLSConfig(opts ...Option) (*tls.Config, error) {\n\tcfg := &tls.Config{}\n\tfor _, opt := range opts {\n\t\tif err := opt(cfg); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn cfg, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect implements the Collector interface by sending the events that occured in the span to the remote collector server (see CollectorServer).
func (rc *RemoteCollector) Collect(span SpanID, anns ...Annotation) error { return rc.collectAndRetry(newCollectPacket(span, anns)) }
[ "func (cc *ChunkedCollector) Collect(span SpanID, anns ...Annotation) error {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\n\tif cc.stopped {\n\t\treturn errors.New(\"ChunkedCollector is stopped\")\n\t}\n\tif !cc.started {\n\t\tcc.start()\n\t}\n\n\tif cc.pendingBySpanID == nil {\n\t\tcc.pendingBySpanID = map[SpanID]*wire.CollectPacket{}\n\t}\n\n\tif p, present := cc.pendingBySpanID[span]; present {\n\t\tif len(anns) > 0 {\n\t\t\tp.Annotation = append(p.Annotation, Annotations(anns).wire()...)\n\t\t}\n\t} else {\n\t\tcc.pendingBySpanID[span] = newCollectPacket(span, anns)\n\t\tcc.pending = append(cc.pending, span)\n\t}\n\n\tif err := cc.lastErr; err != nil {\n\t\tcc.lastErr = nil\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c collector) Collect(traceID, parentSpanID, spanID, message string, data map[string]interface{}) {\n\tf := &format.Std{\n\t\tCallStackSkip: NO_STACK_TRACE_INFO,\n\t\tTraceId: traceID,\n\t\tSpanId: spanID,\n\t\tParentSpanId: parentSpanID,\n\t\tMessage: message,\n\t\tData: data,\n\t}\n\tf.EnableSyslogHeader(c.l.syslog)\n\tf.SetService(c.l.key.service)\n\tf.SetLevel(INFO)\n\tc.l.writer.WriteFrom(f)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tlog.Infof(\"Syno exporter starting\")\n\tif e.Client == nil {\n\t\tlog.Errorf(\"Syno client not configured.\")\n\t\treturn\n\t}\n\terr := e.Client.Connect()\n\tif err != nil {\n\t\tlog.Errorln(\"Can't connect to Synology for SNMP: %s\", err)\n\t\treturn\n\t}\n\tdefer e.Client.SNMP.Conn.Close()\n\n\te.collectSystemMetrics(ch)\n\te.collectCPUMetrics(ch)\n\te.collectLoadMetrics(ch)\n\te.collectMemoryMetrics(ch)\n\te.collectNetworkMetrics(ch)\n\te.collectDiskMetrics(ch)\n\n\tlog.Infof(\"Syno exporter finished\")\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\t// Reset metrics.\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tfor _, vec := range e.counters 
{\n\t\tvec.Reset()\n\t}\n\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\tlog.Printf(\"Error while querying Elasticsearch: %v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to read ES response body: %v\", err)\n\t\te.up.Set(0)\n\t\treturn\n\t}\n\n\te.up.Set(1)\n\n\tvar all_stats NodeStatsResponse\n\terr = json.Unmarshal(body, &all_stats)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal JSON into struct: %v\", err)\n\t\treturn\n\t}\n\n\t// Regardless of whether we're querying the local host or the whole\n\t// cluster, here we can just iterate through all nodes found.\n\n\tfor node, stats := range all_stats.Nodes {\n\t\tlog.Printf(\"Processing node %v\", node)\n\t\t// GC Stats\n\t\tfor collector, gcstats := range stats.JVM.GC.Collectors {\n\t\t\te.counters[\"jvm_gc_collection_count\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionCount))\n\t\t\te.counters[\"jvm_gc_collection_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionTime))\n\t\t}\n\n\t\t// Breaker stats\n\t\tfor breaker, bstats := range stats.Breakers {\n\t\t\te.gauges[\"breakers_estimated_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.EstimatedSize))\n\t\t\te.gauges[\"breakers_limit_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.LimitSize))\n\t\t}\n\n\t\t// JVM Memory Stats\n\t\te.gauges[\"jvm_mem_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapCommitted))\n\t\te.gauges[\"jvm_mem_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapUsed))\n\t\te.gauges[\"jvm_mem_heap_max_in_bytes\"].WithLabelValues(all_stats.ClusterName, 
stats.Name).Set(float64(stats.JVM.Mem.HeapMax))\n\t\te.gauges[\"jvm_mem_non_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapCommitted))\n\t\te.gauges[\"jvm_mem_non_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapUsed))\n\n\t\t// Indices Stats\n\t\te.gauges[\"indices_fielddata_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.Evictions))\n\t\te.gauges[\"indices_fielddata_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.MemorySize))\n\t\te.gauges[\"indices_filter_cache_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.Evictions))\n\t\te.gauges[\"indices_filter_cache_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.MemorySize))\n\n\t\te.gauges[\"indices_docs_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Count))\n\t\te.gauges[\"indices_docs_deleted\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Deleted))\n\n\t\te.gauges[\"indices_segments_memory_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Segments.Memory))\n\n\t\te.gauges[\"indices_store_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.Size))\n\t\te.counters[\"indices_store_throttle_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.ThrottleTime))\n\n\t\te.counters[\"indices_flush_total\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Total))\n\t\te.counters[\"indices_flush_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Time))\n\n\t\t// Transport 
Stats\n\t\te.counters[\"transport_rx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxCount))\n\t\te.counters[\"transport_rx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxSize))\n\t\te.counters[\"transport_tx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxCount))\n\t\te.counters[\"transport_tx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxSize))\n\t}\n\n\t// Report metrics.\n\tch <- e.up\n\n\tfor _, vec := range e.counters {\n\t\tvec.Collect(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (m *Monitoring) collect() {\n\tfor {\n\t\tevents, ok := <-m.ch\n\t\tif !ok {\n\t\t\tlog.Printf(\"event channel is closed\")\n\t\t\treturn\n\t\t}\n\n\t\tif err := m.w.Write(context.Background(), events); err != nil {\n\t\t\tlog.Printf(\"failed to write metric events %+v: %v\", events, err)\n\t\t}\n\t}\n\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\t// Protect metrics from concurrent collects.\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\t// Scrape metrics from Tankerkoenig API.\n\tif err := e.scrape(ch); err != nil {\n\t\te.logger.Printf(\"error: cannot scrape tankerkoenig api: %v\", err)\n\t}\n\n\t// Collect metrics.\n\te.up.Collect(ch)\n\te.scrapeDuration.Collect(ch)\n\te.failedScrapes.Collect(ch)\n\te.totalScrapes.Collect(ch)\n}", "func (c *Client) Collect(s stat.Stat) error {\n\tbuf, err := json.Marshal(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"%s/collect\", c.url), bytes.NewBuffer(buf))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn errors.Wrap(errors.New(resp.Status), 
\"statscoll\")\n\t}\n\n\treturn nil\n}", "func (c *KeenCollector) Collect(event Collectable) error {\n\treturn c.keenClient.AddEvent(event.Collection(), event)\n}", "func (thisSocketSet *socketSet) collect(prometheusGaugeVector *prometheus.GaugeVec) {\n\tfor _, currentSocket := range thisSocketSet.Sockets {\n\t\tcurrentSocket.collect(prometheusGaugeVector)\n\t}\n\treturn\n}", "func (statColl *StatCollector) Collect(op *RecordedOp, replayedOp Op, reply Replyable, msg string) {\n\tif statColl.noop {\n\t\treturn\n\t}\n\tstatColl.Do(func() {\n\t\tstatColl.statStream = make(chan *OpStat, statColl.statStreamSize)\n\t\tstatColl.done = make(chan struct{})\n\t\tgo func() {\n\t\t\tfor stat := range statColl.statStream {\n\t\t\t\tstatColl.StatRecorder.RecordStat(stat)\n\t\t\t}\n\t\t\tclose(statColl.done)\n\t\t}()\n\t})\n\tif stat := statColl.GenerateOpStat(op, replayedOp, reply, msg); stat != nil {\n\t\tstatColl.statStream <- stat\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tfor _, cc := range e.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.Lock()\n\tdefer e.Unlock()\n\te.totalScrapes.Inc()\n\n\tif e.redisAddr != \"\" {\n\t\tstartTime := time.Now()\n\t\tvar up float64\n\t\tif err := e.scrapeRedisHost(ch); err != nil {\n\t\t\te.registerConstMetricGauge(ch, \"exporter_last_scrape_error\", 1.0, fmt.Sprintf(\"%s\", err))\n\t\t} else {\n\t\t\tup = 1\n\t\t\te.registerConstMetricGauge(ch, \"exporter_last_scrape_error\", 0, \"\")\n\t\t}\n\n\t\te.registerConstMetricGauge(ch, \"up\", up)\n\n\t\ttook := time.Since(startTime).Seconds()\n\t\te.scrapeDuration.Observe(took)\n\t\te.registerConstMetricGauge(ch, \"exporter_last_scrape_duration_seconds\", took)\n\t}\n\n\tch <- e.totalScrapes\n\tch <- e.scrapeDuration\n\tch <- e.targetScrapeRequestErrors\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from 
concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup := e.scrape(ch)\n\n\tch <- prometheus.MustNewConstMetric(artifactoryUp, prometheus.GaugeValue, up)\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n}", "func (c *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tc.mut.RLock()\n\tdefer c.mut.RUnlock()\n\n\tif c.inner != nil {\n\t\tc.inner.Collect(ch)\n\t}\n}", "func (p *Collector) Collect(c chan<- prometheus.Metric) {\n\tp.Sink.mu.Lock()\n\tdefer p.Sink.mu.Unlock()\n\n\texpire := p.Sink.expiration != 0\n\tnow := time.Now()\n\tfor k, v := range p.Sink.gauges {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.gauges, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n\tfor k, v := range p.Sink.summaries {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.summaries, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n\tfor k, v := range p.Sink.counters {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.counters, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func (as *AggregateStore) Collect(id SpanID, anns ...Annotation) error {\n\tas.mu.Lock()\n\tdefer as.mu.Unlock()\n\n\t// Initialization\n\tif as.groups == nil {\n\t\tas.groups = make(map[ID]*spanGroup)\n\t\tas.groupsByName = make(map[string]ID)\n\t\tas.pre = &LimitStore{\n\t\t\tMax: as.MaxRate,\n\t\t\tDeleteStore: NewMemoryStore(),\n\t\t}\n\t}\n\n\t// Collect into the limit store.\n\tif err := as.pre.Collect(id, anns...); err != nil 
{\n\t\treturn err\n\t}\n\n\t// Consider eviction of old data.\n\tif time.Since(as.lastEvicted) > as.MinEvictAge {\n\t\tif err := as.evictBefore(time.Now().Add(-1 * as.MinEvictAge)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Grab the group for our span.\n\tgroup, ok := as.group(id, anns...)\n\tif !ok {\n\t\t// We don't have a group for the trace, and can't create one (the\n\t\t// spanName event isn't present yet).\n\t\treturn nil\n\t}\n\n\t// Unmarshal the events.\n\tvar events []Event\n\tif err := UnmarshalEvents(anns, &events); err != nil {\n\t\treturn err\n\t}\n\n\t// Find the start and end time of the trace.\n\teStart, eEnd, ok := findTraceTimes(events)\n\tif !ok {\n\t\t// We didn't find any timespan events at all, so we're done here.\n\t\treturn nil\n\t}\n\n\t// Update the group to consider this trace being one of the slowest.\n\tgroup.update(eStart, eEnd, id.Trace, func(trace ID) {\n\t\t// Delete the request trace from the output store.\n\t\tif err := as.deleteOutput(trace); err != nil {\n\t\t\tlog.Printf(\"AggregateStore: failed to delete a trace: %s\", err)\n\t\t}\n\t})\n\n\t// Move traces from the limit store into the group, as needed.\n\tfor _, slowest := range group.Slowest {\n\t\t// Find the trace in the limit store.\n\t\ttrace, err := as.pre.Trace(slowest.TraceID)\n\t\tif err == ErrTraceNotFound {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Place into output store.\n\t\tvar walk func(t *Trace) error\n\t\twalk = func(t *Trace) error {\n\t\t\terr := as.MemoryStore.Collect(t.Span.ID, t.Span.Annotations...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor _, sub := range t.Sub {\n\t\t\t\tif err := walk(sub); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err := walk(trace); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Delete from the limit store.\n\t\terr = as.pre.Delete(slowest.TraceID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Prepare the 
aggregation event (before locking below).\n\tev := &AggregateEvent{\n\t\tName: group.Name,\n\t\tTimes: group.Times,\n\t}\n\tfor _, slowest := range group.Slowest {\n\t\tif !slowest.empty() {\n\t\t\tev.Slowest = append(ev.Slowest, slowest.TraceID)\n\t\t}\n\t}\n\tif as.Debug && len(ev.Slowest) == 0 {\n\t\tlog.Printf(\"AggregateStore: no slowest traces for group %q (consider increasing MaxRate)\", group.Name)\n\t}\n\n\t// As we're updating the aggregation event, we go ahead and delete the old\n\t// one now. We do this all under as.MemoryStore.Lock otherwise users (e.g. the\n\t// web UI) can pull from as.MemoryStore when the trace has been deleted.\n\tas.MemoryStore.Lock()\n\tdefer as.MemoryStore.Unlock()\n\tif err := as.MemoryStore.deleteNoLock(group.Trace); err != nil {\n\t\treturn err\n\t}\n\n\t// Record an aggregate event with the given name.\n\trecEvent := func(e Event) error {\n\t\tanns, err := MarshalEvent(e)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn as.MemoryStore.collectNoLock(SpanID{Trace: group.Trace}, anns...)\n\t}\n\tif err := recEvent(spanName{Name: group.Name}); err != nil {\n\t\treturn err\n\t}\n\tif err := recEvent(ev); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func collect(emit emitFn, c collector) {\n\tswitch cc := c.(type) {\n\tdefault:\n\t\tlog.Panicf(\"unsupported collector type: %T\", c)\n\n\tcase statsCollector:\n\t\tresp, err := Client.Stat.Get(c.Subsystem())\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn\n\t\t}\n\t\tlogPanics(func() {\n\t\t\tcc.CollectStats(emit, resp)\n\t\t})\n\n\tcase configCollector:\n\t\tresp, err := Client.Config.Get(c.Subsystem())\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn\n\t\t}\n\t\tlogPanics(func() {\n\t\t\tcc.CollectConfig(emit, resp)\n\t\t})\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }